Column schema (string columns show min-max length; numeric columns show min-max value):

repo              stringlengths   7-55
path              stringlengths   4-223
url               stringlengths   87-315
code              stringlengths   75-104k
code_tokens       list
docstring         stringlengths   1-46.9k
docstring_tokens  list
language          stringclasses   1 value
partition         stringclasses   3 values
avg_line_len      float64         7.91-980
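The rows that follow are instances of this schema. As a quick orientation, here is a minimal sketch of how a corpus with these columns could be inspected using the Hugging Face `datasets` library; the dataset identifier below is a made-up placeholder, since the corpus name is not given here.

```python
# Hypothetical sketch: loading a CodeSearchNet-style corpus with the
# Hugging Face `datasets` library. The dataset id is a placeholder.
from datasets import load_dataset

ds = load_dataset("user/python-code-docstring-corpus", split="train")  # placeholder id

# Each row carries the fields listed in the schema above.
row = ds[0]
print(row["repo"], row["path"])
print(row["url"])
print(row["docstring"][:200])                 # free-text summary of the function
print(len(row["code_tokens"]), "code tokens") # tokenized form of row["code"]
```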
jay-johnson/spylunking
spylunking/splunk_publisher.py
https://github.com/jay-johnson/spylunking/blob/95cc86776f04ec5935cf04e291cf18798345d6cb/spylunking/splunk_publisher.py#L349-L368
def start_worker_thread( self, sleep_interval=1.0): """start_worker_thread Start the helper worker thread to publish queued messages to Splunk :param sleep_interval: sleep in seconds before reading from the queue again """ # Start a worker thread responsible for sending logs if self.sleep_interval > 0: self.debug_log( 'starting worker thread') self.timer = Timer( sleep_interval, self.perform_work) self.timer.daemon = True # Auto-kill thread if main process exits self.timer.start()
[ "def", "start_worker_thread", "(", "self", ",", "sleep_interval", "=", "1.0", ")", ":", "# Start a worker thread responsible for sending logs", "if", "self", ".", "sleep_interval", ">", "0", ":", "self", ".", "debug_log", "(", "'starting worker thread'", ")", "self", ".", "timer", "=", "Timer", "(", "sleep_interval", ",", "self", ".", "perform_work", ")", "self", ".", "timer", ".", "daemon", "=", "True", "# Auto-kill thread if main process exits", "self", ".", "timer", ".", "start", "(", ")" ]
start_worker_thread Start the helper worker thread to publish queued messages to Splunk :param sleep_interval: sleep in seconds before reading from the queue again
[ "start_worker_thread" ]
python
train
33.8
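For readers unfamiliar with the pattern used above, the snippet below is a minimal, self-contained sketch of the same idea: a `threading.Timer` marked as a daemon so it cannot keep the interpreter alive. Names such as `drain_queue` are illustrative placeholders, not part of spylunking.

```python
import queue
import threading

log_queue = queue.Queue()

def drain_queue():
    """Illustrative stand-in for perform_work(): drain pending messages."""
    while not log_queue.empty():
        print("publishing:", log_queue.get_nowait())

def start_worker_thread(sleep_interval=1.0):
    # Timer runs drain_queue once after sleep_interval seconds; the real
    # publisher reschedules itself from within its perform_work() method.
    timer = threading.Timer(sleep_interval, drain_queue)
    timer.daemon = True  # auto-kill the thread if the main process exits
    timer.start()
    return timer

log_queue.put("hello splunk")
start_worker_thread(0.1).join()
```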
inveniosoftware/invenio-records-rest
invenio_records_rest/views.py
https://github.com/inveniosoftware/invenio-records-rest/blob/e7b63c5f72cef03d06d3f1b4c12c0d37e3a628b9/invenio_records_rest/views.py#L757-L794
def put(self, pid, record, **kwargs): """Replace a record. Permissions: ``update_permission_factory`` The body should be a JSON object, which will fully replace the current record metadata. Procedure description: #. The ETag is checked. #. The record is updated by calling the record API `clear()`, `update()` and then `commit()`. #. The HTTP response is built with the help of the link factory. :param pid: Persistent identifier for record. :param record: Record object. :returns: The modified record. """ if request.mimetype not in self.loaders: raise UnsupportedMediaRESTError(request.mimetype) data = self.loaders[request.mimetype]() if data is None: raise InvalidDataRESTError() self.check_etag(str(record.revision_id)) record.clear() record.update(data) record.commit() db.session.commit() if self.indexer_class: self.indexer_class().index(record) return self.make_response( pid, record, links_factory=self.links_factory)
[ "def", "put", "(", "self", ",", "pid", ",", "record", ",", "*", "*", "kwargs", ")", ":", "if", "request", ".", "mimetype", "not", "in", "self", ".", "loaders", ":", "raise", "UnsupportedMediaRESTError", "(", "request", ".", "mimetype", ")", "data", "=", "self", ".", "loaders", "[", "request", ".", "mimetype", "]", "(", ")", "if", "data", "is", "None", ":", "raise", "InvalidDataRESTError", "(", ")", "self", ".", "check_etag", "(", "str", "(", "record", ".", "revision_id", ")", ")", "record", ".", "clear", "(", ")", "record", ".", "update", "(", "data", ")", "record", ".", "commit", "(", ")", "db", ".", "session", ".", "commit", "(", ")", "if", "self", ".", "indexer_class", ":", "self", ".", "indexer_class", "(", ")", ".", "index", "(", "record", ")", "return", "self", ".", "make_response", "(", "pid", ",", "record", ",", "links_factory", "=", "self", ".", "links_factory", ")" ]
Replace a record. Permissions: ``update_permission_factory`` The body should be a JSON object, which will fully replace the current record metadata. Procedure description: #. The ETag is checked. #. The record is updated by calling the record API `clear()`, `update()` and then `commit()`. #. The HTTP response is built with the help of the link factory. :param pid: Persistent identifier for record. :param record: Record object. :returns: The modified record.
[ "Replace", "a", "record", "." ]
python
train
29.868421
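A hypothetical client-side call against such an endpoint might look as follows; the record URL is a placeholder, and the assumption that the ETag travels in the standard `If-Match` header is inferred from the revision-id check rather than stated in the snippet above.

```python
import requests  # assumed available; illustrative only

record_url = "https://example.org/api/records/1234"   # placeholder
new_metadata = {"title": "Replaced title", "creators": [{"name": "Doe, J."}]}

# Fetch the current revision so the server-side ETag check can succeed.
current = requests.get(record_url)
etag = current.headers.get("ETag")

resp = requests.put(
    record_url,
    json=new_metadata,                      # full replacement of the metadata
    headers={"If-Match": etag, "Content-Type": "application/json"},
)
resp.raise_for_status()
print(resp.json())
```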
rvswift/EB
EB/builder/postanalysis/postanalysis.py
https://github.com/rvswift/EB/blob/341880b79faf8147dc9fa6e90438531cd09fabcc/EB/builder/postanalysis/postanalysis.py#L190-L201
def error_check(options): """ Error check :rtype : object """ compare = options.compare ensemble_paths = options.ensemble_paths if compare and len(ensemble_paths) > 2: print("\n Only 2 ensembles can be compared, {d} were specified\n".format(d=len(ensemble_paths))) sys.exit(1)
[ "def", "error_check", "(", "options", ")", ":", "compare", "=", "options", ".", "compare", "ensemble_paths", "=", "options", ".", "ensemble_paths", "if", "compare", "and", "len", "(", "ensemble_paths", ")", ">", "2", ":", "print", "(", "\"\\n Only 2 ensembles can be compared, {d} were specified\\n\"", ".", "format", "(", "d", "=", "len", "(", "ensemble_paths", ")", ")", ")", "sys", ".", "exit", "(", "1", ")" ]
Error check :rtype : object
[ "Error", "check", ":", "rtype", ":", "object" ]
python
train
25.916667
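A minimal reproduction of that guard, using an `argparse.Namespace` stand-in for `options`:

```python
import argparse
import sys

def error_check(options):
    # Mirror of the guard above: at most two ensembles may be compared.
    if options.compare and len(options.ensemble_paths) > 2:
        print("\n Only 2 ensembles can be compared, {d} were specified\n"
              .format(d=len(options.ensemble_paths)))
        sys.exit(1)

opts = argparse.Namespace(compare=True, ensemble_paths=["a.csv", "b.csv"])
error_check(opts)  # passes silently; three or more paths would exit(1)
```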
seomoz/qless-py
qless/workers/__init__.py
https://github.com/seomoz/qless-py/blob/3eda4ffcd4c0016c9a7e44f780d6155e1a354dda/qless/workers/__init__.py#L52-L60
def divide(cls, jobs, count): '''Divide up the provided jobs into count evenly-sized groups''' jobs = list(zip(*zip_longest(*[iter(jobs)] * count))) # If we had no jobs to resume, then we get an empty list jobs = jobs or [()] * count for index in range(count): # Filter out the items in jobs that are Nones jobs[index] = [j for j in jobs[index] if j != None] return jobs
[ "def", "divide", "(", "cls", ",", "jobs", ",", "count", ")", ":", "jobs", "=", "list", "(", "zip", "(", "*", "zip_longest", "(", "*", "[", "iter", "(", "jobs", ")", "]", "*", "count", ")", ")", ")", "# If we had no jobs to resume, then we get an empty list", "jobs", "=", "jobs", "or", "[", "(", ")", "]", "*", "count", "for", "index", "in", "range", "(", "count", ")", ":", "# Filter out the items in jobs that are Nones", "jobs", "[", "index", "]", "=", "[", "j", "for", "j", "in", "jobs", "[", "index", "]", "if", "j", "!=", "None", "]", "return", "jobs" ]
Divide up the provided jobs into count evenly-sized groups
[ "Divide", "up", "the", "provided", "jobs", "into", "count", "evenly", "-", "sized", "groups" ]
python
train
48.222222
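The one-liner built from `zip_longest` is terse; the sketch below restates it as a standalone function and shows what it produces. This is an illustration of the idiom, not the qless class method itself.

```python
from itertools import zip_longest

def divide(jobs, count):
    """Split jobs into `count` groups by striding through the iterable."""
    # [iter(jobs)] * count is `count` references to one iterator, so
    # zip_longest pulls `count` items per tuple, padding with None;
    # the outer zip(*...) then transposes those tuples into groups.
    groups = list(zip(*zip_longest(*[iter(jobs)] * count)))
    groups = groups or [()] * count          # no jobs at all -> empty groups
    return [[j for j in g if j is not None] for g in groups]

print(divide(list(range(7)), 3))   # [[0, 3, 6], [1, 4], [2, 5]]
```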
StackStorm/pybind
pybind/slxos/v17s_1_02/isis_state/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/isis_state/__init__.py#L110-L133
def _set_global_isis_info(self, v, load=False): """ Setter method for global_isis_info, mapped from YANG variable /isis_state/global_isis_info (container) If this variable is read-only (config: false) in the source YANG file, then _set_global_isis_info is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_global_isis_info() directly. YANG Description: ISIS Global """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=global_isis_info.global_isis_info, is_container='container', presence=False, yang_name="global-isis-info", rest_name="global-isis-info", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-global', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='container', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """global_isis_info must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=global_isis_info.global_isis_info, is_container='container', presence=False, yang_name="global-isis-info", rest_name="global-isis-info", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-global', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='container', is_config=False)""", }) self.__global_isis_info = t if hasattr(self, '_set'): self._set()
[ "def", "_set_global_isis_info", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "global_isis_info", ".", "global_isis_info", ",", "is_container", "=", "'container'", ",", "presence", "=", "False", ",", "yang_name", "=", "\"global-isis-info\"", ",", "rest_name", "=", "\"global-isis-info\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'callpoint'", ":", "u'isis-global'", ",", "u'cli-suppress-show-path'", ":", "None", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-isis-operational'", ",", "defining_module", "=", "'brocade-isis-operational'", ",", "yang_type", "=", "'container'", ",", "is_config", "=", "False", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"global_isis_info must be of a type compatible with container\"\"\"", ",", "'defined-type'", ":", "\"container\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=global_isis_info.global_isis_info, is_container='container', presence=False, yang_name=\"global-isis-info\", rest_name=\"global-isis-info\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-global', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='container', is_config=False)\"\"\"", ",", "}", ")", "self", ".", "__global_isis_info", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
Setter method for global_isis_info, mapped from YANG variable /isis_state/global_isis_info (container) If this variable is read-only (config: false) in the source YANG file, then _set_global_isis_info is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_global_isis_info() directly. YANG Description: ISIS Global
[ "Setter", "method", "for", "global_isis_info", "mapped", "from", "YANG", "variable", "/", "isis_state", "/", "global_isis_info", "(", "container", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", "YANG", "file", "then", "_set_global_isis_info", "is", "considered", "as", "a", "private", "method", ".", "Backends", "looking", "to", "populate", "this", "variable", "should", "do", "so", "via", "calling", "thisObj", ".", "_set_global_isis_info", "()", "directly", "." ]
python
train
75.333333
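Stripped of the generated YANG binding machinery, the setter above follows a simple validate-then-assign shape. The sketch below shows that shape in plain Python; the `Container` class is a made-up stand-in, not part of pybind.

```python
class Container(dict):
    """Made-up stand-in for the YANG container type."""

class Node:
    def _set_global_isis_info(self, v):
        # Validate/coerce first, assign only on success -- the same shape
        # as the generated YANGDynClass call above.
        try:
            t = Container(v)
        except (TypeError, ValueError):
            raise ValueError({
                "error-string": "global_isis_info must be of a type "
                                "compatible with container",
                "defined-type": "container",
            })
        self.__global_isis_info = t

n = Node()
n._set_global_isis_info({"router-id": "10.0.0.1"})
```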
log2timeline/plaso
plaso/cli/helpers/interface.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/cli/helpers/interface.py#L53-L94
def _ParseStringOption(cls, options, argument_name, default_value=None): """Parses a string command line argument. Args: options (argparse.Namespace): parser options. argument_name (str): name of the command line argument. default_value (Optional[str]): default value of the command line argument. Returns: str: command line argument value or the default value if the command line argument is not set Raises: BadConfigOption: if the command line argument value cannot be converted to a Unicode string. """ argument_value = getattr(options, argument_name, None) if argument_value is None: return default_value if isinstance(argument_value, py2to3.BYTES_TYPE): encoding = sys.stdin.encoding # Note that sys.stdin.encoding can be None. if not encoding: encoding = locale.getpreferredencoding() if not encoding: encoding = cls._PREFERRED_ENCODING try: argument_value = argument_value.decode(encoding) except UnicodeDecodeError as exception: raise errors.BadConfigOption(( 'Unable to convert option: {0:s} to Unicode with error: ' '{1!s}.').format(argument_name, exception)) elif not isinstance(argument_value, py2to3.UNICODE_TYPE): raise errors.BadConfigOption( 'Unsupported option: {0:s} string type required.'.format( argument_name)) return argument_value
[ "def", "_ParseStringOption", "(", "cls", ",", "options", ",", "argument_name", ",", "default_value", "=", "None", ")", ":", "argument_value", "=", "getattr", "(", "options", ",", "argument_name", ",", "None", ")", "if", "argument_value", "is", "None", ":", "return", "default_value", "if", "isinstance", "(", "argument_value", ",", "py2to3", ".", "BYTES_TYPE", ")", ":", "encoding", "=", "sys", ".", "stdin", ".", "encoding", "# Note that sys.stdin.encoding can be None.", "if", "not", "encoding", ":", "encoding", "=", "locale", ".", "getpreferredencoding", "(", ")", "if", "not", "encoding", ":", "encoding", "=", "cls", ".", "_PREFERRED_ENCODING", "try", ":", "argument_value", "=", "argument_value", ".", "decode", "(", "encoding", ")", "except", "UnicodeDecodeError", "as", "exception", ":", "raise", "errors", ".", "BadConfigOption", "(", "(", "'Unable to convert option: {0:s} to Unicode with error: '", "'{1!s}.'", ")", ".", "format", "(", "argument_name", ",", "exception", ")", ")", "elif", "not", "isinstance", "(", "argument_value", ",", "py2to3", ".", "UNICODE_TYPE", ")", ":", "raise", "errors", ".", "BadConfigOption", "(", "'Unsupported option: {0:s} string type required.'", ".", "format", "(", "argument_name", ")", ")", "return", "argument_value" ]
Parses a string command line argument. Args: options (argparse.Namespace): parser options. argument_name (str): name of the command line argument. default_value (Optional[str]): default value of the command line argument. Returns: str: command line argument value or the default value if the command line argument is not set Raises: BadConfigOption: if the command line argument value cannot be converted to a Unicode string.
[ "Parses", "a", "string", "command", "line", "argument", "." ]
python
train
34.238095
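The interesting part of the function above is the encoding-fallback chain used when the option arrives as bytes. A simplified Python 3 sketch of that chain, without the py2to3 shim and with plain ValueError in place of plaso's BadConfigOption, might look like this:

```python
import locale
import sys

_PREFERRED_ENCODING = "utf-8"

def parse_string_option(options, argument_name, default_value=None):
    value = getattr(options, argument_name, None)
    if value is None:
        return default_value
    if isinstance(value, bytes):
        # Same fallback chain as above: stdin encoding, then the locale's
        # preferred encoding, then a hard-coded default.
        encoding = (sys.stdin.encoding or locale.getpreferredencoding()
                    or _PREFERRED_ENCODING)
        try:
            value = value.decode(encoding)
        except UnicodeDecodeError as exc:
            raise ValueError(
                "Unable to convert option: {0:s} to Unicode with error: "
                "{1!s}.".format(argument_name, exc))
    elif not isinstance(value, str):
        raise ValueError(
            "Unsupported option: {0:s} string type required.".format(argument_name))
    return value
```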
scheibler/khard
khard/carddav_object.py
https://github.com/scheibler/khard/blob/0f69430c2680f1ff5f073a977a3c5b753b96cc17/khard/carddav_object.py#L670-L678
def _get_webpages(self): """ :rtype: list(list(str)) """ urls = [] for child in self.vcard.getChildren(): if child.name == "URL": urls.append(child.value) return sorted(urls)
[ "def", "_get_webpages", "(", "self", ")", ":", "urls", "=", "[", "]", "for", "child", "in", "self", ".", "vcard", ".", "getChildren", "(", ")", ":", "if", "child", ".", "name", "==", "\"URL\"", ":", "urls", ".", "append", "(", "child", ".", "value", ")", "return", "sorted", "(", "urls", ")" ]
:rtype: list(list(str))
[ ":", "rtype", ":", "list", "(", "list", "(", "str", "))" ]
python
test
26.888889
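A self-contained illustration using the `vobject` library, which the `getChildren()`/`value` accessors above suggest is in play; the vCard literal is invented for the example.

```python
import vobject  # third-party; assumed installed for this sketch

VCARD = (
    "BEGIN:VCARD\r\n"
    "VERSION:3.0\r\n"
    "FN:Jane Example\r\n"
    "URL:https://example.org\r\n"
    "URL:https://blog.example.org\r\n"
    "END:VCARD\r\n"
)

card = vobject.readOne(VCARD)
# Collect every URL property, as the method above does, and sort the result.
urls = sorted(child.value for child in card.getChildren() if child.name == "URL")
print(urls)   # ['https://blog.example.org', 'https://example.org']
```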
click-contrib/click-configfile
click_configfile.py
https://github.com/click-contrib/click-configfile/blob/a616204cb9944125fd5051556f27a7ccef611e22/click_configfile.py#L81-L126
def assign_param_names(cls=None, param_class=None): """Class decorator to assign parameter name to instances of :class:`Param`. .. sourcecode:: @assign_param_names class ConfigSectionSchema(object): alice = Param(type=str) bob = Param(type=str) assert ConfigSectionSchema.alice.name == "alice" assert ConfigSectionSchema.bob.name == "bob" .. sourcecode:: # -- NESTED ASSIGN: Covers also nested SectionSchema subclasses. @assign_param_names class ConfigSectionSchema(object): class Foo(SectionSchema): alice = Param(type=str) bob = Param(type=str) assert ConfigSectionSchema.Foo.alice.name == "alice" assert ConfigSectionSchema.Foo.bob.name == "bob" """ if param_class is None: param_class = Param def decorate_class(cls): for name, value in select_params_from_section_schema(cls, param_class, deep=True): # -- ANNOTATE PARAM: By assigning its name if not value.name: value.name = name return cls # -- DECORATOR LOGIC: if cls is None: # -- CASE: @assign_param_names # -- CASE: @assign_param_names(...) return decorate_class else: # -- CASE: @assign_param_names class X: ... # -- CASE: assign_param_names(my_class) # -- CASE: my_class = assign_param_names(my_class) return decorate_class(cls)
[ "def", "assign_param_names", "(", "cls", "=", "None", ",", "param_class", "=", "None", ")", ":", "if", "param_class", "is", "None", ":", "param_class", "=", "Param", "def", "decorate_class", "(", "cls", ")", ":", "for", "name", ",", "value", "in", "select_params_from_section_schema", "(", "cls", ",", "param_class", ",", "deep", "=", "True", ")", ":", "# -- ANNOTATE PARAM: By assigning its name", "if", "not", "value", ".", "name", ":", "value", ".", "name", "=", "name", "return", "cls", "# -- DECORATOR LOGIC:", "if", "cls", "is", "None", ":", "# -- CASE: @assign_param_names", "# -- CASE: @assign_param_names(...)", "return", "decorate_class", "else", ":", "# -- CASE: @assign_param_names class X: ...", "# -- CASE: assign_param_names(my_class)", "# -- CASE: my_class = assign_param_names(my_class)", "return", "decorate_class", "(", "cls", ")" ]
Class decorator to assign parameter name to instances of :class:`Param`. .. sourcecode:: @assign_param_names class ConfigSectionSchema(object): alice = Param(type=str) bob = Param(type=str) assert ConfigSectionSchema.alice.name == "alice" assert ConfigSectionSchema.bob.name == "bob" .. sourcecode:: # -- NESTED ASSIGN: Covers also nested SectionSchema subclasses. @assign_param_names class ConfigSectionSchema(object): class Foo(SectionSchema): alice = Param(type=str) bob = Param(type=str) assert ConfigSectionSchema.Foo.alice.name == "alice" assert ConfigSectionSchema.Foo.bob.name == "bob"
[ "Class", "decorator", "to", "assign", "parameter", "name", "to", "instances", "of", ":", "class", ":", "Param", "." ]
python
train
32.913043
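The decorator supports both `@assign_param_names` and `@assign_param_names(param_class=...)`. Below is a generic sketch of that optional-argument decorator pattern, independent of click-configfile's `Param` and `SectionSchema` classes (the `tag_attrs` name and the dict-based "params" are invented for the example).

```python
def tag_attrs(cls=None, *, marker="param"):
    """Class decorator usable with or without parentheses."""
    def decorate(cls):
        for name, value in vars(cls).copy().items():
            if isinstance(value, dict) and value.get("kind") == marker and not value.get("name"):
                value["name"] = name          # annotate in place, like Param.name
        return cls
    if cls is None:
        return decorate          # used as @tag_attrs(...)
    return decorate(cls)         # used as bare @tag_attrs

@tag_attrs
class Schema:
    alice = {"kind": "param", "name": ""}
    bob = {"kind": "param", "name": ""}

assert Schema.alice["name"] == "alice"
assert Schema.bob["name"] == "bob"
```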
petl-developers/petl
petl/transform/hashjoins.py
https://github.com/petl-developers/petl/blob/1d33ca055f7e04e0d28a772041c9fd30c8d415d6/petl/transform/hashjoins.py#L316-L331
def hashantijoin(left, right, key=None, lkey=None, rkey=None): """Alternative implementation of :func:`petl.transform.joins.antijoin`, where the join is executed by constructing an in-memory set for all keys found in the right hand table, then iterating over rows from the left hand table. May be faster and/or more resource efficient where the right table is small and the left table is large. Left and right tables with different key fields can be handled via the `lkey` and `rkey` arguments. """ lkey, rkey = keys_from_args(left, right, key, lkey, rkey) return HashAntiJoinView(left, right, lkey, rkey)
[ "def", "hashantijoin", "(", "left", ",", "right", ",", "key", "=", "None", ",", "lkey", "=", "None", ",", "rkey", "=", "None", ")", ":", "lkey", ",", "rkey", "=", "keys_from_args", "(", "left", ",", "right", ",", "key", ",", "lkey", ",", "rkey", ")", "return", "HashAntiJoinView", "(", "left", ",", "right", ",", "lkey", ",", "rkey", ")" ]
Alternative implementation of :func:`petl.transform.joins.antijoin`, where the join is executed by constructing an in-memory set for all keys found in the right hand table, then iterating over rows from the left hand table. May be faster and/or more resource efficient where the right table is small and the left table is large. Left and right tables with different key fields can be handled via the `lkey` and `rkey` arguments.
[ "Alternative", "implementation", "of", ":", "func", ":", "petl", ".", "transform", ".", "joins", ".", "antijoin", "where", "the", "join", "is", "executed", "by", "constructing", "an", "in", "-", "memory", "set", "for", "all", "keys", "found", "in", "the", "right", "hand", "table", "then", "iterating", "over", "rows", "from", "the", "left", "hand", "table", ".", "May", "be", "faster", "and", "/", "or", "more", "resource", "efficient", "where", "the", "right", "table", "is", "small", "and", "the", "left", "table", "is", "large", ".", "Left", "and", "right", "tables", "with", "different", "key", "fields", "can", "be", "handled", "via", "the", "lkey", "and", "rkey", "arguments", "." ]
python
train
40.625
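A small usage example with in-memory petl tables (header row first); the field names and values are invented.

```python
import petl as etl  # assumed installed

left = [
    ("id", "colour"),
    (0, "blue"),
    (1, "red"),
    (2, "purple"),
]
right = [
    ("id", "shape"),
    (0, "circle"),
    (1, "square"),
]

# Keep rows from `left` whose key does NOT appear in `right`.
print(etl.look(etl.hashantijoin(left, right, key="id")))
# expected: only the row with id == 2
```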
hobson/pug-invest
pug/invest/sandbox/sim.py
https://github.com/hobson/pug-invest/blob/836911258a0e920083a88c91beae88eefdebb20c/pug/invest/sandbox/sim.py#L437-L475
def buy_on_drop(symbol_set="sp5002012", dataobj=dataobj, start=datetime.datetime(2008, 1, 3), end=datetime.datetime(2009, 12, 28), market_sym='$SPX', threshold=6, sell_delay=5, ): '''Compute and display an "event profile" for multiple sets of symbols''' if symbol_set: if isinstance(symbol_set, basestring): if symbol_set.lower().startswith('sp'): symbol_set = dataobj.get_symbols_from_list(symbol_set.lower()) else: symbol_set = [sym.stip().upper() for sym in symbol_set.split(",")] else: symbol_set = dataobj.get_symbols_from_list("sp5002012") if market_sym: symbol_set.append(market_sym) print "Starting Event Study, retrieving data for the {0} symbol list...".format(symbol_set) market_data = get_clean_prices(symbol_set, dataobj=dataobj, start=start, end=end) print "Finding events for {0} symbols between {1} and {2}...".format(len(symbol_set), start, end) trigger_kwargs={'threshold': threshold} events = find_events(symbol_set, market_data, market_sym=market_sym, trigger=drop_below, trigger_kwargs=trigger_kwargs) csvwriter = csv.writer(getattr(args, 'outfile', open('buy_on_drop_outfile.csv', 'w')), dialect='excel', quoting=csv.QUOTE_MINIMAL) for order in generate_orders(events, sell_delay=sell_delay, sep=None): csvwriter.writerow(order) print "Creating Study report for {0} events...".format(len(events)) ep.eventprofiler(events, market_data, i_lookback=20, i_lookforward=20, s_filename='Event report--buy on drop below {0} for {1} symbols.pdf'.format(threshold, len(symbol_set)), b_market_neutral=True, b_errorbars=True, s_market_sym=market_sym, ) return events
[ "def", "buy_on_drop", "(", "symbol_set", "=", "\"sp5002012\"", ",", "dataobj", "=", "dataobj", ",", "start", "=", "datetime", ".", "datetime", "(", "2008", ",", "1", ",", "3", ")", ",", "end", "=", "datetime", ".", "datetime", "(", "2009", ",", "12", ",", "28", ")", ",", "market_sym", "=", "'$SPX'", ",", "threshold", "=", "6", ",", "sell_delay", "=", "5", ",", ")", ":", "if", "symbol_set", ":", "if", "isinstance", "(", "symbol_set", ",", "basestring", ")", ":", "if", "symbol_set", ".", "lower", "(", ")", ".", "startswith", "(", "'sp'", ")", ":", "symbol_set", "=", "dataobj", ".", "get_symbols_from_list", "(", "symbol_set", ".", "lower", "(", ")", ")", "else", ":", "symbol_set", "=", "[", "sym", ".", "stip", "(", ")", ".", "upper", "(", ")", "for", "sym", "in", "symbol_set", ".", "split", "(", "\",\"", ")", "]", "else", ":", "symbol_set", "=", "dataobj", ".", "get_symbols_from_list", "(", "\"sp5002012\"", ")", "if", "market_sym", ":", "symbol_set", ".", "append", "(", "market_sym", ")", "print", "\"Starting Event Study, retrieving data for the {0} symbol list...\"", ".", "format", "(", "symbol_set", ")", "market_data", "=", "get_clean_prices", "(", "symbol_set", ",", "dataobj", "=", "dataobj", ",", "start", "=", "start", ",", "end", "=", "end", ")", "print", "\"Finding events for {0} symbols between {1} and {2}...\"", ".", "format", "(", "len", "(", "symbol_set", ")", ",", "start", ",", "end", ")", "trigger_kwargs", "=", "{", "'threshold'", ":", "threshold", "}", "events", "=", "find_events", "(", "symbol_set", ",", "market_data", ",", "market_sym", "=", "market_sym", ",", "trigger", "=", "drop_below", ",", "trigger_kwargs", "=", "trigger_kwargs", ")", "csvwriter", "=", "csv", ".", "writer", "(", "getattr", "(", "args", ",", "'outfile'", ",", "open", "(", "'buy_on_drop_outfile.csv'", ",", "'w'", ")", ")", ",", "dialect", "=", "'excel'", ",", "quoting", "=", "csv", ".", "QUOTE_MINIMAL", ")", "for", "order", "in", "generate_orders", "(", "events", ",", "sell_delay", "=", "sell_delay", ",", "sep", "=", "None", ")", ":", "csvwriter", ".", "writerow", "(", "order", ")", "print", "\"Creating Study report for {0} events...\"", ".", "format", "(", "len", "(", "events", ")", ")", "ep", ".", "eventprofiler", "(", "events", ",", "market_data", ",", "i_lookback", "=", "20", ",", "i_lookforward", "=", "20", ",", "s_filename", "=", "'Event report--buy on drop below {0} for {1} symbols.pdf'", ".", "format", "(", "threshold", ",", "len", "(", "symbol_set", ")", ")", ",", "b_market_neutral", "=", "True", ",", "b_errorbars", "=", "True", ",", "s_market_sym", "=", "market_sym", ",", ")", "return", "events" ]
Compute and display an "event profile" for multiple sets of symbols
[ "Compute", "and", "display", "an", "event", "profile", "for", "multiple", "sets", "of", "symbols" ]
python
train
49.333333
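The pipeline above depends on QSTK-era helpers (and, as written, on Python 2 print statements), but the core trigger, which appears to fire when a price crosses from at or above the threshold to below it, can be sketched independently with pandas; the symbols and prices here are invented.

```python
import pandas as pd  # assumed available

prices = pd.DataFrame(
    {"AAA": [7.0, 6.5, 5.9, 5.5], "BBB": [8.0, 8.1, 8.2, 8.3]},
    index=pd.date_range("2008-01-03", periods=4),
)

threshold = 6
# Event: yesterday's close at/above the threshold, today's close below it.
crossed = (prices.shift(1) >= threshold) & (prices < threshold)
print(crossed[crossed.any(axis=1)])   # the day AAA first drops below 6
```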
django-json-api/django-rest-framework-json-api
rest_framework_json_api/filters.py
https://github.com/django-json-api/django-rest-framework-json-api/blob/de7021f9e011615ce8b65d0cb38227c6c12721b6/rest_framework_json_api/filters.py#L75-L89
def validate_query_params(self, request): """ Validate that query params are in the list of valid query keywords in :py:attr:`query_regex` :raises ValidationError: if not. """ # TODO: For jsonapi error object conformance, must set jsonapi errors "parameter" for # the ValidationError. This requires extending DRF/DJA Exceptions. for qp in request.query_params.keys(): if not self.query_regex.match(qp): raise ValidationError('invalid query parameter: {}'.format(qp)) if len(request.query_params.getlist(qp)) > 1: raise ValidationError( 'repeated query parameter not allowed: {}'.format(qp))
[ "def", "validate_query_params", "(", "self", ",", "request", ")", ":", "# TODO: For jsonapi error object conformance, must set jsonapi errors \"parameter\" for", "# the ValidationError. This requires extending DRF/DJA Exceptions.", "for", "qp", "in", "request", ".", "query_params", ".", "keys", "(", ")", ":", "if", "not", "self", ".", "query_regex", ".", "match", "(", "qp", ")", ":", "raise", "ValidationError", "(", "'invalid query parameter: {}'", ".", "format", "(", "qp", ")", ")", "if", "len", "(", "request", ".", "query_params", ".", "getlist", "(", "qp", ")", ")", ">", "1", ":", "raise", "ValidationError", "(", "'repeated query parameter not allowed: {}'", ".", "format", "(", "qp", ")", ")" ]
Validate that query params are in the list of valid query keywords in :py:attr:`query_regex` :raises ValidationError: if not.
[ "Validate", "that", "query", "params", "are", "in", "the", "list", "of", "valid", "query", "keywords", "in", ":", "py", ":", "attr", ":", "query_regex" ]
python
train
47.8
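Outside of DRF, the same two checks (unknown parameter names and repeated parameters) reduce to a regex match over the parsed query string. A framework-free sketch follows; the allowed-parameter regex is invented for the example and is not the actual `query_regex` used by the filter backend.

```python
import re
from urllib.parse import parse_qs

# Invented stand-in for self.query_regex (JSON:API style keywords).
QUERY_REGEX = re.compile(
    r"^(sort|include|page\[[^\]]+\]|filter\[[^\]]+\]|fields\[[^\]]+\])$")

def validate_query_params(query_string):
    params = parse_qs(query_string, keep_blank_values=True)
    for name, values in params.items():
        if not QUERY_REGEX.match(name):
            raise ValueError("invalid query parameter: {}".format(name))
        if len(values) > 1:
            raise ValueError("repeated query parameter not allowed: {}".format(name))

validate_query_params("sort=-created&page[number]=2")   # ok
# validate_query_params("sort=a&sort=b")                # would raise: repeated
```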
guaix-ucm/pyemir
emirdrp/processing/wavecal/rectwv_coeff_from_mos_library.py
https://github.com/guaix-ucm/pyemir/blob/fef6bbabcb13f80123cafd1800a0f508a3c21702/emirdrp/processing/wavecal/rectwv_coeff_from_mos_library.py#L55-L410
def rectwv_coeff_from_mos_library(reduced_image, master_rectwv, ignore_dtu_configuration=True, debugplot=0): """Evaluate rect.+wavecal. coefficients from MOS library Parameters ---------- reduced_image : HDUList object Image with preliminary basic reduction: bpm, bias, dark and flatfield. master_rectwv : MasterRectWave instance Rectification and Wavelength Calibrartion Library product. Contains the library of polynomial coefficients necessary to generate an instance of RectWaveCoeff with the rectification and wavelength calibration coefficients for the particular CSU configuration. ignore_dtu_configuration : bool If True, ignore differences in DTU configuration. debugplot : int Debugging level for messages and plots. For details see 'numina.array.display.pause_debugplot.py'. Returns ------- rectwv_coeff : RectWaveCoeff instance Rectification and wavelength calibration coefficients for the particular CSU configuration. """ logger = logging.getLogger(__name__) logger.info('Computing expected RectWaveCoeff from CSU configuration') # header header = reduced_image[0].header # read the CSU configuration from the image header csu_conf = CsuConfiguration.define_from_header(header) # read the DTU configuration from the image header dtu_conf = DtuConfiguration.define_from_header(header) # retrieve DTU configuration from MasterRectWave object dtu_conf_calib = DtuConfiguration.define_from_dictionary( master_rectwv.meta_info['dtu_configuration'] ) # check that the DTU configuration employed to obtain the calibration # corresponds to the DTU configuration in the input FITS file if dtu_conf != dtu_conf_calib: if ignore_dtu_configuration: logger.warning('DTU configuration differences found!') else: logger.info('DTU configuration from image header:') logger.info(dtu_conf) logger.info('DTU configuration from master calibration:') logger.info(dtu_conf_calib) raise ValueError("DTU configurations do not match!") else: logger.info('DTU configuration match!') # check grism and filter filter_name = header['filter'] logger.debug('Filter: ' + filter_name) if filter_name != master_rectwv.tags['filter']: raise ValueError('Filter name does not match!') grism_name = header['grism'] logger.debug('Grism: ' + grism_name) if grism_name != master_rectwv.tags['grism']: raise ValueError('Grism name does not match!') # valid slitlet numbers list_valid_islitlets = list(range(1, EMIR_NBARS + 1)) for idel in master_rectwv.missing_slitlets: list_valid_islitlets.remove(idel) logger.debug('valid slitlet numbers: ' + str(list_valid_islitlets)) # initialize intermediate dictionary with relevant information # (note: this dictionary corresponds to an old structure employed to # store the information in a JSON file; this is no longer necessary, # but here we reuse that dictionary for convenience) outdict = {} outdict['instrument'] = 'EMIR' outdict['meta_info'] = {} outdict['meta_info']['creation_date'] = datetime.now().isoformat() outdict['meta_info']['description'] = \ 'computation of rectification and wavelength calibration polynomial ' \ 'coefficients for a particular CSU configuration from a MOS model ' outdict['meta_info']['recipe_name'] = 'undefined' outdict['meta_info']['origin'] = {} outdict['meta_info']['origin']['fits_frame_uuid'] = 'TBD' outdict['meta_info']['origin']['rect_wpoly_mos_uuid'] = \ master_rectwv.uuid outdict['meta_info']['origin']['fitted_boundary_param_uuid'] = \ master_rectwv.meta_info['origin']['bound_param'] outdict['tags'] = {} outdict['tags']['grism'] = grism_name outdict['tags']['filter'] = filter_name 
outdict['dtu_configuration'] = dtu_conf.outdict() outdict['uuid'] = str(uuid4()) outdict['contents'] = {} # compute rectification and wavelength calibration coefficients for each # slitlet according to its csu_bar_slit_center value for islitlet in list_valid_islitlets: cslitlet = 'slitlet' + str(islitlet).zfill(2) # csu_bar_slit_center of current slitlet in initial FITS image csu_bar_slit_center = csu_conf.csu_bar_slit_center(islitlet) # input data structure tmpdict = master_rectwv.contents[islitlet - 1] list_csu_bar_slit_center = tmpdict['list_csu_bar_slit_center'] # check extrapolations if csu_bar_slit_center < min(list_csu_bar_slit_center): logger.warning('extrapolating table with ' + cslitlet) logger.warning('minimum tabulated value: ' + str(min(list_csu_bar_slit_center))) logger.warning('sought value...........: ' + str(csu_bar_slit_center)) if csu_bar_slit_center > max(list_csu_bar_slit_center): logger.warning('extrapolating table with ' + cslitlet) logger.warning('maximum tabulated value: ' + str(max(list_csu_bar_slit_center))) logger.warning('sought value...........: ' + str(csu_bar_slit_center)) # rectification coefficients ttd_order = tmpdict['ttd_order'] ncoef = ncoef_fmap(ttd_order) outdict['contents'][cslitlet] = {} outdict['contents'][cslitlet]['ttd_order'] = ttd_order outdict['contents'][cslitlet]['ttd_order_longslit_model'] = None for keycoef in ['ttd_aij', 'ttd_bij', 'tti_aij', 'tti_bij']: coef_out = [] for icoef in range(ncoef): ccoef = str(icoef).zfill(2) list_cij = tmpdict['list_' + keycoef + '_' + ccoef] funinterp_coef = interp1d(list_csu_bar_slit_center, list_cij, kind='linear', fill_value='extrapolate') # note: funinterp_coef expects a numpy array dum = funinterp_coef([csu_bar_slit_center]) coef_out.append(dum[0]) outdict['contents'][cslitlet][keycoef] = coef_out outdict['contents'][cslitlet][keycoef + '_longslit_model'] = None # wavelength calibration coefficients ncoef = tmpdict['wpoly_degree'] + 1 wpoly_coeff = [] for icoef in range(ncoef): ccoef = str(icoef).zfill(2) list_cij = tmpdict['list_wpoly_coeff_' + ccoef] funinterp_coef = interp1d(list_csu_bar_slit_center, list_cij, kind='linear', fill_value='extrapolate') # note: funinterp_coef expects a numpy array dum = funinterp_coef([csu_bar_slit_center]) wpoly_coeff.append(dum[0]) outdict['contents'][cslitlet]['wpoly_coeff'] = wpoly_coeff outdict['contents'][cslitlet]['wpoly_coeff_longslit_model'] = None # update cdelt1_linear and crval1_linear wpoly_function = np.polynomial.Polynomial(wpoly_coeff) crmin1_linear = wpoly_function(1) crmax1_linear = wpoly_function(EMIR_NAXIS1) cdelt1_linear = (crmax1_linear - crmin1_linear) / (EMIR_NAXIS1 - 1) crval1_linear = crmin1_linear outdict['contents'][cslitlet]['crval1_linear'] = crval1_linear outdict['contents'][cslitlet]['cdelt1_linear'] = cdelt1_linear # update CSU keywords outdict['contents'][cslitlet]['csu_bar_left'] = \ csu_conf.csu_bar_left(islitlet) outdict['contents'][cslitlet]['csu_bar_right'] = \ csu_conf.csu_bar_right(islitlet) outdict['contents'][cslitlet]['csu_bar_slit_center'] = \ csu_conf.csu_bar_slit_center(islitlet) outdict['contents'][cslitlet]['csu_bar_slit_width'] = \ csu_conf.csu_bar_slit_width(islitlet) # for each slitlet compute spectrum trails and frontiers using the # fitted boundary parameters fitted_bound_param_json = { 'contents': master_rectwv.meta_info['refined_boundary_model'] } parmodel = fitted_bound_param_json['contents']['parmodel'] fitted_bound_param_json.update({'meta_info': {'parmodel': parmodel}}) params = 
bound_params_from_dict(fitted_bound_param_json) if abs(debugplot) >= 10: logger.debug('Fitted boundary parameters:') logger.debug(params.pretty_print()) for islitlet in list_valid_islitlets: cslitlet = 'slitlet' + str(islitlet).zfill(2) # csu_bar_slit_center of current slitlet in initial FITS image csu_bar_slit_center = csu_conf.csu_bar_slit_center(islitlet) # compute and store x0_reference value x0_reference = float(EMIR_NAXIS1) / 2.0 + 0.5 outdict['contents'][cslitlet]['x0_reference'] = x0_reference # compute spectrum trails (lower, middle and upper) list_spectrails = expected_distorted_boundaries( islitlet, csu_bar_slit_center, [0, 0.5, 1], params, parmodel, numpts=101, deg=5, debugplot=0 ) # store spectrails in output JSON file outdict['contents'][cslitlet]['spectrail'] = {} for idum, cdum in zip(range(3), ['lower', 'middle', 'upper']): outdict['contents'][cslitlet]['spectrail']['poly_coef_' + cdum] = \ list_spectrails[idum].poly_funct.coef.tolist() outdict['contents'][cslitlet]['y0_reference_' + cdum] = \ list_spectrails[idum].poly_funct(x0_reference) # compute frontiers (lower, upper) list_frontiers = expected_distorted_frontiers( islitlet, csu_bar_slit_center, params, parmodel, numpts=101, deg=5, debugplot=0 ) # store frontiers in output JSON outdict['contents'][cslitlet]['frontier'] = {} for idum, cdum in zip(range(2), ['lower', 'upper']): outdict['contents'][cslitlet]['frontier']['poly_coef_' + cdum] = \ list_frontiers[idum].poly_funct.coef.tolist() outdict['contents'][cslitlet]['y0_frontier_' + cdum] = \ list_frontiers[idum].poly_funct(x0_reference) # store bounding box parameters for each slitlet xdum = np.linspace(1, EMIR_NAXIS1, num=EMIR_NAXIS1) for islitlet in list_valid_islitlets: cslitlet = 'slitlet' + str(islitlet).zfill(2) # parameters already available in the input JSON file for par in ['bb_nc1_orig', 'bb_nc2_orig', 'ymargin_bb']: outdict['contents'][cslitlet][par] = \ master_rectwv.contents[islitlet - 1][par] # estimate bb_ns1_orig and bb_ns2_orig using the already computed # frontiers and the value of ymargin_bb, following the same approach # employed in Slitlet2dArc.__init__() poly_lower_frontier = np.polynomial.Polynomial( outdict['contents'][cslitlet]['frontier']['poly_coef_lower'] ) poly_upper_frontier = np.polynomial.Polynomial( outdict['contents'][cslitlet]['frontier']['poly_coef_upper'] ) ylower = poly_lower_frontier(xdum) yupper = poly_upper_frontier(xdum) ymargin_bb = master_rectwv.contents[islitlet - 1]['ymargin_bb'] bb_ns1_orig = int(ylower.min() + 0.5) - ymargin_bb if bb_ns1_orig < 1: bb_ns1_orig = 1 bb_ns2_orig = int(yupper.max() + 0.5) + ymargin_bb if bb_ns2_orig > EMIR_NAXIS2: bb_ns2_orig = EMIR_NAXIS2 outdict['contents'][cslitlet]['bb_ns1_orig'] = bb_ns1_orig outdict['contents'][cslitlet]['bb_ns2_orig'] = bb_ns2_orig # additional parameters (see Slitlet2dArc.__init__) for islitlet in list_valid_islitlets: cslitlet = 'slitlet' + str(islitlet).zfill(2) # define expected frontier ordinates at x0_reference for the rectified # image imposing the vertical length of the slitlet to be constant # and equal to EMIR_NPIXPERSLIT_RECTIFIED outdict['contents'][cslitlet]['y0_frontier_lower_expected'] = \ expected_y0_lower_frontier(islitlet) outdict['contents'][cslitlet]['y0_frontier_upper_expected'] = \ expected_y0_upper_frontier(islitlet) # compute linear transformation to place the rectified slitlet at # the center of the current slitlet bounding box tmpdict = outdict['contents'][cslitlet] xdum1 = tmpdict['y0_frontier_lower'] ydum1 = 
tmpdict['y0_frontier_lower_expected'] xdum2 = tmpdict['y0_frontier_upper'] ydum2 = tmpdict['y0_frontier_upper_expected'] corr_yrect_b = (ydum2 - ydum1) / (xdum2 - xdum1) corr_yrect_a = ydum1 - corr_yrect_b * xdum1 # compute expected location of rectified boundaries y0_reference_lower_expected = \ corr_yrect_a + corr_yrect_b * tmpdict['y0_reference_lower'] y0_reference_middle_expected = \ corr_yrect_a + corr_yrect_b * tmpdict['y0_reference_middle'] y0_reference_upper_expected = \ corr_yrect_a + corr_yrect_b * tmpdict['y0_reference_upper'] # shift transformation to center the rectified slitlet within the # slitlet bounding box ydummid = (ydum1 + ydum2) / 2 ioffset = int( ydummid - (tmpdict['bb_ns1_orig'] + tmpdict['bb_ns2_orig']) / 2.0) corr_yrect_a -= ioffset # minimum and maximum row in the rectified slitlet encompassing # EMIR_NPIXPERSLIT_RECTIFIED pixels # a) scan number (in pixels, from 1 to NAXIS2) xdum1 = corr_yrect_a + \ corr_yrect_b * tmpdict['y0_frontier_lower'] xdum2 = corr_yrect_a + \ corr_yrect_b * tmpdict['y0_frontier_upper'] # b) row number (starting from zero) min_row_rectified = \ int((round(xdum1 * 10) + 5) / 10) - tmpdict['bb_ns1_orig'] max_row_rectified = \ int((round(xdum2 * 10) - 5) / 10) - tmpdict['bb_ns1_orig'] # save previous results in outdict outdict['contents'][cslitlet]['y0_reference_lower_expected'] = \ y0_reference_lower_expected outdict['contents'][cslitlet]['y0_reference_middle_expected'] = \ y0_reference_middle_expected outdict['contents'][cslitlet]['y0_reference_upper_expected'] = \ y0_reference_upper_expected outdict['contents'][cslitlet]['corr_yrect_a'] = corr_yrect_a outdict['contents'][cslitlet]['corr_yrect_b'] = corr_yrect_b outdict['contents'][cslitlet]['min_row_rectified'] = min_row_rectified outdict['contents'][cslitlet]['max_row_rectified'] = max_row_rectified # --- # Create object of type RectWaveCoeff with coefficients for # rectification and wavelength calibration rectwv_coeff = RectWaveCoeff(instrument='EMIR') rectwv_coeff.quality_control = numina.types.qc.QC.GOOD rectwv_coeff.tags['grism'] = grism_name rectwv_coeff.tags['filter'] = filter_name rectwv_coeff.meta_info['origin']['bound_param'] = \ master_rectwv.meta_info['origin']['bound_param'] rectwv_coeff.meta_info['origin']['master_rectwv'] = \ 'uuid' + master_rectwv.uuid rectwv_coeff.meta_info['dtu_configuration'] = outdict['dtu_configuration'] rectwv_coeff.total_slitlets = EMIR_NBARS for i in range(EMIR_NBARS): islitlet = i + 1 dumdict = {'islitlet': islitlet} cslitlet = 'slitlet' + str(islitlet).zfill(2) if cslitlet in outdict['contents']: dumdict.update(outdict['contents'][cslitlet]) else: dumdict.update({ 'csu_bar_left': csu_conf.csu_bar_left(islitlet), 'csu_bar_right': csu_conf.csu_bar_right(islitlet), 'csu_bar_slit_center': csu_conf.csu_bar_slit_center(islitlet), 'csu_bar_slit_width': csu_conf.csu_bar_slit_width(islitlet), 'x0_reference': float(EMIR_NAXIS1) / 2.0 + 0.5, 'y0_frontier_lower_expected': expected_y0_lower_frontier(islitlet), 'y0_frontier_upper_expected': expected_y0_upper_frontier(islitlet) }) rectwv_coeff.missing_slitlets.append(islitlet) rectwv_coeff.contents.append(dumdict) # debugging __getstate__ and __setstate__ # rectwv_coeff.writeto(args.out_rect_wpoly.name) # print('>>> Saving file ' + args.out_rect_wpoly.name) # check_setstate_getstate(rectwv_coeff, args.out_rect_wpoly.name) logger.info('Generating RectWaveCoeff object with uuid=' + rectwv_coeff.uuid) return rectwv_coeff
[ "def", "rectwv_coeff_from_mos_library", "(", "reduced_image", ",", "master_rectwv", ",", "ignore_dtu_configuration", "=", "True", ",", "debugplot", "=", "0", ")", ":", "logger", "=", "logging", ".", "getLogger", "(", "__name__", ")", "logger", ".", "info", "(", "'Computing expected RectWaveCoeff from CSU configuration'", ")", "# header", "header", "=", "reduced_image", "[", "0", "]", ".", "header", "# read the CSU configuration from the image header", "csu_conf", "=", "CsuConfiguration", ".", "define_from_header", "(", "header", ")", "# read the DTU configuration from the image header", "dtu_conf", "=", "DtuConfiguration", ".", "define_from_header", "(", "header", ")", "# retrieve DTU configuration from MasterRectWave object", "dtu_conf_calib", "=", "DtuConfiguration", ".", "define_from_dictionary", "(", "master_rectwv", ".", "meta_info", "[", "'dtu_configuration'", "]", ")", "# check that the DTU configuration employed to obtain the calibration", "# corresponds to the DTU configuration in the input FITS file", "if", "dtu_conf", "!=", "dtu_conf_calib", ":", "if", "ignore_dtu_configuration", ":", "logger", ".", "warning", "(", "'DTU configuration differences found!'", ")", "else", ":", "logger", ".", "info", "(", "'DTU configuration from image header:'", ")", "logger", ".", "info", "(", "dtu_conf", ")", "logger", ".", "info", "(", "'DTU configuration from master calibration:'", ")", "logger", ".", "info", "(", "dtu_conf_calib", ")", "raise", "ValueError", "(", "\"DTU configurations do not match!\"", ")", "else", ":", "logger", ".", "info", "(", "'DTU configuration match!'", ")", "# check grism and filter", "filter_name", "=", "header", "[", "'filter'", "]", "logger", ".", "debug", "(", "'Filter: '", "+", "filter_name", ")", "if", "filter_name", "!=", "master_rectwv", ".", "tags", "[", "'filter'", "]", ":", "raise", "ValueError", "(", "'Filter name does not match!'", ")", "grism_name", "=", "header", "[", "'grism'", "]", "logger", ".", "debug", "(", "'Grism: '", "+", "grism_name", ")", "if", "grism_name", "!=", "master_rectwv", ".", "tags", "[", "'grism'", "]", ":", "raise", "ValueError", "(", "'Grism name does not match!'", ")", "# valid slitlet numbers", "list_valid_islitlets", "=", "list", "(", "range", "(", "1", ",", "EMIR_NBARS", "+", "1", ")", ")", "for", "idel", "in", "master_rectwv", ".", "missing_slitlets", ":", "list_valid_islitlets", ".", "remove", "(", "idel", ")", "logger", ".", "debug", "(", "'valid slitlet numbers: '", "+", "str", "(", "list_valid_islitlets", ")", ")", "# initialize intermediate dictionary with relevant information", "# (note: this dictionary corresponds to an old structure employed to", "# store the information in a JSON file; this is no longer necessary,", "# but here we reuse that dictionary for convenience)", "outdict", "=", "{", "}", "outdict", "[", "'instrument'", "]", "=", "'EMIR'", "outdict", "[", "'meta_info'", "]", "=", "{", "}", "outdict", "[", "'meta_info'", "]", "[", "'creation_date'", "]", "=", "datetime", ".", "now", "(", ")", ".", "isoformat", "(", ")", "outdict", "[", "'meta_info'", "]", "[", "'description'", "]", "=", "'computation of rectification and wavelength calibration polynomial '", "'coefficients for a particular CSU configuration from a MOS model '", "outdict", "[", "'meta_info'", "]", "[", "'recipe_name'", "]", "=", "'undefined'", "outdict", "[", "'meta_info'", "]", "[", "'origin'", "]", "=", "{", "}", "outdict", "[", "'meta_info'", "]", "[", "'origin'", "]", "[", "'fits_frame_uuid'", "]", "=", "'TBD'", "outdict", "[", 
"'meta_info'", "]", "[", "'origin'", "]", "[", "'rect_wpoly_mos_uuid'", "]", "=", "master_rectwv", ".", "uuid", "outdict", "[", "'meta_info'", "]", "[", "'origin'", "]", "[", "'fitted_boundary_param_uuid'", "]", "=", "master_rectwv", ".", "meta_info", "[", "'origin'", "]", "[", "'bound_param'", "]", "outdict", "[", "'tags'", "]", "=", "{", "}", "outdict", "[", "'tags'", "]", "[", "'grism'", "]", "=", "grism_name", "outdict", "[", "'tags'", "]", "[", "'filter'", "]", "=", "filter_name", "outdict", "[", "'dtu_configuration'", "]", "=", "dtu_conf", ".", "outdict", "(", ")", "outdict", "[", "'uuid'", "]", "=", "str", "(", "uuid4", "(", ")", ")", "outdict", "[", "'contents'", "]", "=", "{", "}", "# compute rectification and wavelength calibration coefficients for each", "# slitlet according to its csu_bar_slit_center value", "for", "islitlet", "in", "list_valid_islitlets", ":", "cslitlet", "=", "'slitlet'", "+", "str", "(", "islitlet", ")", ".", "zfill", "(", "2", ")", "# csu_bar_slit_center of current slitlet in initial FITS image", "csu_bar_slit_center", "=", "csu_conf", ".", "csu_bar_slit_center", "(", "islitlet", ")", "# input data structure", "tmpdict", "=", "master_rectwv", ".", "contents", "[", "islitlet", "-", "1", "]", "list_csu_bar_slit_center", "=", "tmpdict", "[", "'list_csu_bar_slit_center'", "]", "# check extrapolations", "if", "csu_bar_slit_center", "<", "min", "(", "list_csu_bar_slit_center", ")", ":", "logger", ".", "warning", "(", "'extrapolating table with '", "+", "cslitlet", ")", "logger", ".", "warning", "(", "'minimum tabulated value: '", "+", "str", "(", "min", "(", "list_csu_bar_slit_center", ")", ")", ")", "logger", ".", "warning", "(", "'sought value...........: '", "+", "str", "(", "csu_bar_slit_center", ")", ")", "if", "csu_bar_slit_center", ">", "max", "(", "list_csu_bar_slit_center", ")", ":", "logger", ".", "warning", "(", "'extrapolating table with '", "+", "cslitlet", ")", "logger", ".", "warning", "(", "'maximum tabulated value: '", "+", "str", "(", "max", "(", "list_csu_bar_slit_center", ")", ")", ")", "logger", ".", "warning", "(", "'sought value...........: '", "+", "str", "(", "csu_bar_slit_center", ")", ")", "# rectification coefficients", "ttd_order", "=", "tmpdict", "[", "'ttd_order'", "]", "ncoef", "=", "ncoef_fmap", "(", "ttd_order", ")", "outdict", "[", "'contents'", "]", "[", "cslitlet", "]", "=", "{", "}", "outdict", "[", "'contents'", "]", "[", "cslitlet", "]", "[", "'ttd_order'", "]", "=", "ttd_order", "outdict", "[", "'contents'", "]", "[", "cslitlet", "]", "[", "'ttd_order_longslit_model'", "]", "=", "None", "for", "keycoef", "in", "[", "'ttd_aij'", ",", "'ttd_bij'", ",", "'tti_aij'", ",", "'tti_bij'", "]", ":", "coef_out", "=", "[", "]", "for", "icoef", "in", "range", "(", "ncoef", ")", ":", "ccoef", "=", "str", "(", "icoef", ")", ".", "zfill", "(", "2", ")", "list_cij", "=", "tmpdict", "[", "'list_'", "+", "keycoef", "+", "'_'", "+", "ccoef", "]", "funinterp_coef", "=", "interp1d", "(", "list_csu_bar_slit_center", ",", "list_cij", ",", "kind", "=", "'linear'", ",", "fill_value", "=", "'extrapolate'", ")", "# note: funinterp_coef expects a numpy array", "dum", "=", "funinterp_coef", "(", "[", "csu_bar_slit_center", "]", ")", "coef_out", ".", "append", "(", "dum", "[", "0", "]", ")", "outdict", "[", "'contents'", "]", "[", "cslitlet", "]", "[", "keycoef", "]", "=", "coef_out", "outdict", "[", "'contents'", "]", "[", "cslitlet", "]", "[", "keycoef", "+", "'_longslit_model'", "]", "=", "None", "# wavelength calibration coefficients", "ncoef", 
"=", "tmpdict", "[", "'wpoly_degree'", "]", "+", "1", "wpoly_coeff", "=", "[", "]", "for", "icoef", "in", "range", "(", "ncoef", ")", ":", "ccoef", "=", "str", "(", "icoef", ")", ".", "zfill", "(", "2", ")", "list_cij", "=", "tmpdict", "[", "'list_wpoly_coeff_'", "+", "ccoef", "]", "funinterp_coef", "=", "interp1d", "(", "list_csu_bar_slit_center", ",", "list_cij", ",", "kind", "=", "'linear'", ",", "fill_value", "=", "'extrapolate'", ")", "# note: funinterp_coef expects a numpy array", "dum", "=", "funinterp_coef", "(", "[", "csu_bar_slit_center", "]", ")", "wpoly_coeff", ".", "append", "(", "dum", "[", "0", "]", ")", "outdict", "[", "'contents'", "]", "[", "cslitlet", "]", "[", "'wpoly_coeff'", "]", "=", "wpoly_coeff", "outdict", "[", "'contents'", "]", "[", "cslitlet", "]", "[", "'wpoly_coeff_longslit_model'", "]", "=", "None", "# update cdelt1_linear and crval1_linear", "wpoly_function", "=", "np", ".", "polynomial", ".", "Polynomial", "(", "wpoly_coeff", ")", "crmin1_linear", "=", "wpoly_function", "(", "1", ")", "crmax1_linear", "=", "wpoly_function", "(", "EMIR_NAXIS1", ")", "cdelt1_linear", "=", "(", "crmax1_linear", "-", "crmin1_linear", ")", "/", "(", "EMIR_NAXIS1", "-", "1", ")", "crval1_linear", "=", "crmin1_linear", "outdict", "[", "'contents'", "]", "[", "cslitlet", "]", "[", "'crval1_linear'", "]", "=", "crval1_linear", "outdict", "[", "'contents'", "]", "[", "cslitlet", "]", "[", "'cdelt1_linear'", "]", "=", "cdelt1_linear", "# update CSU keywords", "outdict", "[", "'contents'", "]", "[", "cslitlet", "]", "[", "'csu_bar_left'", "]", "=", "csu_conf", ".", "csu_bar_left", "(", "islitlet", ")", "outdict", "[", "'contents'", "]", "[", "cslitlet", "]", "[", "'csu_bar_right'", "]", "=", "csu_conf", ".", "csu_bar_right", "(", "islitlet", ")", "outdict", "[", "'contents'", "]", "[", "cslitlet", "]", "[", "'csu_bar_slit_center'", "]", "=", "csu_conf", ".", "csu_bar_slit_center", "(", "islitlet", ")", "outdict", "[", "'contents'", "]", "[", "cslitlet", "]", "[", "'csu_bar_slit_width'", "]", "=", "csu_conf", ".", "csu_bar_slit_width", "(", "islitlet", ")", "# for each slitlet compute spectrum trails and frontiers using the", "# fitted boundary parameters", "fitted_bound_param_json", "=", "{", "'contents'", ":", "master_rectwv", ".", "meta_info", "[", "'refined_boundary_model'", "]", "}", "parmodel", "=", "fitted_bound_param_json", "[", "'contents'", "]", "[", "'parmodel'", "]", "fitted_bound_param_json", ".", "update", "(", "{", "'meta_info'", ":", "{", "'parmodel'", ":", "parmodel", "}", "}", ")", "params", "=", "bound_params_from_dict", "(", "fitted_bound_param_json", ")", "if", "abs", "(", "debugplot", ")", ">=", "10", ":", "logger", ".", "debug", "(", "'Fitted boundary parameters:'", ")", "logger", ".", "debug", "(", "params", ".", "pretty_print", "(", ")", ")", "for", "islitlet", "in", "list_valid_islitlets", ":", "cslitlet", "=", "'slitlet'", "+", "str", "(", "islitlet", ")", ".", "zfill", "(", "2", ")", "# csu_bar_slit_center of current slitlet in initial FITS image", "csu_bar_slit_center", "=", "csu_conf", ".", "csu_bar_slit_center", "(", "islitlet", ")", "# compute and store x0_reference value", "x0_reference", "=", "float", "(", "EMIR_NAXIS1", ")", "/", "2.0", "+", "0.5", "outdict", "[", "'contents'", "]", "[", "cslitlet", "]", "[", "'x0_reference'", "]", "=", "x0_reference", "# compute spectrum trails (lower, middle and upper)", "list_spectrails", "=", "expected_distorted_boundaries", "(", "islitlet", ",", "csu_bar_slit_center", ",", "[", "0", ",", "0.5", ",", "1", "]", 
",", "params", ",", "parmodel", ",", "numpts", "=", "101", ",", "deg", "=", "5", ",", "debugplot", "=", "0", ")", "# store spectrails in output JSON file", "outdict", "[", "'contents'", "]", "[", "cslitlet", "]", "[", "'spectrail'", "]", "=", "{", "}", "for", "idum", ",", "cdum", "in", "zip", "(", "range", "(", "3", ")", ",", "[", "'lower'", ",", "'middle'", ",", "'upper'", "]", ")", ":", "outdict", "[", "'contents'", "]", "[", "cslitlet", "]", "[", "'spectrail'", "]", "[", "'poly_coef_'", "+", "cdum", "]", "=", "list_spectrails", "[", "idum", "]", ".", "poly_funct", ".", "coef", ".", "tolist", "(", ")", "outdict", "[", "'contents'", "]", "[", "cslitlet", "]", "[", "'y0_reference_'", "+", "cdum", "]", "=", "list_spectrails", "[", "idum", "]", ".", "poly_funct", "(", "x0_reference", ")", "# compute frontiers (lower, upper)", "list_frontiers", "=", "expected_distorted_frontiers", "(", "islitlet", ",", "csu_bar_slit_center", ",", "params", ",", "parmodel", ",", "numpts", "=", "101", ",", "deg", "=", "5", ",", "debugplot", "=", "0", ")", "# store frontiers in output JSON", "outdict", "[", "'contents'", "]", "[", "cslitlet", "]", "[", "'frontier'", "]", "=", "{", "}", "for", "idum", ",", "cdum", "in", "zip", "(", "range", "(", "2", ")", ",", "[", "'lower'", ",", "'upper'", "]", ")", ":", "outdict", "[", "'contents'", "]", "[", "cslitlet", "]", "[", "'frontier'", "]", "[", "'poly_coef_'", "+", "cdum", "]", "=", "list_frontiers", "[", "idum", "]", ".", "poly_funct", ".", "coef", ".", "tolist", "(", ")", "outdict", "[", "'contents'", "]", "[", "cslitlet", "]", "[", "'y0_frontier_'", "+", "cdum", "]", "=", "list_frontiers", "[", "idum", "]", ".", "poly_funct", "(", "x0_reference", ")", "# store bounding box parameters for each slitlet", "xdum", "=", "np", ".", "linspace", "(", "1", ",", "EMIR_NAXIS1", ",", "num", "=", "EMIR_NAXIS1", ")", "for", "islitlet", "in", "list_valid_islitlets", ":", "cslitlet", "=", "'slitlet'", "+", "str", "(", "islitlet", ")", ".", "zfill", "(", "2", ")", "# parameters already available in the input JSON file", "for", "par", "in", "[", "'bb_nc1_orig'", ",", "'bb_nc2_orig'", ",", "'ymargin_bb'", "]", ":", "outdict", "[", "'contents'", "]", "[", "cslitlet", "]", "[", "par", "]", "=", "master_rectwv", ".", "contents", "[", "islitlet", "-", "1", "]", "[", "par", "]", "# estimate bb_ns1_orig and bb_ns2_orig using the already computed", "# frontiers and the value of ymargin_bb, following the same approach", "# employed in Slitlet2dArc.__init__()", "poly_lower_frontier", "=", "np", ".", "polynomial", ".", "Polynomial", "(", "outdict", "[", "'contents'", "]", "[", "cslitlet", "]", "[", "'frontier'", "]", "[", "'poly_coef_lower'", "]", ")", "poly_upper_frontier", "=", "np", ".", "polynomial", ".", "Polynomial", "(", "outdict", "[", "'contents'", "]", "[", "cslitlet", "]", "[", "'frontier'", "]", "[", "'poly_coef_upper'", "]", ")", "ylower", "=", "poly_lower_frontier", "(", "xdum", ")", "yupper", "=", "poly_upper_frontier", "(", "xdum", ")", "ymargin_bb", "=", "master_rectwv", ".", "contents", "[", "islitlet", "-", "1", "]", "[", "'ymargin_bb'", "]", "bb_ns1_orig", "=", "int", "(", "ylower", ".", "min", "(", ")", "+", "0.5", ")", "-", "ymargin_bb", "if", "bb_ns1_orig", "<", "1", ":", "bb_ns1_orig", "=", "1", "bb_ns2_orig", "=", "int", "(", "yupper", ".", "max", "(", ")", "+", "0.5", ")", "+", "ymargin_bb", "if", "bb_ns2_orig", ">", "EMIR_NAXIS2", ":", "bb_ns2_orig", "=", "EMIR_NAXIS2", "outdict", "[", "'contents'", "]", "[", "cslitlet", "]", "[", "'bb_ns1_orig'", "]", "=", 
"bb_ns1_orig", "outdict", "[", "'contents'", "]", "[", "cslitlet", "]", "[", "'bb_ns2_orig'", "]", "=", "bb_ns2_orig", "# additional parameters (see Slitlet2dArc.__init__)", "for", "islitlet", "in", "list_valid_islitlets", ":", "cslitlet", "=", "'slitlet'", "+", "str", "(", "islitlet", ")", ".", "zfill", "(", "2", ")", "# define expected frontier ordinates at x0_reference for the rectified", "# image imposing the vertical length of the slitlet to be constant", "# and equal to EMIR_NPIXPERSLIT_RECTIFIED", "outdict", "[", "'contents'", "]", "[", "cslitlet", "]", "[", "'y0_frontier_lower_expected'", "]", "=", "expected_y0_lower_frontier", "(", "islitlet", ")", "outdict", "[", "'contents'", "]", "[", "cslitlet", "]", "[", "'y0_frontier_upper_expected'", "]", "=", "expected_y0_upper_frontier", "(", "islitlet", ")", "# compute linear transformation to place the rectified slitlet at", "# the center of the current slitlet bounding box", "tmpdict", "=", "outdict", "[", "'contents'", "]", "[", "cslitlet", "]", "xdum1", "=", "tmpdict", "[", "'y0_frontier_lower'", "]", "ydum1", "=", "tmpdict", "[", "'y0_frontier_lower_expected'", "]", "xdum2", "=", "tmpdict", "[", "'y0_frontier_upper'", "]", "ydum2", "=", "tmpdict", "[", "'y0_frontier_upper_expected'", "]", "corr_yrect_b", "=", "(", "ydum2", "-", "ydum1", ")", "/", "(", "xdum2", "-", "xdum1", ")", "corr_yrect_a", "=", "ydum1", "-", "corr_yrect_b", "*", "xdum1", "# compute expected location of rectified boundaries", "y0_reference_lower_expected", "=", "corr_yrect_a", "+", "corr_yrect_b", "*", "tmpdict", "[", "'y0_reference_lower'", "]", "y0_reference_middle_expected", "=", "corr_yrect_a", "+", "corr_yrect_b", "*", "tmpdict", "[", "'y0_reference_middle'", "]", "y0_reference_upper_expected", "=", "corr_yrect_a", "+", "corr_yrect_b", "*", "tmpdict", "[", "'y0_reference_upper'", "]", "# shift transformation to center the rectified slitlet within the", "# slitlet bounding box", "ydummid", "=", "(", "ydum1", "+", "ydum2", ")", "/", "2", "ioffset", "=", "int", "(", "ydummid", "-", "(", "tmpdict", "[", "'bb_ns1_orig'", "]", "+", "tmpdict", "[", "'bb_ns2_orig'", "]", ")", "/", "2.0", ")", "corr_yrect_a", "-=", "ioffset", "# minimum and maximum row in the rectified slitlet encompassing", "# EMIR_NPIXPERSLIT_RECTIFIED pixels", "# a) scan number (in pixels, from 1 to NAXIS2)", "xdum1", "=", "corr_yrect_a", "+", "corr_yrect_b", "*", "tmpdict", "[", "'y0_frontier_lower'", "]", "xdum2", "=", "corr_yrect_a", "+", "corr_yrect_b", "*", "tmpdict", "[", "'y0_frontier_upper'", "]", "# b) row number (starting from zero)", "min_row_rectified", "=", "int", "(", "(", "round", "(", "xdum1", "*", "10", ")", "+", "5", ")", "/", "10", ")", "-", "tmpdict", "[", "'bb_ns1_orig'", "]", "max_row_rectified", "=", "int", "(", "(", "round", "(", "xdum2", "*", "10", ")", "-", "5", ")", "/", "10", ")", "-", "tmpdict", "[", "'bb_ns1_orig'", "]", "# save previous results in outdict", "outdict", "[", "'contents'", "]", "[", "cslitlet", "]", "[", "'y0_reference_lower_expected'", "]", "=", "y0_reference_lower_expected", "outdict", "[", "'contents'", "]", "[", "cslitlet", "]", "[", "'y0_reference_middle_expected'", "]", "=", "y0_reference_middle_expected", "outdict", "[", "'contents'", "]", "[", "cslitlet", "]", "[", "'y0_reference_upper_expected'", "]", "=", "y0_reference_upper_expected", "outdict", "[", "'contents'", "]", "[", "cslitlet", "]", "[", "'corr_yrect_a'", "]", "=", "corr_yrect_a", "outdict", "[", "'contents'", "]", "[", "cslitlet", "]", "[", "'corr_yrect_b'", "]", "=", 
"corr_yrect_b", "outdict", "[", "'contents'", "]", "[", "cslitlet", "]", "[", "'min_row_rectified'", "]", "=", "min_row_rectified", "outdict", "[", "'contents'", "]", "[", "cslitlet", "]", "[", "'max_row_rectified'", "]", "=", "max_row_rectified", "# ---", "# Create object of type RectWaveCoeff with coefficients for", "# rectification and wavelength calibration", "rectwv_coeff", "=", "RectWaveCoeff", "(", "instrument", "=", "'EMIR'", ")", "rectwv_coeff", ".", "quality_control", "=", "numina", ".", "types", ".", "qc", ".", "QC", ".", "GOOD", "rectwv_coeff", ".", "tags", "[", "'grism'", "]", "=", "grism_name", "rectwv_coeff", ".", "tags", "[", "'filter'", "]", "=", "filter_name", "rectwv_coeff", ".", "meta_info", "[", "'origin'", "]", "[", "'bound_param'", "]", "=", "master_rectwv", ".", "meta_info", "[", "'origin'", "]", "[", "'bound_param'", "]", "rectwv_coeff", ".", "meta_info", "[", "'origin'", "]", "[", "'master_rectwv'", "]", "=", "'uuid'", "+", "master_rectwv", ".", "uuid", "rectwv_coeff", ".", "meta_info", "[", "'dtu_configuration'", "]", "=", "outdict", "[", "'dtu_configuration'", "]", "rectwv_coeff", ".", "total_slitlets", "=", "EMIR_NBARS", "for", "i", "in", "range", "(", "EMIR_NBARS", ")", ":", "islitlet", "=", "i", "+", "1", "dumdict", "=", "{", "'islitlet'", ":", "islitlet", "}", "cslitlet", "=", "'slitlet'", "+", "str", "(", "islitlet", ")", ".", "zfill", "(", "2", ")", "if", "cslitlet", "in", "outdict", "[", "'contents'", "]", ":", "dumdict", ".", "update", "(", "outdict", "[", "'contents'", "]", "[", "cslitlet", "]", ")", "else", ":", "dumdict", ".", "update", "(", "{", "'csu_bar_left'", ":", "csu_conf", ".", "csu_bar_left", "(", "islitlet", ")", ",", "'csu_bar_right'", ":", "csu_conf", ".", "csu_bar_right", "(", "islitlet", ")", ",", "'csu_bar_slit_center'", ":", "csu_conf", ".", "csu_bar_slit_center", "(", "islitlet", ")", ",", "'csu_bar_slit_width'", ":", "csu_conf", ".", "csu_bar_slit_width", "(", "islitlet", ")", ",", "'x0_reference'", ":", "float", "(", "EMIR_NAXIS1", ")", "/", "2.0", "+", "0.5", ",", "'y0_frontier_lower_expected'", ":", "expected_y0_lower_frontier", "(", "islitlet", ")", ",", "'y0_frontier_upper_expected'", ":", "expected_y0_upper_frontier", "(", "islitlet", ")", "}", ")", "rectwv_coeff", ".", "missing_slitlets", ".", "append", "(", "islitlet", ")", "rectwv_coeff", ".", "contents", ".", "append", "(", "dumdict", ")", "# debugging __getstate__ and __setstate__", "# rectwv_coeff.writeto(args.out_rect_wpoly.name)", "# print('>>> Saving file ' + args.out_rect_wpoly.name)", "# check_setstate_getstate(rectwv_coeff, args.out_rect_wpoly.name)", "logger", ".", "info", "(", "'Generating RectWaveCoeff object with uuid='", "+", "rectwv_coeff", ".", "uuid", ")", "return", "rectwv_coeff" ]
Evaluate rect.+wavecal. coefficients from MOS library Parameters ---------- reduced_image : HDUList object Image with preliminary basic reduction: bpm, bias, dark and flatfield. master_rectwv : MasterRectWave instance Rectification and Wavelength Calibration Library product. Contains the library of polynomial coefficients necessary to generate an instance of RectWaveCoeff with the rectification and wavelength calibration coefficients for the particular CSU configuration. ignore_dtu_configuration : bool If True, ignore differences in DTU configuration. debugplot : int Debugging level for messages and plots. For details see 'numina.array.display.pause_debugplot.py'. Returns ------- rectwv_coeff : RectWaveCoeff instance Rectification and wavelength calibration coefficients for the particular CSU configuration.
[ "Evaluate", "rect", ".", "+", "wavecal", ".", "coefficients", "from", "MOS", "library" ]
python
train
47.075843
saltstack/salt
salt/modules/hadoop.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/hadoop.py#L30-L57
def _hadoop_cmd(module, command, *args): ''' Hadoop/hdfs command wrapper As Hadoop command has been deprecated this module will default to use hdfs command and fall back to hadoop if it is not found In order to prevent random execution the module name is checked Follows hadoop command template: hadoop module -command args E.g.: hadoop dfs -ls / ''' tool = 'hadoop' if salt.utils.path.which('hdfs'): tool = 'hdfs' out = None if module and command: if module in __authorized_modules__: mappings = {'tool': tool, 'module': module, 'command': command, 'args': ' '.join(args)} cmd = '{tool} {module} -{command} {args}'.format(**mappings) out = __salt__['cmd.run'](cmd, python_shell=False) else: return 'Error: Unknown module' else: return 'Error: Module and command not defined' return out
[ "def", "_hadoop_cmd", "(", "module", ",", "command", ",", "*", "args", ")", ":", "tool", "=", "'hadoop'", "if", "salt", ".", "utils", ".", "path", ".", "which", "(", "'hdfs'", ")", ":", "tool", "=", "'hdfs'", "out", "=", "None", "if", "module", "and", "command", ":", "if", "module", "in", "__authorized_modules__", ":", "mappings", "=", "{", "'tool'", ":", "tool", ",", "'module'", ":", "module", ",", "'command'", ":", "command", ",", "'args'", ":", "' '", ".", "join", "(", "args", ")", "}", "cmd", "=", "'{tool} {module} -{command} {args}'", ".", "format", "(", "*", "*", "mappings", ")", "out", "=", "__salt__", "[", "'cmd.run'", "]", "(", "cmd", ",", "python_shell", "=", "False", ")", "else", ":", "return", "'Error: Unknown module'", "else", ":", "return", "'Error: Module and command not defined'", "return", "out" ]
Hadoop/hdfs command wrapper. As the Hadoop command has been deprecated, this module defaults to the hdfs command and falls back to hadoop if it is not found. In order to prevent random execution, the module name is checked against a whitelist. Follows the hadoop command template: hadoop module -command args E.g.: hadoop dfs -ls /
[ "Hadoop", "/", "hdfs", "command", "wrapper" ]
python
train
33.107143
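A minimal, self-contained sketch of the command-template pattern used by _hadoop_cmd above, with the Salt execution machinery replaced by subprocess; the module whitelist and the example call are assumptions for illustration only, and a Hadoop/HDFS installation is needed to actually run a command.

import shutil
import subprocess

_AUTHORIZED_MODULES = ('dfs', 'dfsadmin', 'fsck', 'version')  # illustrative whitelist

def hadoop_cmd(module, command, *args):
    # Prefer the newer 'hdfs' front-end and fall back to 'hadoop' if it is absent.
    tool = 'hdfs' if shutil.which('hdfs') else 'hadoop'
    if not (module and command):
        return 'Error: Module and command not defined'
    if module not in _AUTHORIZED_MODULES:
        return 'Error: Unknown module'
    # Same template as above: '<tool> <module> -<command> <args>', run without a shell.
    argv = [tool, module, '-{0}'.format(command)] + list(args)
    return subprocess.run(argv, capture_output=True, text=True).stdout

# Example (needs hdfs or hadoop on PATH):
#   print(hadoop_cmd('dfs', 'ls', '/'))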
spyder-ide/conda-manager
conda_manager/api/conda_api.py
https://github.com/spyder-ide/conda-manager/blob/89a2126cbecefc92185cf979347ccac1c5ee5d9d/conda_manager/api/conda_api.py#L1094-L1108
def _pip_search(stdout, stderr): """Callback for pip search.""" result = {} lines = to_text_string(stdout).split('\n') while '' in lines: lines.remove('') for line in lines: if ' - ' in line: parts = line.split(' - ') name = parts[0].strip() description = parts[1].strip() result[name] = description return result
[ "def", "_pip_search", "(", "stdout", ",", "stderr", ")", ":", "result", "=", "{", "}", "lines", "=", "to_text_string", "(", "stdout", ")", ".", "split", "(", "'\\n'", ")", "while", "''", "in", "lines", ":", "lines", ".", "remove", "(", "''", ")", "for", "line", "in", "lines", ":", "if", "' - '", "in", "line", ":", "parts", "=", "line", ".", "split", "(", "' - '", ")", "name", "=", "parts", "[", "0", "]", ".", "strip", "(", ")", "description", "=", "parts", "[", "1", "]", ".", "strip", "(", ")", "result", "[", "name", "]", "=", "description", "return", "result" ]
Callback for pip search.
[ "Callback", "for", "pip", "search", "." ]
python
train
29.133333
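Because the callback above only splits lines on ' - ', its behaviour can be checked offline; the sample stdout below is made up for illustration and mirrors the 'name - description' format produced by pip search.

sample_stdout = """
requests - Python HTTP for Humans.
requests-oauthlib - OAuthlib authentication support for Requests.
"""

def pip_search_callback(stdout, stderr=None):
    # Same parsing idea as _pip_search above: keep only 'name - description' lines.
    result = {}
    for line in stdout.split('\n'):
        if ' - ' in line:
            name, description = line.split(' - ', 1)
            result[name.strip()] = description.strip()
    return result

print(pip_search_callback(sample_stdout))
# {'requests': 'Python HTTP for Humans.',
#  'requests-oauthlib': 'OAuthlib authentication support for Requests.'}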
ulule/django-badgify
badgify/registry.py
https://github.com/ulule/django-badgify/blob/1bf233ffeb6293ee659454de7b3794682128b6ca/badgify/registry.py#L61-L69
def get_recipe_instance(self, badge): """ Returns the recipe instance for the given badge slug. If badge has not been registered, raises ``exceptions.BadgeNotFound``. """ from .exceptions import BadgeNotFound if badge in self._registry: return self.recipes[badge] raise BadgeNotFound()
[ "def", "get_recipe_instance", "(", "self", ",", "badge", ")", ":", "from", ".", "exceptions", "import", "BadgeNotFound", "if", "badge", "in", "self", ".", "_registry", ":", "return", "self", ".", "recipes", "[", "badge", "]", "raise", "BadgeNotFound", "(", ")" ]
Returns the recipe instance for the given badge slug. If badge has not been registered, raises ``exceptions.BadgeNotFound``.
[ "Returns", "the", "recipe", "instance", "for", "the", "given", "badge", "slug", ".", "If", "badge", "has", "not", "been", "registered", "raises", "exceptions", ".", "BadgeNotFound", "." ]
python
train
38.333333
jgorset/fandjango
fandjango/middleware.py
https://github.com/jgorset/fandjango/blob/01334a76c1d9f0629842aa6830678ae097756551/fandjango/middleware.py#L277-L297
def process_response(self, request, response): """ Set compact P3P policies and save auth token to cookie. P3P is a WC3 standard (see http://www.w3.org/TR/P3P/), and although largely ignored by most browsers it is considered by IE before accepting third-party cookies (ie. cookies set by documents in iframes). If they are not set correctly, IE will not set these cookies. """ if hasattr(request, "facebook") and request.facebook and request.facebook.oauth_token: if "code" in request.REQUEST: """ Remove auth related query params """ path = get_full_path(request, remove_querystrings=['code', 'web_canvas']) response = HttpResponseRedirect(path) response.set_cookie('oauth_token', request.facebook.oauth_token.token) else: response.delete_cookie('oauth_token') response['P3P'] = 'CP="IDC CURa ADMa OUR IND PHY ONL COM STA"' return response
[ "def", "process_response", "(", "self", ",", "request", ",", "response", ")", ":", "if", "hasattr", "(", "request", ",", "\"facebook\"", ")", "and", "request", ".", "facebook", "and", "request", ".", "facebook", ".", "oauth_token", ":", "if", "\"code\"", "in", "request", ".", "REQUEST", ":", "\"\"\" Remove auth related query params \"\"\"", "path", "=", "get_full_path", "(", "request", ",", "remove_querystrings", "=", "[", "'code'", ",", "'web_canvas'", "]", ")", "response", "=", "HttpResponseRedirect", "(", "path", ")", "response", ".", "set_cookie", "(", "'oauth_token'", ",", "request", ".", "facebook", ".", "oauth_token", ".", "token", ")", "else", ":", "response", ".", "delete_cookie", "(", "'oauth_token'", ")", "response", "[", "'P3P'", "]", "=", "'CP=\"IDC CURa ADMa OUR IND PHY ONL COM STA\"'", "return", "response" ]
Set compact P3P policies and save auth token to cookie. P3P is a W3C standard (see http://www.w3.org/TR/P3P/), and although largely ignored by most browsers it is considered by IE before accepting third-party cookies (i.e. cookies set by documents in iframes). If they are not set correctly, IE will not set these cookies.
[ "Set", "compact", "P3P", "policies", "and", "save", "auth", "token", "to", "cookie", "." ]
python
train
47.047619
jahuth/litus
spikes.py
https://github.com/jahuth/litus/blob/712b016ea2dbb1cf0a30bfdbb0a136945a7b7c5e/spikes.py#L145-L151
def find(self,cell_designation,cell_filter=lambda x,c: 'c' in x and x['c'] == c): """ finds spike containers in a multi spike containers collection """ res = [i for i,sc in enumerate(self.spike_containers) if cell_filter(sc.meta,cell_designation)] if len(res) > 0: return res[0]
[ "def", "find", "(", "self", ",", "cell_designation", ",", "cell_filter", "=", "lambda", "x", ",", "c", ":", "'c'", "in", "x", "and", "x", "[", "'c'", "]", "==", "c", ")", ":", "res", "=", "[", "i", "for", "i", ",", "sc", "in", "enumerate", "(", "self", ".", "spike_containers", ")", "if", "cell_filter", "(", "sc", ".", "meta", ",", "cell_designation", ")", "]", "if", "len", "(", "res", ")", ">", "0", ":", "return", "res", "[", "0", "]" ]
finds spike containers in a multi spike containers collection
[ "finds", "spike", "containers", "in", "a", "multi", "spike", "containers", "collection" ]
python
train
46.857143
christophertbrown/bioscripts
ctbBio/rRNA_insertions_gff.py
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/rRNA_insertions_gff.py#L42-L76
def parse_orf(insertion, gff): """ parse ORF to gff format """ offset = insertion['offset'] if type(insertion['orf']) is not str: return gff for orf in parse_fasta(insertion['orf'].split('|')): ID = orf[0].split('>')[1].split()[0] Start, End, strand = [int(i) for i in orf[0].split(' # ')[1:4]] if strand == 1: strand = '+' else: strand = '-' GeneStrand = insertion['strand'] if strand != GeneStrand: if strand == '+': strand = '-' else: strand = '+' Start, End = End - 2, Start - 2 Start, End = abs(Start + offset) - 1, abs(End + offset) - 1 annot = orf[0].split()[1] if annot == 'n/a': annot = 'unknown' gff['#seqname'].append(insertion['ID']) gff['source'].append('Prodigal and Pfam') gff['feature'].append('CDS') gff['start'].append(Start) gff['end'].append(End) gff['score'].append('.') gff['strand'].append(strand) gff['frame'].append('.') gff['attribute'].append('ID=%s; Name=%s' % (ID, annot)) return gff
[ "def", "parse_orf", "(", "insertion", ",", "gff", ")", ":", "offset", "=", "insertion", "[", "'offset'", "]", "if", "type", "(", "insertion", "[", "'orf'", "]", ")", "is", "not", "str", ":", "return", "gff", "for", "orf", "in", "parse_fasta", "(", "insertion", "[", "'orf'", "]", ".", "split", "(", "'|'", ")", ")", ":", "ID", "=", "orf", "[", "0", "]", ".", "split", "(", "'>'", ")", "[", "1", "]", ".", "split", "(", ")", "[", "0", "]", "Start", ",", "End", ",", "strand", "=", "[", "int", "(", "i", ")", "for", "i", "in", "orf", "[", "0", "]", ".", "split", "(", "' # '", ")", "[", "1", ":", "4", "]", "]", "if", "strand", "==", "1", ":", "strand", "=", "'+'", "else", ":", "strand", "=", "'-'", "GeneStrand", "=", "insertion", "[", "'strand'", "]", "if", "strand", "!=", "GeneStrand", ":", "if", "strand", "==", "'+'", ":", "strand", "=", "'-'", "else", ":", "strand", "=", "'+'", "Start", ",", "End", "=", "End", "-", "2", ",", "Start", "-", "2", "Start", ",", "End", "=", "abs", "(", "Start", "+", "offset", ")", "-", "1", ",", "abs", "(", "End", "+", "offset", ")", "-", "1", "annot", "=", "orf", "[", "0", "]", ".", "split", "(", ")", "[", "1", "]", "if", "annot", "==", "'n/a'", ":", "annot", "=", "'unknown'", "gff", "[", "'#seqname'", "]", ".", "append", "(", "insertion", "[", "'ID'", "]", ")", "gff", "[", "'source'", "]", ".", "append", "(", "'Prodigal and Pfam'", ")", "gff", "[", "'feature'", "]", ".", "append", "(", "'CDS'", ")", "gff", "[", "'start'", "]", ".", "append", "(", "Start", ")", "gff", "[", "'end'", "]", ".", "append", "(", "End", ")", "gff", "[", "'score'", "]", ".", "append", "(", "'.'", ")", "gff", "[", "'strand'", "]", ".", "append", "(", "strand", ")", "gff", "[", "'frame'", "]", ".", "append", "(", "'.'", ")", "gff", "[", "'attribute'", "]", ".", "append", "(", "'ID=%s; Name=%s'", "%", "(", "ID", ",", "annot", ")", ")", "return", "gff" ]
parse ORF to gff format
[ "parse", "ORF", "to", "gff", "format" ]
python
train
33.2
ThreatResponse/margaritashotgun
margaritashotgun/remote_shell.py
https://github.com/ThreatResponse/margaritashotgun/blob/6dee53ef267959b214953439968244cc46a19690/margaritashotgun/remote_shell.py#L160-L183
def execute_async(self, command, callback=None): """ Executes command on remote hosts without blocking :type command: str :param command: command to be run on remote host :type callback: function :param callback: function to call when execution completes """ try: logger.debug(('{0}: execute async "{1}"' 'with callback {2}'.format(self.target_address, command, callback))) future = self.executor.submit(self.execute, command) if callback is not None: future.add_done_callback(callback) return future except (AuthenticationException, SSHException, ChannelException, SocketError) as ex: logger.critical(("{0} execution failed on {1} with exception:" "{2}".format(command, self.target_address, ex))) raise SSHCommandError(self.target_address, command, ex)
[ "def", "execute_async", "(", "self", ",", "command", ",", "callback", "=", "None", ")", ":", "try", ":", "logger", ".", "debug", "(", "(", "'{0}: execute async \"{1}\"'", "'with callback {2}'", ".", "format", "(", "self", ".", "target_address", ",", "command", ",", "callback", ")", ")", ")", "future", "=", "self", ".", "executor", ".", "submit", "(", "self", ".", "execute", ",", "command", ")", "if", "callback", "is", "not", "None", ":", "future", ".", "add_done_callback", "(", "callback", ")", "return", "future", "except", "(", "AuthenticationException", ",", "SSHException", ",", "ChannelException", ",", "SocketError", ")", "as", "ex", ":", "logger", ".", "critical", "(", "(", "\"{0} execution failed on {1} with exception:\"", "\"{2}\"", ".", "format", "(", "command", ",", "self", ".", "target_address", ",", "ex", ")", ")", ")", "raise", "SSHCommandError", "(", "self", ".", "target_address", ",", "command", ",", "ex", ")" ]
Executes command on remote hosts without blocking :type command: str :param command: command to be run on remote host :type callback: function :param callback: function to call when execution completes
[ "Executes", "command", "on", "remote", "hosts", "without", "blocking" ]
python
train
46.458333
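A stripped-down sketch of the submit-plus-callback pattern used by execute_async, with the SSH execution replaced by a local function; all names below are illustrative stand-ins.

from concurrent.futures import ThreadPoolExecutor

executor = ThreadPoolExecutor(max_workers=2)

def run_command(command):
    # Stand-in for the remote execution performed over SSH in the class above.
    return 'ran: {0}'.format(command)

def on_done(future):
    # add_done_callback hands the completed future to the callback.
    print(future.result())

future = executor.submit(run_command, 'uname -a')
future.add_done_callback(on_done)
executor.shutdown(wait=True)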
tensorflow/tensorboard
tensorboard/manager.py
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/manager.py#L273-L286
def remove_info_file(): """Remove the current process's TensorBoardInfo file, if it exists. If the file does not exist, no action is taken and no error is raised. """ try: os.unlink(_get_info_file_path()) except OSError as e: if e.errno == errno.ENOENT: # The user may have wiped their temporary directory or something. # Not a problem: we're already in the state that we want to be in. pass else: raise
[ "def", "remove_info_file", "(", ")", ":", "try", ":", "os", ".", "unlink", "(", "_get_info_file_path", "(", ")", ")", "except", "OSError", "as", "e", ":", "if", "e", ".", "errno", "==", "errno", ".", "ENOENT", ":", "# The user may have wiped their temporary directory or something.", "# Not a problem: we're already in the state that we want to be in.", "pass", "else", ":", "raise" ]
Remove the current process's TensorBoardInfo file, if it exists. If the file does not exist, no action is taken and no error is raised.
[ "Remove", "the", "current", "process", "s", "TensorBoardInfo", "file", "if", "it", "exists", "." ]
python
train
31.214286
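The errno check above is the standard idempotent-unlink idiom; a self-contained version (with a hypothetical path) looks like this. On Python 3 the same effect can also be had with pathlib's Path.unlink(missing_ok=True).

import errno
import os

def remove_if_present(path):
    # Removing a file that is already gone is treated as success.
    try:
        os.unlink(path)
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise

remove_if_present('/tmp/does-not-exist.info')  # no error raised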
ArchiveTeam/wpull
wpull/url.py
https://github.com/ArchiveTeam/wpull/blob/ddf051aa3322479325ba20aa778cb2cb97606bf5/wpull/url.py#L685-L697
def urljoin(base_url, url, allow_fragments=True): '''Join URLs like ``urllib.parse.urljoin`` but allow scheme-relative URL.''' if url.startswith('//') and len(url) > 2: scheme = base_url.partition(':')[0] if scheme: return urllib.parse.urljoin( base_url, '{0}:{1}'.format(scheme, url), allow_fragments=allow_fragments ) return urllib.parse.urljoin( base_url, url, allow_fragments=allow_fragments)
[ "def", "urljoin", "(", "base_url", ",", "url", ",", "allow_fragments", "=", "True", ")", ":", "if", "url", ".", "startswith", "(", "'//'", ")", "and", "len", "(", "url", ")", ">", "2", ":", "scheme", "=", "base_url", ".", "partition", "(", "':'", ")", "[", "0", "]", "if", "scheme", ":", "return", "urllib", ".", "parse", ".", "urljoin", "(", "base_url", ",", "'{0}:{1}'", ".", "format", "(", "scheme", ",", "url", ")", ",", "allow_fragments", "=", "allow_fragments", ")", "return", "urllib", ".", "parse", ".", "urljoin", "(", "base_url", ",", "url", ",", "allow_fragments", "=", "allow_fragments", ")" ]
Join URLs like ``urllib.parse.urljoin`` but allow scheme-relative URL.
[ "Join", "URLs", "like", "urllib", ".", "parse", ".", "urljoin", "but", "allow", "scheme", "-", "relative", "URL", "." ]
python
train
37.923077
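The point of the wrapper above is the scheme-relative ('//host/path') case; the standalone sketch below reproduces the same logic with urllib.parse and shows the resulting joins on invented example URLs.

import urllib.parse

def urljoin_scheme_relative(base_url, url, allow_fragments=True):
    # Re-attach the base scheme for scheme-relative URLs such as '//example.com/a'.
    if url.startswith('//') and len(url) > 2:
        scheme = base_url.partition(':')[0]
        if scheme:
            url = '{0}:{1}'.format(scheme, url)
    return urllib.parse.urljoin(base_url, url, allow_fragments=allow_fragments)

print(urljoin_scheme_relative('https://example.com/index.html', '//cdn.example.org/app.js'))
# https://cdn.example.org/app.js
print(urljoin_scheme_relative('https://example.com/a/b', 'c'))
# https://example.com/a/c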
google/grr
grr/server/grr_response_server/databases/mem_events.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/databases/mem_events.py#L63-L67
def WriteAPIAuditEntry(self, entry): """Writes an audit entry to the database.""" copy = entry.Copy() copy.timestamp = rdfvalue.RDFDatetime.Now() self.api_audit_entries.append(copy)
[ "def", "WriteAPIAuditEntry", "(", "self", ",", "entry", ")", ":", "copy", "=", "entry", ".", "Copy", "(", ")", "copy", ".", "timestamp", "=", "rdfvalue", ".", "RDFDatetime", ".", "Now", "(", ")", "self", ".", "api_audit_entries", ".", "append", "(", "copy", ")" ]
Writes an audit entry to the database.
[ "Writes", "an", "audit", "entry", "to", "the", "database", "." ]
python
train
38.6
bouncer-app/bouncer
bouncer/__init__.py
https://github.com/bouncer-app/bouncer/blob/2d645dce18e3849d338d21380529abf8db5eeb9d/bouncer/__init__.py#L37-L41
def ensure(user, action, subject): """ Similar to ``can`` but will raise a AccessDenied Exception if does not have access""" ability = Ability(user, get_authorization_method()) if ability.cannot(action, subject): raise AccessDenied()
[ "def", "ensure", "(", "user", ",", "action", ",", "subject", ")", ":", "ability", "=", "Ability", "(", "user", ",", "get_authorization_method", "(", ")", ")", "if", "ability", ".", "cannot", "(", "action", ",", "subject", ")", ":", "raise", "AccessDenied", "(", ")" ]
Similar to ``can`` but will raise an AccessDenied exception if the user does not have access
[ "Similar", "to", "can", "but", "will", "raise", "a", "AccessDenied", "Exception", "if", "does", "not", "have", "access" ]
python
train
49.8
pandas-dev/pandas
pandas/util/_validators.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/util/_validators.py#L221-L227
def validate_bool_kwarg(value, arg_name): """ Ensures that argument passed in arg_name is of type bool. """ if not (is_bool(value) or value is None): raise ValueError('For argument "{arg}" expected type bool, received ' 'type {typ}.'.format(arg=arg_name, typ=type(value).__name__)) return value
[ "def", "validate_bool_kwarg", "(", "value", ",", "arg_name", ")", ":", "if", "not", "(", "is_bool", "(", "value", ")", "or", "value", "is", "None", ")", ":", "raise", "ValueError", "(", "'For argument \"{arg}\" expected type bool, received '", "'type {typ}.'", ".", "format", "(", "arg", "=", "arg_name", ",", "typ", "=", "type", "(", "value", ")", ".", "__name__", ")", ")", "return", "value" ]
Ensures that argument passed in arg_name is of type bool.
[ "Ensures", "that", "argument", "passed", "in", "arg_name", "is", "of", "type", "bool", "." ]
python
train
54.142857
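A standalone version of the same check, with pandas' internal is_bool helper replaced by isinstance, to show the intended call pattern; the keyword name 'inplace' is only an example.

import numpy as np

def validate_bool_kwarg(value, arg_name):
    # Accept real booleans (including numpy bools) or None, reject everything else.
    if not (value is None or isinstance(value, (bool, np.bool_))):
        raise ValueError('For argument "{arg}" expected type bool, received '
                         'type {typ}.'.format(arg=arg_name,
                                              typ=type(value).__name__))
    return value

validate_bool_kwarg(True, 'inplace')     # returns True
validate_bool_kwarg(None, 'inplace')     # returns None
# validate_bool_kwarg('yes', 'inplace')  # would raise ValueError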
jobovy/galpy
galpy/potential/Potential.py
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/potential/Potential.py#L1316-L1356
def conc(self,H=70.,Om=0.3,overdens=200.,wrtcrit=False, ro=None,vo=None): """ NAME: conc PURPOSE: return the concentration INPUT: H= (default: 70) Hubble constant in km/s/Mpc Om= (default: 0.3) Omega matter overdens= (200) overdensity which defines the virial radius wrtcrit= (False) if True, the overdensity is wrt the critical density rather than the mean matter density ro= distance scale in kpc or as Quantity (default: object-wide, which if not set is 8 kpc)) vo= velocity scale in km/s or as Quantity (default: object-wide, which if not set is 220 km/s)) OUTPUT: concentration (scale/rvir) HISTORY: 2014-04-03 - Written - Bovy (IAS) """ if ro is None: ro= self._ro if vo is None: vo= self._vo try: return self.rvir(H=H,Om=Om,overdens=overdens,wrtcrit=wrtcrit, ro=ro,vo=vo,use_physical=False)/self._scale except AttributeError: raise AttributeError("This potential does not have a '_scale' defined to base the concentration on or does not support calculating the virial radius")
[ "def", "conc", "(", "self", ",", "H", "=", "70.", ",", "Om", "=", "0.3", ",", "overdens", "=", "200.", ",", "wrtcrit", "=", "False", ",", "ro", "=", "None", ",", "vo", "=", "None", ")", ":", "if", "ro", "is", "None", ":", "ro", "=", "self", ".", "_ro", "if", "vo", "is", "None", ":", "vo", "=", "self", ".", "_vo", "try", ":", "return", "self", ".", "rvir", "(", "H", "=", "H", ",", "Om", "=", "Om", ",", "overdens", "=", "overdens", ",", "wrtcrit", "=", "wrtcrit", ",", "ro", "=", "ro", ",", "vo", "=", "vo", ",", "use_physical", "=", "False", ")", "/", "self", ".", "_scale", "except", "AttributeError", ":", "raise", "AttributeError", "(", "\"This potential does not have a '_scale' defined to base the concentration on or does not support calculating the virial radius\"", ")" ]
NAME: conc PURPOSE: return the concentration INPUT: H= (default: 70) Hubble constant in km/s/Mpc Om= (default: 0.3) Omega matter overdens= (200) overdensity which defines the virial radius wrtcrit= (False) if True, the overdensity is wrt the critical density rather than the mean matter density ro= distance scale in kpc or as Quantity (default: object-wide, which if not set is 8 kpc)) vo= velocity scale in km/s or as Quantity (default: object-wide, which if not set is 220 km/s)) OUTPUT: concentration (scale/rvir) HISTORY: 2014-04-03 - Written - Bovy (IAS)
[ "NAME", ":" ]
python
train
30.512195
MrTango/RISparser
RISparser/parser.py
https://github.com/MrTango/RISparser/blob/d133d74022d3edbbdec19ef72bd34c8902a0bad1/RISparser/parser.py#L180-L204
def read(filelines, mapping=None, wok=False): """Parse a ris lines and return a list of entries. Entries are codified as dictionaries whose keys are the different tags. For single line and singly occurring tags, the content is codified as a string. In the case of multiline or multiple key occurrences, the content is returned as a list of strings. Keyword arguments: bibliography_file -- ris filehandle mapping -- custom RIS tags mapping wok -- flag, Web of Knowledge format is used if True, otherwise Refman's RIS specifications are used. """ if wok: if not mapping: mapping = WOK_TAG_KEY_MAPPING return Wok(filelines, mapping).parse() else: if not mapping: mapping = TAG_KEY_MAPPING return Ris(filelines, mapping).parse()
[ "def", "read", "(", "filelines", ",", "mapping", "=", "None", ",", "wok", "=", "False", ")", ":", "if", "wok", ":", "if", "not", "mapping", ":", "mapping", "=", "WOK_TAG_KEY_MAPPING", "return", "Wok", "(", "filelines", ",", "mapping", ")", ".", "parse", "(", ")", "else", ":", "if", "not", "mapping", ":", "mapping", "=", "TAG_KEY_MAPPING", "return", "Ris", "(", "filelines", ",", "mapping", ")", ".", "parse", "(", ")" ]
Parse RIS lines and return a list of entries. Entries are codified as dictionaries whose keys are the different tags. For single line and singly occurring tags, the content is codified as a string. In the case of multiline or multiple key occurrences, the content is returned as a list of strings. Keyword arguments: bibliography_file -- ris filehandle mapping -- custom RIS tags mapping wok -- flag, Web of Knowledge format is used if True, otherwise Refman's RIS specifications are used.
[ "Parse", "a", "ris", "lines", "and", "return", "a", "list", "of", "entries", "." ]
python
train
32.92
openthread/openthread
tools/harness-thci/OpenThread.py
https://github.com/openthread/openthread/blob/0208d10563aa21c518092985c78ecf9cd223ab74/tools/harness-thci/OpenThread.py#L443-L483
def __convertRlocToRouterId(self, xRloc16): """mapping Rloc16 to router id Args: xRloc16: hex rloc16 short address Returns: actual router id allocated by leader """ routerList = [] routerList = self.__sendCommand('router list')[0].split() print routerList print xRloc16 for index in routerList: router = [] cmd = 'router %s' % index router = self.__sendCommand(cmd) for line in router: if 'Done' in line: break elif 'Router ID' in line: routerid = line.split()[2] elif 'Rloc' in line: rloc16 = line.split()[1] else: pass # process input rloc16 if isinstance(xRloc16, str): rloc16 = '0x' + rloc16 if rloc16 == xRloc16: return routerid elif isinstance(xRloc16, int): if int(rloc16, 16) == xRloc16: return routerid else: pass return None
[ "def", "__convertRlocToRouterId", "(", "self", ",", "xRloc16", ")", ":", "routerList", "=", "[", "]", "routerList", "=", "self", ".", "__sendCommand", "(", "'router list'", ")", "[", "0", "]", ".", "split", "(", ")", "print", "routerList", "print", "xRloc16", "for", "index", "in", "routerList", ":", "router", "=", "[", "]", "cmd", "=", "'router %s'", "%", "index", "router", "=", "self", ".", "__sendCommand", "(", "cmd", ")", "for", "line", "in", "router", ":", "if", "'Done'", "in", "line", ":", "break", "elif", "'Router ID'", "in", "line", ":", "routerid", "=", "line", ".", "split", "(", ")", "[", "2", "]", "elif", "'Rloc'", "in", "line", ":", "rloc16", "=", "line", ".", "split", "(", ")", "[", "1", "]", "else", ":", "pass", "# process input rloc16", "if", "isinstance", "(", "xRloc16", ",", "str", ")", ":", "rloc16", "=", "'0x'", "+", "rloc16", "if", "rloc16", "==", "xRloc16", ":", "return", "routerid", "elif", "isinstance", "(", "xRloc16", ",", "int", ")", ":", "if", "int", "(", "rloc16", ",", "16", ")", "==", "xRloc16", ":", "return", "routerid", "else", ":", "pass", "return", "None" ]
mapping Rloc16 to router id Args: xRloc16: hex rloc16 short address Returns: actual router id allocated by leader
[ "mapping", "Rloc16", "to", "router", "id" ]
python
train
28
awslabs/serverless-application-model
samtranslator/plugins/globals/globals.py
https://github.com/awslabs/serverless-application-model/blob/cccb0c96b5c91e53355ebc07e542467303a5eedd/samtranslator/plugins/globals/globals.py#L362-L384
def _token_of(self, input): """ Returns the token type of the input. :param input: Input whose type is to be determined :return TOKENS: Token type of the input """ if isinstance(input, dict): # Intrinsic functions are always dicts if is_intrinsics(input): # Intrinsic functions are handled *exactly* like a primitive type because # they resolve to a primitive type when creating a stack with CloudFormation return self.TOKEN.PRIMITIVE else: return self.TOKEN.DICT elif isinstance(input, list): return self.TOKEN.LIST else: return self.TOKEN.PRIMITIVE
[ "def", "_token_of", "(", "self", ",", "input", ")", ":", "if", "isinstance", "(", "input", ",", "dict", ")", ":", "# Intrinsic functions are always dicts", "if", "is_intrinsics", "(", "input", ")", ":", "# Intrinsic functions are handled *exactly* like a primitive type because", "# they resolve to a primitive type when creating a stack with CloudFormation", "return", "self", ".", "TOKEN", ".", "PRIMITIVE", "else", ":", "return", "self", ".", "TOKEN", ".", "DICT", "elif", "isinstance", "(", "input", ",", "list", ")", ":", "return", "self", ".", "TOKEN", ".", "LIST", "else", ":", "return", "self", ".", "TOKEN", ".", "PRIMITIVE" ]
Returns the token type of the input. :param input: Input whose type is to be determined :return TOKENS: Token type of the input
[ "Returns", "the", "token", "type", "of", "the", "input", "." ]
python
train
31.347826
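A self-contained sketch of the same classification, with the is_intrinsics helper approximated by a check for a single 'Ref'/'Fn::' key; the token names are copied from the method above, everything else is an assumption for illustration.

from enum import Enum

class Token(Enum):
    PRIMITIVE = 'primitive'
    DICT = 'dict'
    LIST = 'list'

def looks_like_intrinsic(value):
    # Rough approximation: a single-key dict whose key is Ref or an Fn:: function.
    if isinstance(value, dict) and len(value) == 1:
        key = next(iter(value))
        return key == 'Ref' or key.startswith('Fn::')
    return False

def token_of(value):
    if isinstance(value, dict):
        # Intrinsics resolve to a primitive when CloudFormation creates the stack.
        return Token.PRIMITIVE if looks_like_intrinsic(value) else Token.DICT
    if isinstance(value, list):
        return Token.LIST
    return Token.PRIMITIVE

print(token_of({'Ref': 'MyBucket'}))  # Token.PRIMITIVE
print(token_of({'Timeout': 30}))      # Token.DICT
print(token_of([1, 2, 3]))            # Token.LIST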
DLR-RM/RAFCON
source/rafcon/core/global_variable_manager.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/core/global_variable_manager.py#L243-L250
def set_locked_variable(self, key, access_key, value): """Set an already locked global variable :param key: the key of the global variable to be set :param access_key: the access key to the already locked global variable :param value: the new value of the global variable """ return self.set_variable(key, value, per_reference=False, access_key=access_key)
[ "def", "set_locked_variable", "(", "self", ",", "key", ",", "access_key", ",", "value", ")", ":", "return", "self", ".", "set_variable", "(", "key", ",", "value", ",", "per_reference", "=", "False", ",", "access_key", "=", "access_key", ")" ]
Set an already locked global variable :param key: the key of the global variable to be set :param access_key: the access key to the already locked global variable :param value: the new value of the global variable
[ "Set", "an", "already", "locked", "global", "variable" ]
python
train
49.75
markovmodel/PyEMMA
pyemma/coordinates/transform/vamp.py
https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/coordinates/transform/vamp.py#L264-L301
def _diagonalize(self): """Performs SVD on covariance matrices and save left, right singular vectors and values in the model. Parameters ---------- scaling : None or string, default=None Scaling to be applied to the VAMP modes upon transformation * None: no scaling will be applied, variance of the singular functions is 1 * 'kinetic map' or 'km': singular functions are scaled by singular value. Note that only the left singular functions induce a kinetic map. """ L0 = spd_inv_split(self.C00, epsilon=self.epsilon) self._rank0 = L0.shape[1] if L0.ndim == 2 else 1 Lt = spd_inv_split(self.Ctt, epsilon=self.epsilon) self._rankt = Lt.shape[1] if Lt.ndim == 2 else 1 W = np.dot(L0.T, self.C0t).dot(Lt) from scipy.linalg import svd A, s, BT = svd(W, compute_uv=True, lapack_driver='gesvd') self._singular_values = s # don't pass any values in the argument list that call _diagonalize again!!! m = VAMPModel._dimension(self._rank0, self._rankt, self.dim, self._singular_values) U = np.dot(L0, A[:, :m]) V = np.dot(Lt, BT[:m, :].T) # scale vectors if self.scaling is not None: U *= s[np.newaxis, 0:m] # scaled left singular functions induce a kinetic map V *= s[np.newaxis, 0:m] # scaled right singular functions induce a kinetic map wrt. backward propagator self._U = U self._V = V self._svd_performed = True
[ "def", "_diagonalize", "(", "self", ")", ":", "L0", "=", "spd_inv_split", "(", "self", ".", "C00", ",", "epsilon", "=", "self", ".", "epsilon", ")", "self", ".", "_rank0", "=", "L0", ".", "shape", "[", "1", "]", "if", "L0", ".", "ndim", "==", "2", "else", "1", "Lt", "=", "spd_inv_split", "(", "self", ".", "Ctt", ",", "epsilon", "=", "self", ".", "epsilon", ")", "self", ".", "_rankt", "=", "Lt", ".", "shape", "[", "1", "]", "if", "Lt", ".", "ndim", "==", "2", "else", "1", "W", "=", "np", ".", "dot", "(", "L0", ".", "T", ",", "self", ".", "C0t", ")", ".", "dot", "(", "Lt", ")", "from", "scipy", ".", "linalg", "import", "svd", "A", ",", "s", ",", "BT", "=", "svd", "(", "W", ",", "compute_uv", "=", "True", ",", "lapack_driver", "=", "'gesvd'", ")", "self", ".", "_singular_values", "=", "s", "# don't pass any values in the argument list that call _diagonalize again!!!", "m", "=", "VAMPModel", ".", "_dimension", "(", "self", ".", "_rank0", ",", "self", ".", "_rankt", ",", "self", ".", "dim", ",", "self", ".", "_singular_values", ")", "U", "=", "np", ".", "dot", "(", "L0", ",", "A", "[", ":", ",", ":", "m", "]", ")", "V", "=", "np", ".", "dot", "(", "Lt", ",", "BT", "[", ":", "m", ",", ":", "]", ".", "T", ")", "# scale vectors", "if", "self", ".", "scaling", "is", "not", "None", ":", "U", "*=", "s", "[", "np", ".", "newaxis", ",", "0", ":", "m", "]", "# scaled left singular functions induce a kinetic map", "V", "*=", "s", "[", "np", ".", "newaxis", ",", "0", ":", "m", "]", "# scaled right singular functions induce a kinetic map wrt. backward propagator", "self", ".", "_U", "=", "U", "self", ".", "_V", "=", "V", "self", ".", "_svd_performed", "=", "True" ]
Performs SVD on covariance matrices and save left, right singular vectors and values in the model. Parameters ---------- scaling : None or string, default=None Scaling to be applied to the VAMP modes upon transformation * None: no scaling will be applied, variance of the singular functions is 1 * 'kinetic map' or 'km': singular functions are scaled by singular value. Note that only the left singular functions induce a kinetic map.
[ "Performs", "SVD", "on", "covariance", "matrices", "and", "save", "left", "right", "singular", "vectors", "and", "values", "in", "the", "model", "." ]
python
train
40.921053
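The linear algebra in _diagonalize can be reproduced with plain numpy/scipy on small synthetic covariance matrices; spd_inv_split is approximated here by an eigendecomposition-based whitening, and the data matrices are random stand-ins, not anything from PyEMMA.

import numpy as np
from scipy.linalg import svd, eigh

def spd_inv_split_approx(C, epsilon=1e-10):
    # Return L such that L.T @ C @ L = I on the retained subspace (simple whitening).
    s, V = eigh(C)
    keep = s > epsilon
    return V[:, keep] / np.sqrt(s[keep])

rng = np.random.RandomState(0)
X = rng.randn(500, 4)
Y = X + 0.1 * rng.randn(500, 4)           # fake time-lagged data
C00, Ctt, C0t = X.T @ X, Y.T @ Y, X.T @ Y

L0 = spd_inv_split_approx(C00)
Lt = spd_inv_split_approx(Ctt)
W = L0.T @ C0t @ Lt                       # whitened cross-covariance
A, s, BT = svd(W, lapack_driver='gesvd')
U, V = L0 @ A, Lt @ BT.T                  # left/right singular functions
print(s)                                  # singular values (close to 1 for this data)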
Cymmetria/honeycomb
honeycomb/integrationmanager/tasks.py
https://github.com/Cymmetria/honeycomb/blob/33ea91b5cf675000e4e85dd02efe580ea6e95c86/honeycomb/integrationmanager/tasks.py#L74-L89
def get_valid_configured_integrations(alert): """Return a list of integrations for alert filtered by alert_type. :returns: A list of relevant integrations """ if not configured_integrations: return [] # Collect all integrations that are configured for specific alert_type # or have no specific supported_event_types (i.e., all alert types) valid_configured_integrations = [ _ for _ in configured_integrations if _.integration.integration_type == IntegrationTypes.EVENT_OUTPUT.name and (not _.integration.supported_event_types or alert.alert_type in _.integration.supported_event_types) ] return valid_configured_integrations
[ "def", "get_valid_configured_integrations", "(", "alert", ")", ":", "if", "not", "configured_integrations", ":", "return", "[", "]", "# Collect all integrations that are configured for specific alert_type", "# or have no specific supported_event_types (i.e., all alert types)", "valid_configured_integrations", "=", "[", "_", "for", "_", "in", "configured_integrations", "if", "_", ".", "integration", ".", "integration_type", "==", "IntegrationTypes", ".", "EVENT_OUTPUT", ".", "name", "and", "(", "not", "_", ".", "integration", ".", "supported_event_types", "or", "alert", ".", "alert_type", "in", "_", ".", "integration", ".", "supported_event_types", ")", "]", "return", "valid_configured_integrations" ]
Return a list of integrations for alert filtered by alert_type. :returns: A list of relevant integrations
[ "Return", "a", "list", "of", "integrations", "for", "alert", "filtered", "by", "alert_type", "." ]
python
train
42
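The filter above reduces to a list comprehension over configured integrations; the tiny stand-in objects and alert types below are invented purely to show which entries pass.

from collections import namedtuple

Integration = namedtuple('Integration', 'integration_type supported_event_types')
Configured = namedtuple('Configured', 'integration')
Alert = namedtuple('Alert', 'alert_type')

configured_integrations = [
    Configured(Integration('EVENT_OUTPUT', [])),             # accepts all alert types
    Configured(Integration('EVENT_OUTPUT', ['ssh_login'])),  # only ssh_login alerts
    Configured(Integration('EVENT_OUTPUT', ['ftp_login'])),  # filtered out below
]

alert = Alert('ssh_login')
valid = [c for c in configured_integrations
         if c.integration.integration_type == 'EVENT_OUTPUT'
         and (not c.integration.supported_event_types
              or alert.alert_type in c.integration.supported_event_types)]
print(len(valid))  # 2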
lrq3000/pyFileFixity
pyFileFixity/lib/sortedcontainers/sortedlistwithkey.py
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/sortedcontainers/sortedlistwithkey.py#L254-L312
def _delete(self, pos, idx): """ Delete the item at the given (pos, idx). Combines lists that are less than half the load level. Updates the index when the sublist length is more than half the load level. This requires decrementing the nodes in a traversal from the leaf node to the root. For an example traversal see self._loc. """ _maxes, _lists, _keys, _index = self._maxes, self._lists, self._keys, self._index keys_pos = _keys[pos] lists_pos = _lists[pos] del keys_pos[idx] del lists_pos[idx] self._len -= 1 len_keys_pos = len(keys_pos) if len_keys_pos > self._half: _maxes[pos] = keys_pos[-1] if len(_index) > 0: child = self._offset + pos while child > 0: _index[child] -= 1 child = (child - 1) >> 1 _index[0] -= 1 elif len(_keys) > 1: if not pos: pos += 1 prev = pos - 1 _keys[prev].extend(_keys[pos]) _lists[prev].extend(_lists[pos]) _maxes[prev] = _keys[prev][-1] del _keys[pos] del _lists[pos] del _maxes[pos] del _index[:] self._expand(prev) elif len_keys_pos: _maxes[pos] = keys_pos[-1] else: del _keys[pos] del _lists[pos] del _maxes[pos] del _index[:]
[ "def", "_delete", "(", "self", ",", "pos", ",", "idx", ")", ":", "_maxes", ",", "_lists", ",", "_keys", ",", "_index", "=", "self", ".", "_maxes", ",", "self", ".", "_lists", ",", "self", ".", "_keys", ",", "self", ".", "_index", "keys_pos", "=", "_keys", "[", "pos", "]", "lists_pos", "=", "_lists", "[", "pos", "]", "del", "keys_pos", "[", "idx", "]", "del", "lists_pos", "[", "idx", "]", "self", ".", "_len", "-=", "1", "len_keys_pos", "=", "len", "(", "keys_pos", ")", "if", "len_keys_pos", ">", "self", ".", "_half", ":", "_maxes", "[", "pos", "]", "=", "keys_pos", "[", "-", "1", "]", "if", "len", "(", "_index", ")", ">", "0", ":", "child", "=", "self", ".", "_offset", "+", "pos", "while", "child", ">", "0", ":", "_index", "[", "child", "]", "-=", "1", "child", "=", "(", "child", "-", "1", ")", ">>", "1", "_index", "[", "0", "]", "-=", "1", "elif", "len", "(", "_keys", ")", ">", "1", ":", "if", "not", "pos", ":", "pos", "+=", "1", "prev", "=", "pos", "-", "1", "_keys", "[", "prev", "]", ".", "extend", "(", "_keys", "[", "pos", "]", ")", "_lists", "[", "prev", "]", ".", "extend", "(", "_lists", "[", "pos", "]", ")", "_maxes", "[", "prev", "]", "=", "_keys", "[", "prev", "]", "[", "-", "1", "]", "del", "_keys", "[", "pos", "]", "del", "_lists", "[", "pos", "]", "del", "_maxes", "[", "pos", "]", "del", "_index", "[", ":", "]", "self", ".", "_expand", "(", "prev", ")", "elif", "len_keys_pos", ":", "_maxes", "[", "pos", "]", "=", "keys_pos", "[", "-", "1", "]", "else", ":", "del", "_keys", "[", "pos", "]", "del", "_lists", "[", "pos", "]", "del", "_maxes", "[", "pos", "]", "del", "_index", "[", ":", "]" ]
Delete the item at the given (pos, idx). Combines lists that are less than half the load level. Updates the index when the sublist length is more than half the load level. This requires decrementing the nodes in a traversal from the leaf node to the root. For an example traversal see self._loc.
[ "Delete", "the", "item", "at", "the", "given", "(", "pos", "idx", ")", "." ]
python
train
25.033898
SmokinCaterpillar/pypet
pypet/storageservice.py
https://github.com/SmokinCaterpillar/pypet/blob/97ad3e80d46dbdea02deeb98ea41f05a19565826/pypet/storageservice.py#L3848-L3996
def _prm_store_parameter_or_result(self, instance, store_data=pypetconstants.STORE_DATA, store_flags=None, overwrite=None, with_links=False, recursive=False, _hdf5_group=None, _newly_created=False, **kwargs): """Stores a parameter or result to hdf5. :param instance: The instance to be stored :param store_data: How to store data :param store_flags: Dictionary containing how to store individual data, usually empty. :param overwrite: Instructions how to overwrite data :param with_links: Placeholder because leaves have no links :param recursive: Placeholder, because leaves have no children :param _hdf5_group: The hdf5 group for storing the parameter or result :param _newly_created: If should be created in a new form """ if store_data == pypetconstants.STORE_NOTHING: return elif store_data == pypetconstants.STORE_DATA_SKIPPING and instance._stored: self._logger.debug('Already found `%s` on disk I will not store it!' % instance.v_full_name) return elif store_data == pypetconstants.OVERWRITE_DATA: if not overwrite: overwrite = True fullname = instance.v_full_name self._logger.debug('Storing `%s`.' % fullname) if _hdf5_group is None: # If no group is provided we might need to create one _hdf5_group, _newly_created = self._all_create_or_get_groups(fullname) # kwargs_flags = {} # Dictionary to change settings # old_kwargs = {} store_dict = {} # If the user did not supply storage flags, we need to set it to the empty dictionary if store_flags is None: store_flags = {} try: # Get the data to store from the instance if not instance.f_is_empty(): store_dict = instance._store() try: # Ask the instance for storage flags instance_flags = instance._store_flags().copy() # copy to avoid modifying the # original data except AttributeError: # If it does not provide any, set it to the empty dictionary instance_flags = {} # User specified flags have priority over the flags from the instance instance_flags.update(store_flags) store_flags = instance_flags # If we still have data in `store_dict` about which we do not know how to store # it, pick default storage flags self._prm_extract_missing_flags(store_dict, store_flags) if overwrite: if isinstance(overwrite, str): overwrite = [overwrite] if overwrite is True: to_delete = [key for key in store_dict.keys() if key in _hdf5_group] self._all_delete_parameter_or_result_or_group(instance, delete_only=to_delete, _hdf5_group=_hdf5_group) elif isinstance(overwrite, (list, tuple)): overwrite_set = set(overwrite) key_set = set(store_dict.keys()) stuff_not_to_be_overwritten = overwrite_set - key_set if overwrite!='v_annotations' and len(stuff_not_to_be_overwritten) > 0: self._logger.warning('Cannot overwrite `%s`, these items are not supposed to ' 'be stored by the leaf node.' % str(stuff_not_to_be_overwritten)) stuff_to_overwrite = overwrite_set & key_set if len(stuff_to_overwrite) > 0: self._all_delete_parameter_or_result_or_group(instance, delete_only=list( stuff_to_overwrite)) else: raise ValueError('Your value of overwrite `%s` is not understood. ' 'Please pass `True` of a list of strings to fine grain ' 'overwriting.' 
% str(overwrite)) self._prm_store_from_dict(fullname, store_dict, _hdf5_group, store_flags, kwargs) # Store annotations self._ann_store_annotations(instance, _hdf5_group, overwrite=overwrite) if _newly_created or overwrite is True: # If we created a new group or the parameter was extended we need to # update the meta information and summary tables self._prm_add_meta_info(instance, _hdf5_group, overwrite=not _newly_created) instance._stored = True #self._logger.debug('Finished Storing `%s`.' % fullname) # Signal completed node loading self._node_processing_timer.signal_update() except: # I anything fails, we want to remove the data of the parameter again self._logger.error( 'Failed storing leaf `%s`. I will remove the hdf5 data I added again.' % fullname) # Delete data for key in store_dict.keys(): if key in _hdf5_group: hdf5_child = _hdf5_group._f_get_child(key) hdf5_child._f_remove(recursive=True) # If no data left delete the whole parameter if _hdf5_group._v_nchildren == 0: _hdf5_group._f_remove(recursive=True) raise
[ "def", "_prm_store_parameter_or_result", "(", "self", ",", "instance", ",", "store_data", "=", "pypetconstants", ".", "STORE_DATA", ",", "store_flags", "=", "None", ",", "overwrite", "=", "None", ",", "with_links", "=", "False", ",", "recursive", "=", "False", ",", "_hdf5_group", "=", "None", ",", "_newly_created", "=", "False", ",", "*", "*", "kwargs", ")", ":", "if", "store_data", "==", "pypetconstants", ".", "STORE_NOTHING", ":", "return", "elif", "store_data", "==", "pypetconstants", ".", "STORE_DATA_SKIPPING", "and", "instance", ".", "_stored", ":", "self", ".", "_logger", ".", "debug", "(", "'Already found `%s` on disk I will not store it!'", "%", "instance", ".", "v_full_name", ")", "return", "elif", "store_data", "==", "pypetconstants", ".", "OVERWRITE_DATA", ":", "if", "not", "overwrite", ":", "overwrite", "=", "True", "fullname", "=", "instance", ".", "v_full_name", "self", ".", "_logger", ".", "debug", "(", "'Storing `%s`.'", "%", "fullname", ")", "if", "_hdf5_group", "is", "None", ":", "# If no group is provided we might need to create one", "_hdf5_group", ",", "_newly_created", "=", "self", ".", "_all_create_or_get_groups", "(", "fullname", ")", "# kwargs_flags = {} # Dictionary to change settings", "# old_kwargs = {}", "store_dict", "=", "{", "}", "# If the user did not supply storage flags, we need to set it to the empty dictionary", "if", "store_flags", "is", "None", ":", "store_flags", "=", "{", "}", "try", ":", "# Get the data to store from the instance", "if", "not", "instance", ".", "f_is_empty", "(", ")", ":", "store_dict", "=", "instance", ".", "_store", "(", ")", "try", ":", "# Ask the instance for storage flags", "instance_flags", "=", "instance", ".", "_store_flags", "(", ")", ".", "copy", "(", ")", "# copy to avoid modifying the", "# original data", "except", "AttributeError", ":", "# If it does not provide any, set it to the empty dictionary", "instance_flags", "=", "{", "}", "# User specified flags have priority over the flags from the instance", "instance_flags", ".", "update", "(", "store_flags", ")", "store_flags", "=", "instance_flags", "# If we still have data in `store_dict` about which we do not know how to store", "# it, pick default storage flags", "self", ".", "_prm_extract_missing_flags", "(", "store_dict", ",", "store_flags", ")", "if", "overwrite", ":", "if", "isinstance", "(", "overwrite", ",", "str", ")", ":", "overwrite", "=", "[", "overwrite", "]", "if", "overwrite", "is", "True", ":", "to_delete", "=", "[", "key", "for", "key", "in", "store_dict", ".", "keys", "(", ")", "if", "key", "in", "_hdf5_group", "]", "self", ".", "_all_delete_parameter_or_result_or_group", "(", "instance", ",", "delete_only", "=", "to_delete", ",", "_hdf5_group", "=", "_hdf5_group", ")", "elif", "isinstance", "(", "overwrite", ",", "(", "list", ",", "tuple", ")", ")", ":", "overwrite_set", "=", "set", "(", "overwrite", ")", "key_set", "=", "set", "(", "store_dict", ".", "keys", "(", ")", ")", "stuff_not_to_be_overwritten", "=", "overwrite_set", "-", "key_set", "if", "overwrite", "!=", "'v_annotations'", "and", "len", "(", "stuff_not_to_be_overwritten", ")", ">", "0", ":", "self", ".", "_logger", ".", "warning", "(", "'Cannot overwrite `%s`, these items are not supposed to '", "'be stored by the leaf node.'", "%", "str", "(", "stuff_not_to_be_overwritten", ")", ")", "stuff_to_overwrite", "=", "overwrite_set", "&", "key_set", "if", "len", "(", "stuff_to_overwrite", ")", ">", "0", ":", "self", ".", "_all_delete_parameter_or_result_or_group", "(", "instance", ",", 
"delete_only", "=", "list", "(", "stuff_to_overwrite", ")", ")", "else", ":", "raise", "ValueError", "(", "'Your value of overwrite `%s` is not understood. '", "'Please pass `True` of a list of strings to fine grain '", "'overwriting.'", "%", "str", "(", "overwrite", ")", ")", "self", ".", "_prm_store_from_dict", "(", "fullname", ",", "store_dict", ",", "_hdf5_group", ",", "store_flags", ",", "kwargs", ")", "# Store annotations", "self", ".", "_ann_store_annotations", "(", "instance", ",", "_hdf5_group", ",", "overwrite", "=", "overwrite", ")", "if", "_newly_created", "or", "overwrite", "is", "True", ":", "# If we created a new group or the parameter was extended we need to", "# update the meta information and summary tables", "self", ".", "_prm_add_meta_info", "(", "instance", ",", "_hdf5_group", ",", "overwrite", "=", "not", "_newly_created", ")", "instance", ".", "_stored", "=", "True", "#self._logger.debug('Finished Storing `%s`.' % fullname)", "# Signal completed node loading", "self", ".", "_node_processing_timer", ".", "signal_update", "(", ")", "except", ":", "# I anything fails, we want to remove the data of the parameter again", "self", ".", "_logger", ".", "error", "(", "'Failed storing leaf `%s`. I will remove the hdf5 data I added again.'", "%", "fullname", ")", "# Delete data", "for", "key", "in", "store_dict", ".", "keys", "(", ")", ":", "if", "key", "in", "_hdf5_group", ":", "hdf5_child", "=", "_hdf5_group", ".", "_f_get_child", "(", "key", ")", "hdf5_child", ".", "_f_remove", "(", "recursive", "=", "True", ")", "# If no data left delete the whole parameter", "if", "_hdf5_group", ".", "_v_nchildren", "==", "0", ":", "_hdf5_group", ".", "_f_remove", "(", "recursive", "=", "True", ")", "raise" ]
Stores a parameter or result to hdf5. :param instance: The instance to be stored :param store_data: How to store data :param store_flags: Dictionary containing how to store individual data, usually empty. :param overwrite: Instructions how to overwrite data :param with_links: Placeholder because leaves have no links :param recursive: Placeholder, because leaves have no children :param _hdf5_group: The hdf5 group for storing the parameter or result :param _newly_created: If should be created in a new form
[ "Stores", "a", "parameter", "or", "result", "to", "hdf5", "." ]
python
test
41.060403
openvax/mhctools
mhctools/netmhc_pan.py
https://github.com/openvax/mhctools/blob/b329b4dccd60fae41296816b8cbfe15d6ca07e67/mhctools/netmhc_pan.py#L28-L64
def NetMHCpan( alleles, program_name="netMHCpan", process_limit=-1, default_peptide_lengths=[9], extra_flags=[]): """ This function wraps NetMHCpan28 and NetMHCpan3 to automatically detect which class to use, with the help of the miraculous and strange '--version' netmhcpan argument. """ # convert to str since Python3 returns a `bytes` object. # The '_MHCTOOLS_VERSION_SNIFFING' here is meaningless, but it is necessary # to call `netmhcpan --version` with some argument, otherwise it hangs. with open(os.devnull, 'w') as devnull: output = check_output([ program_name, "--version", "_MHCTOOLS_VERSION_SNIFFING"], stderr=devnull) output_str = output.decode("ascii", "ignore") common_kwargs = { "alleles": alleles, "default_peptide_lengths": default_peptide_lengths, "program_name": program_name, "process_limit": process_limit, "extra_flags": extra_flags, } if "NetMHCpan version 2.8" in output_str: return NetMHCpan28(**common_kwargs) elif "NetMHCpan version 3.0" in output_str: return NetMHCpan3(**common_kwargs) elif "NetMHCpan version 4.0" in output_str: return NetMHCpan4(**common_kwargs) else: raise RuntimeError( "This software expects NetMHCpan version 2.8, 3.0, or 4.0")
[ "def", "NetMHCpan", "(", "alleles", ",", "program_name", "=", "\"netMHCpan\"", ",", "process_limit", "=", "-", "1", ",", "default_peptide_lengths", "=", "[", "9", "]", ",", "extra_flags", "=", "[", "]", ")", ":", "# convert to str since Python3 returns a `bytes` object.", "# The '_MHCTOOLS_VERSION_SNIFFING' here is meaningless, but it is necessary", "# to call `netmhcpan --version` with some argument, otherwise it hangs.", "with", "open", "(", "os", ".", "devnull", ",", "'w'", ")", "as", "devnull", ":", "output", "=", "check_output", "(", "[", "program_name", ",", "\"--version\"", ",", "\"_MHCTOOLS_VERSION_SNIFFING\"", "]", ",", "stderr", "=", "devnull", ")", "output_str", "=", "output", ".", "decode", "(", "\"ascii\"", ",", "\"ignore\"", ")", "common_kwargs", "=", "{", "\"alleles\"", ":", "alleles", ",", "\"default_peptide_lengths\"", ":", "default_peptide_lengths", ",", "\"program_name\"", ":", "program_name", ",", "\"process_limit\"", ":", "process_limit", ",", "\"extra_flags\"", ":", "extra_flags", ",", "}", "if", "\"NetMHCpan version 2.8\"", "in", "output_str", ":", "return", "NetMHCpan28", "(", "*", "*", "common_kwargs", ")", "elif", "\"NetMHCpan version 3.0\"", "in", "output_str", ":", "return", "NetMHCpan3", "(", "*", "*", "common_kwargs", ")", "elif", "\"NetMHCpan version 4.0\"", "in", "output_str", ":", "return", "NetMHCpan4", "(", "*", "*", "common_kwargs", ")", "else", ":", "raise", "RuntimeError", "(", "\"This software expects NetMHCpan version 2.8, 3.0, or 4.0\"", ")" ]
This function wraps NetMHCpan28 and NetMHCpan3 to automatically detect which class to use, with the help of the miraculous and strange '--version' netmhcpan argument.
[ "This", "function", "wraps", "NetMHCpan28", "and", "NetMHCpan3", "to", "automatically", "detect", "which", "class", "to", "use", "with", "the", "help", "of", "the", "miraculous", "and", "strange", "--", "version", "netmhcpan", "argument", "." ]
python
valid
36.837838
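The version-sniffing trick above (run the binary with --version plus a dummy argument, then dispatch on the reported version string) can be sketched generically; the dispatch table and the commented-out usage are placeholders, since an actual netMHCpan installation is required for the real call.

import os
from subprocess import check_output

def sniff_version(program_name, dispatch_table):
    # Call '<tool> --version <dummy>' and pick a handler based on the reported version.
    with open(os.devnull, 'w') as devnull:
        output = check_output([program_name, '--version', '_VERSION_SNIFFING'],
                              stderr=devnull)
    output_str = output.decode('ascii', 'ignore')
    for marker, handler in dispatch_table.items():
        if marker in output_str:
            return handler
    raise RuntimeError('Unsupported version: %r' % output_str)

# Hypothetical usage:
#   handler_class = sniff_version('netMHCpan', {'version 2.8': NetMHCpan28,
#                                               'version 3.0': NetMHCpan3,
#                                               'version 4.0': NetMHCpan4})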
senaite/senaite.core
bika/lims/api/__init__.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/api/__init__.py#L545-L558
def get_parent_path(brain_or_object): """Calculate the physical parent path of this object :param brain_or_object: A single catalog brain or content object :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain :returns: Physical path of the parent object :rtype: string """ if is_portal(brain_or_object): return get_path(get_portal()) if is_brain(brain_or_object): path = get_path(brain_or_object) return path.rpartition("/")[0] return get_path(get_object(brain_or_object).aq_parent)
[ "def", "get_parent_path", "(", "brain_or_object", ")", ":", "if", "is_portal", "(", "brain_or_object", ")", ":", "return", "get_path", "(", "get_portal", "(", ")", ")", "if", "is_brain", "(", "brain_or_object", ")", ":", "path", "=", "get_path", "(", "brain_or_object", ")", "return", "path", ".", "rpartition", "(", "\"/\"", ")", "[", "0", "]", "return", "get_path", "(", "get_object", "(", "brain_or_object", ")", ".", "aq_parent", ")" ]
Calculate the physical parent path of this object :param brain_or_object: A single catalog brain or content object :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain :returns: Physical path of the parent object :rtype: string
[ "Calculate", "the", "physical", "parent", "path", "of", "this", "object" ]
python
train
39.142857
KnorrFG/pyparadigm
pyparadigm/eventlistener.py
https://github.com/KnorrFG/pyparadigm/blob/69944cdf3ce2f6414ae1aa1d27a0d8c6e5fb3fd3/pyparadigm/eventlistener.py#L295-L303
def wait_for_unicode_char(self, ignored_chars=None, timeout=0): """Returns a str that contains the single character that was pressed. This already respects modifier keys and keyboard layouts. If timeout is not none and no key is pressed within the specified timeout, None is returned. If a key is ingnored_chars it will be ignored. As argument for irgnored_chars any object that has a __contains__ method can be used, e.g. a string, a set, a list, etc""" return self.listen_until_return(Handler.unicode_char(ignored_chars), timeout=timeout)
[ "def", "wait_for_unicode_char", "(", "self", ",", "ignored_chars", "=", "None", ",", "timeout", "=", "0", ")", ":", "return", "self", ".", "listen_until_return", "(", "Handler", ".", "unicode_char", "(", "ignored_chars", ")", ",", "timeout", "=", "timeout", ")" ]
Returns a str that contains the single character that was pressed. This already respects modifier keys and keyboard layouts. If timeout is not None and no key is pressed within the specified timeout, None is returned. If a key is in ignored_chars it will be ignored. As argument for ignored_chars any object that has a __contains__ method can be used, e.g. a string, a set, a list, etc.
[ "Returns", "a", "str", "that", "contains", "the", "single", "character", "that", "was", "pressed", ".", "This", "already", "respects", "modifier", "keys", "and", "keyboard", "layouts", ".", "If", "timeout", "is", "not", "none", "and", "no", "key", "is", "pressed", "within", "the", "specified", "timeout", "None", "is", "returned", ".", "If", "a", "key", "is", "ingnored_chars", "it", "will", "be", "ignored", ".", "As", "argument", "for", "irgnored_chars", "any", "object", "that", "has", "a", "__contains__", "method", "can", "be", "used", "e", ".", "g", ".", "a", "string", "a", "set", "a", "list", "etc" ]
python
train
70
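A hedged usage sketch for the listener API described above; it assumes pyparadigm's EventListener class and an initialised pygame event loop, neither of which is set up here.

from pyparadigm.eventlistener import EventListener  # class name assumed from the module path

listener = EventListener()
# Wait up to 5 seconds for a key press, ignoring digit keys; None on timeout.
char = listener.wait_for_unicode_char(ignored_chars="0123456789", timeout=5)
if char is not None:
    print("pressed:", char)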
abilian/abilian-core
abilian/web/frontend.py
https://github.com/abilian/abilian-core/blob/0a71275bf108c3d51e13ca9e093c0249235351e3/abilian/web/frontend.py#L178-L183
def _check_view_permission(self, view): """ :param view: a :class:`ObjectView` class or instance """ security = get_service("security") return security.has_permission(current_user, view.permission, self.obj)
[ "def", "_check_view_permission", "(", "self", ",", "view", ")", ":", "security", "=", "get_service", "(", "\"security\"", ")", "return", "security", ".", "has_permission", "(", "current_user", ",", "view", ".", "permission", ",", "self", ".", "obj", ")" ]
:param view: a :class:`ObjectView` class or instance
[ ":", "param", "view", ":", "a", ":", "class", ":", "ObjectView", "class", "or", "instance" ]
python
train
40.333333
timothyb0912/pylogit
pylogit/base_multinomial_cm_v2.py
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/base_multinomial_cm_v2.py#L1341-L1375
def _adjust_inferential_results_for_parameter_constraints(self, constraints): """ Ensure that parameters that were constrained during estimation do not have any values showed for inferential results. After all, no inference was performed. Parameters ---------- constraints : list of ints, or None. If list, should contain the positions in the array of all estimated parameters that were constrained to their initial values. Returns ------- None. """ if constraints is not None: # Ensure the model object has inferential results inferential_attributes = ["standard_errors", "tvalues", "pvalues", "robust_std_errs", "robust_t_stats", "robust_p_vals"] assert all([hasattr(self, x) for x in inferential_attributes]) assert hasattr(self, "params") all_names = self.params.index.tolist() for series in [getattr(self, x) for x in inferential_attributes]: for pos in constraints: series.loc[all_names[pos]] = np.nan return None
[ "def", "_adjust_inferential_results_for_parameter_constraints", "(", "self", ",", "constraints", ")", ":", "if", "constraints", "is", "not", "None", ":", "# Ensure the model object has inferential results", "inferential_attributes", "=", "[", "\"standard_errors\"", ",", "\"tvalues\"", ",", "\"pvalues\"", ",", "\"robust_std_errs\"", ",", "\"robust_t_stats\"", ",", "\"robust_p_vals\"", "]", "assert", "all", "(", "[", "hasattr", "(", "self", ",", "x", ")", "for", "x", "in", "inferential_attributes", "]", ")", "assert", "hasattr", "(", "self", ",", "\"params\"", ")", "all_names", "=", "self", ".", "params", ".", "index", ".", "tolist", "(", ")", "for", "series", "in", "[", "getattr", "(", "self", ",", "x", ")", "for", "x", "in", "inferential_attributes", "]", ":", "for", "pos", "in", "constraints", ":", "series", ".", "loc", "[", "all_names", "[", "pos", "]", "]", "=", "np", ".", "nan", "return", "None" ]
Ensure that parameters that were constrained during estimation do not have any values showed for inferential results. After all, no inference was performed. Parameters ---------- constraints : list of ints, or None. If list, should contain the positions in the array of all estimated parameters that were constrained to their initial values. Returns ------- None.
[ "Ensure", "that", "parameters", "that", "were", "constrained", "during", "estimation", "do", "not", "have", "any", "values", "showed", "for", "inferential", "results", ".", "After", "all", "no", "inference", "was", "performed", "." ]
python
train
39.371429
PmagPy/PmagPy
dialogs/magic_grid3.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dialogs/magic_grid3.py#L236-L249
def on_edit_grid(self, event): """sets self.changes to true when user edits the grid. provides down and up key functionality for exiting the editor""" if not self.changes: self.changes = {event.Row} else: self.changes.add(event.Row) #self.changes = True try: editor = event.GetControl() editor.Bind(wx.EVT_KEY_DOWN, self.onEditorKey) except AttributeError: # if it's a EVT_GRID_EDITOR_SHOWN, it doesn't have the GetControl method pass
[ "def", "on_edit_grid", "(", "self", ",", "event", ")", ":", "if", "not", "self", ".", "changes", ":", "self", ".", "changes", "=", "{", "event", ".", "Row", "}", "else", ":", "self", ".", "changes", ".", "add", "(", "event", ".", "Row", ")", "#self.changes = True", "try", ":", "editor", "=", "event", ".", "GetControl", "(", ")", "editor", ".", "Bind", "(", "wx", ".", "EVT_KEY_DOWN", ",", "self", ".", "onEditorKey", ")", "except", "AttributeError", ":", "# if it's a EVT_GRID_EDITOR_SHOWN, it doesn't have the GetControl method", "pass" ]
sets self.changes to true when user edits the grid. provides down and up key functionality for exiting the editor
[ "sets", "self", ".", "changes", "to", "true", "when", "user", "edits", "the", "grid", ".", "provides", "down", "and", "up", "key", "functionality", "for", "exiting", "the", "editor" ]
python
train
39.214286
smarie/python-parsyfiles
parsyfiles/parsing_core_api.py
https://github.com/smarie/python-parsyfiles/blob/344b37e1151e8d4e7c2ee49ae09d6568715ae64e/parsyfiles/parsing_core_api.py#L243-L269
def create_for_caught_error(parser: _BaseParserDeclarationForRegistries, desired_type: Type[T], obj: PersistedObject, caught: Exception, options: Dict[str, Dict[str, Any]]): """ Helper method provided because we actually can't put that in the constructor, it creates a bug in Nose tests https://github.com/nose-devs/nose/issues/725 :param parser: :param desired_type: :param obj: :param caught: :param options: :return: """ try: typ = get_pretty_type_str(desired_type) except: typ = str(desired_type) e = ParsingException('Error while parsing ' + str(obj) + ' as a ' + typ + ' with parser \'' + str(parser) + '\' using options=(' + str(options) + ') : caught \n ' + str(caught.__class__.__name__) + ' : ' + str(caught))\ .with_traceback(caught.__traceback__) # 'from e' was hiding the inner traceback. This is much better for debug e.__cause__ = None # e.__cause__ = caught # store the exception still, to be able to handle it later e.caught = caught return e
[ "def", "create_for_caught_error", "(", "parser", ":", "_BaseParserDeclarationForRegistries", ",", "desired_type", ":", "Type", "[", "T", "]", ",", "obj", ":", "PersistedObject", ",", "caught", ":", "Exception", ",", "options", ":", "Dict", "[", "str", ",", "Dict", "[", "str", ",", "Any", "]", "]", ")", ":", "try", ":", "typ", "=", "get_pretty_type_str", "(", "desired_type", ")", "except", ":", "typ", "=", "str", "(", "desired_type", ")", "e", "=", "ParsingException", "(", "'Error while parsing '", "+", "str", "(", "obj", ")", "+", "' as a '", "+", "typ", "+", "' with parser \\''", "+", "str", "(", "parser", ")", "+", "'\\' using options=('", "+", "str", "(", "options", ")", "+", "') : caught \\n '", "+", "str", "(", "caught", ".", "__class__", ".", "__name__", ")", "+", "' : '", "+", "str", "(", "caught", ")", ")", ".", "with_traceback", "(", "caught", ".", "__traceback__", ")", "# 'from e' was hiding the inner traceback. This is much better for debug", "e", ".", "__cause__", "=", "None", "# e.__cause__ = caught", "# store the exception still, to be able to handle it later", "e", ".", "caught", "=", "caught", "return", "e" ]
Helper method provided because we actually can't put that in the constructor, it creates a bug in Nose tests https://github.com/nose-devs/nose/issues/725 :param parser: :param desired_type: :param obj: :param caught: :param options: :return:
[ "Helper", "method", "provided", "because", "we", "actually", "can", "t", "put", "that", "in", "the", "constructor", "it", "creates", "a", "bug", "in", "Nose", "tests", "https", ":", "//", "github", ".", "com", "/", "nose", "-", "devs", "/", "nose", "/", "issues", "/", "725" ]
python
train
44.888889
FutunnOpen/futuquant
futuquant/common/event/eventEngine.py
https://github.com/FutunnOpen/futuquant/blob/1512b321845f92ec9c578ce2689aa4e8482669e4/futuquant/common/event/eventEngine.py#L176-L179
def unregisterGeneralHandler(self, handler): """注销通用事件处理函数监听""" if handler in self.__generalHandlers: self.__generalHandlers.remove(handler)
[ "def", "unregisterGeneralHandler", "(", "self", ",", "handler", ")", ":", "if", "handler", "in", "self", ".", "__generalHandlers", ":", "self", ".", "__generalHandlers", ".", "remove", "(", "handler", ")" ]
注销通用事件处理函数监听 (unregister the general/catch-all event handler listener)
[ "注销通用事件处理函数监听" ]
python
train
41.25
shakefu/pyconfig
pyconfig/__init__.py
https://github.com/shakefu/pyconfig/blob/000cb127db51e03cb4070aae6943e956193cbad5/pyconfig/__init__.py#L603-L619
def env(key, default): """ Helper to try to get a setting from the environment, or pyconfig, or finally use a provided default. """ value = os.environ.get(key, None) if value is not None: log.info(' %s = %r', key.lower().replace('_', '.'), value) return value key = key.lower().replace('_', '.') value = get(key) if value is not None: return value return default
[ "def", "env", "(", "key", ",", "default", ")", ":", "value", "=", "os", ".", "environ", ".", "get", "(", "key", ",", "None", ")", "if", "value", "is", "not", "None", ":", "log", ".", "info", "(", "' %s = %r'", ",", "key", ".", "lower", "(", ")", ".", "replace", "(", "'_'", ",", "'.'", ")", ",", "value", ")", "return", "value", "key", "=", "key", ".", "lower", "(", ")", ".", "replace", "(", "'_'", ",", "'.'", ")", "value", "=", "get", "(", "key", ")", "if", "value", "is", "not", "None", ":", "return", "value", "return", "default" ]
Helper to try to get a setting from the environment, or pyconfig, or finally use a provided default.
[ "Helper", "to", "try", "to", "get", "a", "setting", "from", "the", "environment", "or", "pyconfig", "or", "finally", "use", "a", "provided", "default", "." ]
python
valid
24.470588
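A small sketch of the lookup order the helper implements (environment variable first, then the pyconfig key with '_' lowered to '.', then the default); the setting names are hypothetical.

import os
import pyconfig

os.environ["DATABASE_HOST"] = "db.example.com"      # hypothetical env var
host = pyconfig.env("DATABASE_HOST", "localhost")    # -> "db.example.com"
port = pyconfig.env("DATABASE_PORT", 5432)           # nothing set -> default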
jobovy/galpy
galpy/util/bovy_plot.py
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/util/bovy_plot.py#L121-L183
def bovy_hist(x,xlabel=None,ylabel=None,overplot=False,**kwargs): """ NAME: bovy_hist PURPOSE: wrapper around matplotlib's hist function INPUT: x - array to histogram xlabel - (raw string!) x-axis label, LaTeX math mode, no $s needed ylabel - (raw string!) y-axis label, LaTeX math mode, no $s needed yrange - set the y-axis range +all pyplot.hist keywords OUTPUT: (from the matplotlib docs: http://matplotlib.sourceforge.net/api/pyplot_api.html#matplotlib.pyplot.hist) The return value is a tuple (n, bins, patches) or ([n0, n1, ...], bins, [patches0, patches1,...]) if the input contains multiple data HISTORY: 2009-12-23 - Written - Bovy (NYU) """ if not overplot: pyplot.figure() if 'xrange' in kwargs: xlimits= kwargs.pop('xrange') if not 'range' in kwargs: kwargs['range']= xlimits xrangeSet= True else: xrangeSet= False if 'yrange' in kwargs: ylimits= kwargs.pop('yrange') yrangeSet= True else: yrangeSet= False out= pyplot.hist(x,**kwargs) if overplot: return out _add_axislabels(xlabel,ylabel) if not 'range' in kwargs and not xrangeSet: if isinstance(x,list): xlimits=(sc.array(x).min(),sc.array(x).max()) else: pyplot.xlim(x.min(),x.max()) elif xrangeSet: pyplot.xlim(xlimits) else: pyplot.xlim(kwargs['range']) if yrangeSet: pyplot.ylim(ylimits) _add_ticks() return out
[ "def", "bovy_hist", "(", "x", ",", "xlabel", "=", "None", ",", "ylabel", "=", "None", ",", "overplot", "=", "False", ",", "*", "*", "kwargs", ")", ":", "if", "not", "overplot", ":", "pyplot", ".", "figure", "(", ")", "if", "'xrange'", "in", "kwargs", ":", "xlimits", "=", "kwargs", ".", "pop", "(", "'xrange'", ")", "if", "not", "'range'", "in", "kwargs", ":", "kwargs", "[", "'range'", "]", "=", "xlimits", "xrangeSet", "=", "True", "else", ":", "xrangeSet", "=", "False", "if", "'yrange'", "in", "kwargs", ":", "ylimits", "=", "kwargs", ".", "pop", "(", "'yrange'", ")", "yrangeSet", "=", "True", "else", ":", "yrangeSet", "=", "False", "out", "=", "pyplot", ".", "hist", "(", "x", ",", "*", "*", "kwargs", ")", "if", "overplot", ":", "return", "out", "_add_axislabels", "(", "xlabel", ",", "ylabel", ")", "if", "not", "'range'", "in", "kwargs", "and", "not", "xrangeSet", ":", "if", "isinstance", "(", "x", ",", "list", ")", ":", "xlimits", "=", "(", "sc", ".", "array", "(", "x", ")", ".", "min", "(", ")", ",", "sc", ".", "array", "(", "x", ")", ".", "max", "(", ")", ")", "else", ":", "pyplot", ".", "xlim", "(", "x", ".", "min", "(", ")", ",", "x", ".", "max", "(", ")", ")", "elif", "xrangeSet", ":", "pyplot", ".", "xlim", "(", "xlimits", ")", "else", ":", "pyplot", ".", "xlim", "(", "kwargs", "[", "'range'", "]", ")", "if", "yrangeSet", ":", "pyplot", ".", "ylim", "(", "ylimits", ")", "_add_ticks", "(", ")", "return", "out" ]
NAME: bovy_hist PURPOSE: wrapper around matplotlib's hist function INPUT: x - array to histogram xlabel - (raw string!) x-axis label, LaTeX math mode, no $s needed ylabel - (raw string!) y-axis label, LaTeX math mode, no $s needed yrange - set the y-axis range +all pyplot.hist keywords OUTPUT: (from the matplotlib docs: http://matplotlib.sourceforge.net/api/pyplot_api.html#matplotlib.pyplot.hist) The return value is a tuple (n, bins, patches) or ([n0, n1, ...], bins, [patches0, patches1,...]) if the input contains multiple data HISTORY: 2009-12-23 - Written - Bovy (NYU)
[ "NAME", ":" ]
python
train
24.31746
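A hedged usage sketch: bovy_hist forwards most keyword arguments to pyplot.hist, so a call looks like a plain hist call plus the raw-string axis labels.

import numpy as np
from galpy.util import bovy_plot   # module path taken from the record above

x = np.random.normal(size=1000)
bovy_plot.bovy_hist(x, bins=30, histtype='step',
                    xlabel=r'x', ylabel=r'N(x)')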
AtteqCom/zsl
src/zsl/interface/cli.py
https://github.com/AtteqCom/zsl/blob/ab51a96da1780ff642912396d4b85bdcb72560c1/src/zsl/interface/cli.py#L22-L32
def _get_version(ctx, _, value): """Click callback for option to show current ZSL version.""" if not value or ctx.resilient_parsing: return message = 'Zsl %(version)s\nPython %(python_version)s' click.echo(message % { 'version': version, 'python_version': sys.version, }, color=ctx.color) ctx.exit()
[ "def", "_get_version", "(", "ctx", ",", "_", ",", "value", ")", ":", "if", "not", "value", "or", "ctx", ".", "resilient_parsing", ":", "return", "message", "=", "'Zsl %(version)s\\nPython %(python_version)s'", "click", ".", "echo", "(", "message", "%", "{", "'version'", ":", "version", ",", "'python_version'", ":", "sys", ".", "version", ",", "}", ",", "color", "=", "ctx", ".", "color", ")", "ctx", ".", "exit", "(", ")" ]
Click callback for option to show current ZSL version.
[ "Click", "callback", "for", "option", "to", "show", "current", "ZSL", "version", "." ]
python
train
30.727273
justinmayer/django-autoslug
autoslug/fields.py
https://github.com/justinmayer/django-autoslug/blob/b3991daddf5a476a829b48c28afad4ae08a18179/autoslug/fields.py#L308-L315
def south_field_triple(self): "Returns a suitable description of this field for South." args, kwargs = introspector(self) kwargs.update({ 'populate_from': 'None' if callable(self.populate_from) else repr(self.populate_from), 'unique_with': repr(self.unique_with) }) return ('autoslug.fields.AutoSlugField', args, kwargs)
[ "def", "south_field_triple", "(", "self", ")", ":", "args", ",", "kwargs", "=", "introspector", "(", "self", ")", "kwargs", ".", "update", "(", "{", "'populate_from'", ":", "'None'", "if", "callable", "(", "self", ".", "populate_from", ")", "else", "repr", "(", "self", ".", "populate_from", ")", ",", "'unique_with'", ":", "repr", "(", "self", ".", "unique_with", ")", "}", ")", "return", "(", "'autoslug.fields.AutoSlugField'", ",", "args", ",", "kwargs", ")" ]
Returns a suitable description of this field for South.
[ "Returns", "a", "suitable", "description", "of", "this", "field", "for", "South", "." ]
python
train
47.125
zqfang/GSEApy
gseapy/enrichr.py
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/enrichr.py#L250-L300
def enrich(self, gmt): """use local mode p = p-value computed using the Fisher exact test (Hypergeometric test) Not implemented here: combine score = log(p)·z see here: http://amp.pharm.mssm.edu/Enrichr/help#background&q=4 columns contain: Term Overlap P-value Adjusted_P-value Genes """ if isscalar(self.background): if isinstance(self.background, int) or self.background.isdigit(): self._bg = int(self.background) elif isinstance(self.background, str): # self.background = set(reduce(lambda x,y: x+y, gmt.values(),[])) self._bg = self.get_background() self._logger.info("Background: found %s genes"%(len(self._bg))) else: raise Exception("Unsupported background data type") else: # handle array object: nd.array, list, tuple, set, Series try: it = iter(self.background) self._bg = set(self.background) except TypeError: self._logger.error("Unsupported background data type") # statistical testing hgtest = list(calc_pvalues(query=self._gls, gene_sets=gmt, background=self._bg)) if len(hgtest) > 0: terms, pvals, olsz, gsetsz, genes = hgtest fdrs, rej = multiple_testing_correction(ps = pvals, alpha=self.cutoff, method='benjamini-hochberg') # save to a dataframe odict = OrderedDict() odict['Term'] = terms odict['Overlap'] = list(map(lambda h,g: "%s/%s"%(h, g), olsz, gsetsz)) odict['P-value'] = pvals odict['Adjusted P-value'] = fdrs # odict['Reject (FDR< %s)'%self.cutoff ] = rej odict['Genes'] = [";".join(g) for g in genes] res = pd.DataFrame(odict) return res return
[ "def", "enrich", "(", "self", ",", "gmt", ")", ":", "if", "isscalar", "(", "self", ".", "background", ")", ":", "if", "isinstance", "(", "self", ".", "background", ",", "int", ")", "or", "self", ".", "background", ".", "isdigit", "(", ")", ":", "self", ".", "_bg", "=", "int", "(", "self", ".", "background", ")", "elif", "isinstance", "(", "self", ".", "background", ",", "str", ")", ":", "# self.background = set(reduce(lambda x,y: x+y, gmt.values(),[]))", "self", ".", "_bg", "=", "self", ".", "get_background", "(", ")", "self", ".", "_logger", ".", "info", "(", "\"Background: found %s genes\"", "%", "(", "len", "(", "self", ".", "_bg", ")", ")", ")", "else", ":", "raise", "Exception", "(", "\"Unsupported background data type\"", ")", "else", ":", "# handle array object: nd.array, list, tuple, set, Series", "try", ":", "it", "=", "iter", "(", "self", ".", "background", ")", "self", ".", "_bg", "=", "set", "(", "self", ".", "background", ")", "except", "TypeError", ":", "self", ".", "_logger", ".", "error", "(", "\"Unsupported background data type\"", ")", "# statistical testing", "hgtest", "=", "list", "(", "calc_pvalues", "(", "query", "=", "self", ".", "_gls", ",", "gene_sets", "=", "gmt", ",", "background", "=", "self", ".", "_bg", ")", ")", "if", "len", "(", "hgtest", ")", ">", "0", ":", "terms", ",", "pvals", ",", "olsz", ",", "gsetsz", ",", "genes", "=", "hgtest", "fdrs", ",", "rej", "=", "multiple_testing_correction", "(", "ps", "=", "pvals", ",", "alpha", "=", "self", ".", "cutoff", ",", "method", "=", "'benjamini-hochberg'", ")", "# save to a dataframe", "odict", "=", "OrderedDict", "(", ")", "odict", "[", "'Term'", "]", "=", "terms", "odict", "[", "'Overlap'", "]", "=", "list", "(", "map", "(", "lambda", "h", ",", "g", ":", "\"%s/%s\"", "%", "(", "h", ",", "g", ")", ",", "olsz", ",", "gsetsz", ")", ")", "odict", "[", "'P-value'", "]", "=", "pvals", "odict", "[", "'Adjusted P-value'", "]", "=", "fdrs", "# odict['Reject (FDR< %s)'%self.cutoff ] = rej", "odict", "[", "'Genes'", "]", "=", "[", "\";\"", ".", "join", "(", "g", ")", "for", "g", "in", "genes", "]", "res", "=", "pd", ".", "DataFrame", "(", "odict", ")", "return", "res", "return" ]
use local mode p = p-value computed using the Fisher exact test (Hypergeometric test) Not implemented here: combine score = log(p)·z see here: http://amp.pharm.mssm.edu/Enrichr/help#background&q=4 columns contain: Term Overlap P-value Adjusted_P-value Genes
[ "use", "local", "mode", "p", "=", "p", "-", "value", "computed", "using", "the", "Fisher", "exact", "test", "(", "Hypergeometric", "test", ")" ]
python
test
40.529412
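The docstring names the Fisher exact / hypergeometric test; the sketch below illustrates that statistic with scipy for one hypothetical term, and is not gseapy's internal calc_pvalues implementation.

from scipy.stats import hypergeom

background = 20000   # hypothetical background size
gene_set = 300       # genes annotated to the term
query = 150          # genes in the user's list
overlap = 12         # genes shared by the two
# P(X >= overlap) for X ~ Hypergeom(M=background, n=gene_set, N=query)
pval = hypergeom.sf(overlap - 1, background, gene_set, query)
print(pval)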
tensorflow/datasets
tensorflow_datasets/core/registered.py
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/registered.py#L137-L172
def builder(name, **builder_init_kwargs): """Fetches a `tfds.core.DatasetBuilder` by string name. Args: name: `str`, the registered name of the `DatasetBuilder` (the snake case version of the class name). This can be either `"dataset_name"` or `"dataset_name/config_name"` for datasets with `BuilderConfig`s. As a convenience, this string may contain comma-separated keyword arguments for the builder. For example `"foo_bar/a=True,b=3"` would use the `FooBar` dataset passing the keyword arguments `a=True` and `b=3` (for builders with configs, it would be `"foo_bar/zoo/a=True,b=3"` to use the `"zoo"` config and pass to the builder keyword arguments `a=True` and `b=3`). **builder_init_kwargs: `dict` of keyword arguments passed to the `DatasetBuilder`. These will override keyword arguments passed in `name`, if any. Returns: A `tfds.core.DatasetBuilder`. Raises: DatasetNotFoundError: if `name` is unrecognized. """ name, builder_kwargs = _dataset_name_and_kwargs_from_name_str(name) builder_kwargs.update(builder_init_kwargs) if name in _ABSTRACT_DATASET_REGISTRY: raise DatasetNotFoundError(name, is_abstract=True) if name in _IN_DEVELOPMENT_REGISTRY: raise DatasetNotFoundError(name, in_development=True) if name not in _DATASET_REGISTRY: raise DatasetNotFoundError(name) try: return _DATASET_REGISTRY[name](**builder_kwargs) except BaseException: logging.error("Failed to construct dataset %s", name) raise
[ "def", "builder", "(", "name", ",", "*", "*", "builder_init_kwargs", ")", ":", "name", ",", "builder_kwargs", "=", "_dataset_name_and_kwargs_from_name_str", "(", "name", ")", "builder_kwargs", ".", "update", "(", "builder_init_kwargs", ")", "if", "name", "in", "_ABSTRACT_DATASET_REGISTRY", ":", "raise", "DatasetNotFoundError", "(", "name", ",", "is_abstract", "=", "True", ")", "if", "name", "in", "_IN_DEVELOPMENT_REGISTRY", ":", "raise", "DatasetNotFoundError", "(", "name", ",", "in_development", "=", "True", ")", "if", "name", "not", "in", "_DATASET_REGISTRY", ":", "raise", "DatasetNotFoundError", "(", "name", ")", "try", ":", "return", "_DATASET_REGISTRY", "[", "name", "]", "(", "*", "*", "builder_kwargs", ")", "except", "BaseException", ":", "logging", ".", "error", "(", "\"Failed to construct dataset %s\"", ",", "name", ")", "raise" ]
Fetches a `tfds.core.DatasetBuilder` by string name. Args: name: `str`, the registered name of the `DatasetBuilder` (the snake case version of the class name). This can be either `"dataset_name"` or `"dataset_name/config_name"` for datasets with `BuilderConfig`s. As a convenience, this string may contain comma-separated keyword arguments for the builder. For example `"foo_bar/a=True,b=3"` would use the `FooBar` dataset passing the keyword arguments `a=True` and `b=3` (for builders with configs, it would be `"foo_bar/zoo/a=True,b=3"` to use the `"zoo"` config and pass to the builder keyword arguments `a=True` and `b=3`). **builder_init_kwargs: `dict` of keyword arguments passed to the `DatasetBuilder`. These will override keyword arguments passed in `name`, if any. Returns: A `tfds.core.DatasetBuilder`. Raises: DatasetNotFoundError: if `name` is unrecognized.
[ "Fetches", "a", "tfds", ".", "core", ".", "DatasetBuilder", "by", "string", "name", "." ]
python
train
41.833333
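Typical call shapes for the string-name forms described in the docstring; the dataset names are examples and must exist in the registry at runtime.

import tensorflow_datasets as tfds

builder = tfds.builder("mnist")                     # plain dataset name
# builder = tfds.builder("wikipedia/20190301.en")   # name with a BuilderConfig
builder.download_and_prepare()
ds = builder.as_dataset(split="train")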
veripress/veripress
veripress/helpers.py
https://github.com/veripress/veripress/blob/9e3df3a10eb1db32da596bf52118fe6acbe4b14a/veripress/helpers.py#L60-L78
def timezone_from_str(tz_str): """ Convert a timezone string to a timezone object. :param tz_str: string with format 'Asia/Shanghai' or 'UTC±[hh]:[mm]' :return: a timezone object (tzinfo) """ m = re.match(r'UTC([+|-]\d{1,2}):(\d{2})', tz_str) if m: # in format 'UTC±[hh]:[mm]' delta_h = int(m.group(1)) delta_m = int(m.group(2)) if delta_h >= 0 else -int(m.group(2)) return timezone(timedelta(hours=delta_h, minutes=delta_m)) # in format 'Asia/Shanghai' try: return pytz.timezone(tz_str) except pytz.exceptions.UnknownTimeZoneError: return None
[ "def", "timezone_from_str", "(", "tz_str", ")", ":", "m", "=", "re", ".", "match", "(", "r'UTC([+|-]\\d{1,2}):(\\d{2})'", ",", "tz_str", ")", "if", "m", ":", "# in format 'UTC±[hh]:[mm]'", "delta_h", "=", "int", "(", "m", ".", "group", "(", "1", ")", ")", "delta_m", "=", "int", "(", "m", ".", "group", "(", "2", ")", ")", "if", "delta_h", ">=", "0", "else", "-", "int", "(", "m", ".", "group", "(", "2", ")", ")", "return", "timezone", "(", "timedelta", "(", "hours", "=", "delta_h", ",", "minutes", "=", "delta_m", ")", ")", "# in format 'Asia/Shanghai'", "try", ":", "return", "pytz", ".", "timezone", "(", "tz_str", ")", "except", "pytz", ".", "exceptions", ".", "UnknownTimeZoneError", ":", "return", "None" ]
Convert a timezone string to a timezone object. :param tz_str: string with format 'Asia/Shanghai' or 'UTC±[hh]:[mm]' :return: a timezone object (tzinfo)
[ "Convert", "a", "timezone", "string", "to", "a", "timezone", "object", "." ]
python
train
32.421053
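A quick sketch of the two accepted formats; it assumes timezone_from_str has been imported from veripress.helpers as in the record above.

from veripress.helpers import timezone_from_str

tz_fixed = timezone_from_str("UTC+08:00")       # fixed-offset timezone
tz_named = timezone_from_str("Asia/Shanghai")   # pytz timezone
tz_bad = timezone_from_str("Not/AZone")         # unknown name -> None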
LabKey/labkey-api-python
labkey/security.py
https://github.com/LabKey/labkey-api-python/blob/3c8d393384d7cbb2785f8a7f5fe34007b17a76b8/labkey/security.py#L123-L141
def get_user_by_email(server_context, email): """ Get the user with the provided email. Throws a ValueError if not found. :param server_context: A LabKey server context. See utils.create_server_context. :param email: :return: """ url = server_context.build_url(user_controller, 'getUsers.api') payload = dict(includeDeactivatedAccounts=True) result = server_context.make_request(url, payload) if result is None or result['users'] is None: raise ValueError("No Users in container" + email) for user in result['users']: if user['email'] == email: return user else: raise ValueError("User not found: " + email)
[ "def", "get_user_by_email", "(", "server_context", ",", "email", ")", ":", "url", "=", "server_context", ".", "build_url", "(", "user_controller", ",", "'getUsers.api'", ")", "payload", "=", "dict", "(", "includeDeactivatedAccounts", "=", "True", ")", "result", "=", "server_context", ".", "make_request", "(", "url", ",", "payload", ")", "if", "result", "is", "None", "or", "result", "[", "'users'", "]", "is", "None", ":", "raise", "ValueError", "(", "\"No Users in container\"", "+", "email", ")", "for", "user", "in", "result", "[", "'users'", "]", ":", "if", "user", "[", "'email'", "]", "==", "email", ":", "return", "user", "else", ":", "raise", "ValueError", "(", "\"User not found: \"", "+", "email", ")" ]
Get the user with the provided email. Throws a ValueError if not found. :param server_context: A LabKey server context. See utils.create_server_context. :param email: :return:
[ "Get", "the", "user", "with", "the", "provided", "email", ".", "Throws", "a", "ValueError", "if", "not", "found", ".", ":", "param", "server_context", ":", "A", "LabKey", "server", "context", ".", "See", "utils", ".", "create_server_context", ".", ":", "param", "email", ":", ":", "return", ":" ]
python
train
35.526316
kataev/flake8-rst
flake8_rst/sourceblock.py
https://github.com/kataev/flake8-rst/blob/ca6d41c7a309b9e8cd4fa6f428b82db96b6a986f/flake8_rst/sourceblock.py#L72-L84
def merge(cls, source_blocks): """Merge multiple SourceBlocks together""" if len(source_blocks) == 1: return source_blocks[0] source_blocks.sort(key=operator.attrgetter('start_line_number')) main_block = source_blocks[0] boot_lines = main_block.boot_lines source_lines = [source_line for source_block in source_blocks for source_line in source_block.source_lines] return cls(boot_lines, source_lines, directive=main_block.directive, language=main_block.language, roles=main_block.roles)
[ "def", "merge", "(", "cls", ",", "source_blocks", ")", ":", "if", "len", "(", "source_blocks", ")", "==", "1", ":", "return", "source_blocks", "[", "0", "]", "source_blocks", ".", "sort", "(", "key", "=", "operator", ".", "attrgetter", "(", "'start_line_number'", ")", ")", "main_block", "=", "source_blocks", "[", "0", "]", "boot_lines", "=", "main_block", ".", "boot_lines", "source_lines", "=", "[", "source_line", "for", "source_block", "in", "source_blocks", "for", "source_line", "in", "source_block", ".", "source_lines", "]", "return", "cls", "(", "boot_lines", ",", "source_lines", ",", "directive", "=", "main_block", ".", "directive", ",", "language", "=", "main_block", ".", "language", ",", "roles", "=", "main_block", ".", "roles", ")" ]
Merge multiple SourceBlocks together
[ "Merge", "multiple", "SourceBlocks", "together" ]
python
train
43.384615
prompt-toolkit/ptpython
ptpython/key_bindings.py
https://github.com/prompt-toolkit/ptpython/blob/b1bba26a491324cd65e0ef46c7b818c4b88fd993/ptpython/key_bindings.py#L260-L289
def auto_newline(buffer): r""" Insert \n at the cursor position. Also add necessary padding. """ insert_text = buffer.insert_text if buffer.document.current_line_after_cursor: # When we are in the middle of a line. Always insert a newline. insert_text('\n') else: # Go to new line, but also add indentation. current_line = buffer.document.current_line_before_cursor.rstrip() insert_text('\n') # Unident if the last line ends with 'pass', remove four spaces. unindent = current_line.rstrip().endswith(' pass') # Copy whitespace from current line current_line2 = current_line[4:] if unindent else current_line for c in current_line2: if c.isspace(): insert_text(c) else: break # If the last line ends with a colon, add four extra spaces. if current_line[-1:] == ':': for x in range(4): insert_text(' ')
[ "def", "auto_newline", "(", "buffer", ")", ":", "insert_text", "=", "buffer", ".", "insert_text", "if", "buffer", ".", "document", ".", "current_line_after_cursor", ":", "# When we are in the middle of a line. Always insert a newline.", "insert_text", "(", "'\\n'", ")", "else", ":", "# Go to new line, but also add indentation.", "current_line", "=", "buffer", ".", "document", ".", "current_line_before_cursor", ".", "rstrip", "(", ")", "insert_text", "(", "'\\n'", ")", "# Unident if the last line ends with 'pass', remove four spaces.", "unindent", "=", "current_line", ".", "rstrip", "(", ")", ".", "endswith", "(", "' pass'", ")", "# Copy whitespace from current line", "current_line2", "=", "current_line", "[", "4", ":", "]", "if", "unindent", "else", "current_line", "for", "c", "in", "current_line2", ":", "if", "c", ".", "isspace", "(", ")", ":", "insert_text", "(", "c", ")", "else", ":", "break", "# If the last line ends with a colon, add four extra spaces.", "if", "current_line", "[", "-", "1", ":", "]", "==", "':'", ":", "for", "x", "in", "range", "(", "4", ")", ":", "insert_text", "(", "' '", ")" ]
r""" Insert \n at the cursor position. Also add necessary padding.
[ "r", "Insert", "\\", "n", "at", "the", "cursor", "position", ".", "Also", "add", "necessary", "padding", "." ]
python
train
32.666667
getsentry/responses
responses.py
https://github.com/getsentry/responses/blob/b7ab59513ffd52bf28808f45005f492f7d1bbd50/responses.py#L436-L487
def add( self, method=None, # method or ``Response`` url=None, body="", adding_headers=None, *args, **kwargs ): """ A basic request: >>> responses.add(responses.GET, 'http://example.com') You can also directly pass an object which implements the ``BaseResponse`` interface: >>> responses.add(Response(...)) A JSON payload: >>> responses.add( >>> method='GET', >>> url='http://example.com', >>> json={'foo': 'bar'}, >>> ) Custom headers: >>> responses.add( >>> method='GET', >>> url='http://example.com', >>> headers={'X-Header': 'foo'}, >>> ) Strict query string matching: >>> responses.add( >>> method='GET', >>> url='http://example.com?foo=bar', >>> match_querystring=True >>> ) """ if isinstance(method, BaseResponse): self._matches.append(method) return if adding_headers is not None: kwargs.setdefault("headers", adding_headers) self._matches.append(Response(method=method, url=url, body=body, **kwargs))
[ "def", "add", "(", "self", ",", "method", "=", "None", ",", "# method or ``Response``", "url", "=", "None", ",", "body", "=", "\"\"", ",", "adding_headers", "=", "None", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "isinstance", "(", "method", ",", "BaseResponse", ")", ":", "self", ".", "_matches", ".", "append", "(", "method", ")", "return", "if", "adding_headers", "is", "not", "None", ":", "kwargs", ".", "setdefault", "(", "\"headers\"", ",", "adding_headers", ")", "self", ".", "_matches", ".", "append", "(", "Response", "(", "method", "=", "method", ",", "url", "=", "url", ",", "body", "=", "body", ",", "*", "*", "kwargs", ")", ")" ]
A basic request: >>> responses.add(responses.GET, 'http://example.com') You can also directly pass an object which implements the ``BaseResponse`` interface: >>> responses.add(Response(...)) A JSON payload: >>> responses.add( >>> method='GET', >>> url='http://example.com', >>> json={'foo': 'bar'}, >>> ) Custom headers: >>> responses.add( >>> method='GET', >>> url='http://example.com', >>> headers={'X-Header': 'foo'}, >>> ) Strict query string matching: >>> responses.add( >>> method='GET', >>> url='http://example.com?foo=bar', >>> match_querystring=True >>> )
[ "A", "basic", "request", ":" ]
python
train
23.596154
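The standard pattern for the responses library, matching the docstring examples: register a mock inside an activated test, then issue the request with requests.

import requests
import responses

@responses.activate
def test_example():
    responses.add(responses.GET, "http://example.com/api",
                  json={"foo": "bar"}, status=200)
    resp = requests.get("http://example.com/api")
    assert resp.json() == {"foo": "bar"}

test_example()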
softlayer/softlayer-python
SoftLayer/managers/hardware.py
https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/managers/hardware.py#L688-L703
def _get_default_price_id(items, option, hourly, location): """Returns a 'free' price id given an option.""" for item in items: if utils.lookup(item, 'itemCategory', 'categoryCode') != option: continue for price in item['prices']: if all([float(price.get('hourlyRecurringFee', 0)) == 0.0, float(price.get('recurringFee', 0)) == 0.0, _matches_billing(price, hourly), _matches_location(price, location)]): return price['id'] raise SoftLayer.SoftLayerError( "Could not find valid price for '%s' option" % option)
[ "def", "_get_default_price_id", "(", "items", ",", "option", ",", "hourly", ",", "location", ")", ":", "for", "item", "in", "items", ":", "if", "utils", ".", "lookup", "(", "item", ",", "'itemCategory'", ",", "'categoryCode'", ")", "!=", "option", ":", "continue", "for", "price", "in", "item", "[", "'prices'", "]", ":", "if", "all", "(", "[", "float", "(", "price", ".", "get", "(", "'hourlyRecurringFee'", ",", "0", ")", ")", "==", "0.0", ",", "float", "(", "price", ".", "get", "(", "'recurringFee'", ",", "0", ")", ")", "==", "0.0", ",", "_matches_billing", "(", "price", ",", "hourly", ")", ",", "_matches_location", "(", "price", ",", "location", ")", "]", ")", ":", "return", "price", "[", "'id'", "]", "raise", "SoftLayer", ".", "SoftLayerError", "(", "\"Could not find valid price for '%s' option\"", "%", "option", ")" ]
Returns a 'free' price id given an option.
[ "Returns", "a", "free", "price", "id", "given", "an", "option", "." ]
python
train
39.5625
PmagPy/PmagPy
programs/demag_gui.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/demag_gui.py#L3668-L3692
def select_specimen(self, specimen): """ Goes through the calculations necessary to plot measurement data for specimen and sets specimen as current GUI specimen, also attempts to handle changing current fit. """ try: fit_index = self.pmag_results_data['specimens'][self.s].index( self.current_fit) except KeyError: fit_index = None except ValueError: fit_index = None # sets self.s to specimen calculates params etc. self.initialize_CART_rot(specimen) self.list_bound_loc = 0 if fit_index != None and self.s in self.pmag_results_data['specimens']: try: self.current_fit = self.pmag_results_data['specimens'][self.s][fit_index] except IndexError: self.current_fit = None else: self.current_fit = None if self.s != self.specimens_box.GetValue(): self.specimens_box.SetValue(self.s)
[ "def", "select_specimen", "(", "self", ",", "specimen", ")", ":", "try", ":", "fit_index", "=", "self", ".", "pmag_results_data", "[", "'specimens'", "]", "[", "self", ".", "s", "]", ".", "index", "(", "self", ".", "current_fit", ")", "except", "KeyError", ":", "fit_index", "=", "None", "except", "ValueError", ":", "fit_index", "=", "None", "# sets self.s to specimen calculates params etc.", "self", ".", "initialize_CART_rot", "(", "specimen", ")", "self", ".", "list_bound_loc", "=", "0", "if", "fit_index", "!=", "None", "and", "self", ".", "s", "in", "self", ".", "pmag_results_data", "[", "'specimens'", "]", ":", "try", ":", "self", ".", "current_fit", "=", "self", ".", "pmag_results_data", "[", "'specimens'", "]", "[", "self", ".", "s", "]", "[", "fit_index", "]", "except", "IndexError", ":", "self", ".", "current_fit", "=", "None", "else", ":", "self", ".", "current_fit", "=", "None", "if", "self", ".", "s", "!=", "self", ".", "specimens_box", ".", "GetValue", "(", ")", ":", "self", ".", "specimens_box", ".", "SetValue", "(", "self", ".", "s", ")" ]
Goes through the calculations necessary to plot measurement data for specimen and sets specimen as current GUI specimen, also attempts to handle changing current fit.
[ "Goes", "through", "the", "calculations", "necessary", "to", "plot", "measurement", "data", "for", "specimen", "and", "sets", "specimen", "as", "current", "GUI", "specimen", "also", "attempts", "to", "handle", "changing", "current", "fit", "." ]
python
train
39.96
dask/dask-kubernetes
dask_kubernetes/core.py
https://github.com/dask/dask-kubernetes/blob/8a4883ecd902460b446bb1f43ed97efe398a135e/dask_kubernetes/core.py#L522-L535
def _namespace_default(): """ Get current namespace if running in a k8s cluster If not in a k8s cluster with service accounts enabled, default to 'default' Taken from https://github.com/jupyterhub/kubespawner/blob/master/kubespawner/spawner.py#L125 """ ns_path = '/var/run/secrets/kubernetes.io/serviceaccount/namespace' if os.path.exists(ns_path): with open(ns_path) as f: return f.read().strip() return 'default'
[ "def", "_namespace_default", "(", ")", ":", "ns_path", "=", "'/var/run/secrets/kubernetes.io/serviceaccount/namespace'", "if", "os", ".", "path", ".", "exists", "(", "ns_path", ")", ":", "with", "open", "(", "ns_path", ")", "as", "f", ":", "return", "f", ".", "read", "(", ")", ".", "strip", "(", ")", "return", "'default'" ]
Get current namespace if running in a k8s cluster If not in a k8s cluster with service accounts enabled, default to 'default' Taken from https://github.com/jupyterhub/kubespawner/blob/master/kubespawner/spawner.py#L125
[ "Get", "current", "namespace", "if", "running", "in", "a", "k8s", "cluster" ]
python
train
32.785714
mushkevych/scheduler
synergy/db/dao/unit_of_work_dao.py
https://github.com/mushkevych/scheduler/blob/6740331360f49083c208085fb5a60ce80ebf418b/synergy/db/dao/unit_of_work_dao.py#L147-L150
def run_query(self, query): """ method runs the query and returns a list of filtered UnitOfWork records """ cursor = self.ds.filter(COLLECTION_UNIT_OF_WORK, query) return [UnitOfWork.from_json(document) for document in cursor]
[ "def", "run_query", "(", "self", ",", "query", ")", ":", "cursor", "=", "self", ".", "ds", ".", "filter", "(", "COLLECTION_UNIT_OF_WORK", ",", "query", ")", "return", "[", "UnitOfWork", ".", "from_json", "(", "document", ")", "for", "document", "in", "cursor", "]" ]
method runs the query and returns a list of filtered UnitOfWork records
[ "method", "runs", "the", "query", "and", "returns", "a", "list", "of", "filtered", "UnitOfWork", "records" ]
python
train
61.75
angr/angr
angr/analyses/reassembler.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/analyses/reassembler.py#L2615-L2639
def _cgc_package_list_identifier(self, data_addr, data_size): """ Identifies the CGC package list associated with the CGC binary. :param int data_addr: Address of the data in memory. :param int data_size: Maximum size possible. :return: A 2-tuple of data type and size. :rtype: tuple """ if data_size < 100: return None, None data = self.fast_memory_load(data_addr, data_size, str) if data[:10] != 'The DECREE': return None, None if not all(i in string.printable for i in data): return None, None if not re.match(r"The DECREE packages used in the creation of this challenge binary were:", data): return None, None return 'cgc-package-list', data_size
[ "def", "_cgc_package_list_identifier", "(", "self", ",", "data_addr", ",", "data_size", ")", ":", "if", "data_size", "<", "100", ":", "return", "None", ",", "None", "data", "=", "self", ".", "fast_memory_load", "(", "data_addr", ",", "data_size", ",", "str", ")", "if", "data", "[", ":", "10", "]", "!=", "'The DECREE'", ":", "return", "None", ",", "None", "if", "not", "all", "(", "i", "in", "string", ".", "printable", "for", "i", "in", "data", ")", ":", "return", "None", ",", "None", "if", "not", "re", ".", "match", "(", "r\"The DECREE packages used in the creation of this challenge binary were:\"", ",", "data", ")", ":", "return", "None", ",", "None", "return", "'cgc-package-list'", ",", "data_size" ]
Identifies the CGC package list associated with the CGC binary. :param int data_addr: Address of the data in memory. :param int data_size: Maximum size possible. :return: A 2-tuple of data type and size. :rtype: tuple
[ "Identifies", "the", "CGC", "package", "list", "associated", "with", "the", "CGC", "binary", "." ]
python
train
31.4
explosion/spaCy
spacy/pipeline/entityruler.py
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/pipeline/entityruler.py#L186-L198
def to_disk(self, path, **kwargs): """Save the entity ruler patterns to a directory. The patterns will be saved as newline-delimited JSON (JSONL). path (unicode / Path): The JSONL file to load. **kwargs: Other config paramters, mostly for consistency. RETURNS (EntityRuler): The loaded entity ruler. DOCS: https://spacy.io/api/entityruler#to_disk """ path = ensure_path(path) path = path.with_suffix(".jsonl") srsly.write_jsonl(path, self.patterns)
[ "def", "to_disk", "(", "self", ",", "path", ",", "*", "*", "kwargs", ")", ":", "path", "=", "ensure_path", "(", "path", ")", "path", "=", "path", ".", "with_suffix", "(", "\".jsonl\"", ")", "srsly", ".", "write_jsonl", "(", "path", ",", "self", ".", "patterns", ")" ]
Save the entity ruler patterns to a directory. The patterns will be saved as newline-delimited JSON (JSONL). path (unicode / Path): The JSONL file to load. **kwargs: Other config paramters, mostly for consistency. RETURNS (EntityRuler): The loaded entity ruler. DOCS: https://spacy.io/api/entityruler#to_disk
[ "Save", "the", "entity", "ruler", "patterns", "to", "a", "directory", ".", "The", "patterns", "will", "be", "saved", "as", "newline", "-", "delimited", "JSON", "(", "JSONL", ")", "." ]
python
train
39.846154
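A hedged usage sketch for the spaCy 2.x EntityRuler; the pattern and file name are examples.

import spacy
from spacy.pipeline import EntityRuler

nlp = spacy.blank("en")
ruler = EntityRuler(nlp)
ruler.add_patterns([{"label": "ORG", "pattern": "Apple"}])
ruler.to_disk("patterns.jsonl")   # written as newline-delimited JSON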
tradenity/python-sdk
tradenity/resources/variant.py
https://github.com/tradenity/python-sdk/blob/d13fbe23f4d6ff22554c6d8d2deaf209371adaf1/tradenity/resources/variant.py#L1066-L1087
def update_variant_by_id(cls, variant_id, variant, **kwargs): """Update Variant Update attributes of Variant This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.update_variant_by_id(variant_id, variant, async=True) >>> result = thread.get() :param async bool :param str variant_id: ID of variant to update. (required) :param Variant variant: Attributes of variant to update. (required) :return: Variant If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._update_variant_by_id_with_http_info(variant_id, variant, **kwargs) else: (data) = cls._update_variant_by_id_with_http_info(variant_id, variant, **kwargs) return data
[ "def", "update_variant_by_id", "(", "cls", ",", "variant_id", ",", "variant", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async'", ")", ":", "return", "cls", ".", "_update_variant_by_id_with_http_info", "(", "variant_id", ",", "variant", ",", "*", "*", "kwargs", ")", "else", ":", "(", "data", ")", "=", "cls", ".", "_update_variant_by_id_with_http_info", "(", "variant_id", ",", "variant", ",", "*", "*", "kwargs", ")", "return", "data" ]
Update Variant Update attributes of Variant This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.update_variant_by_id(variant_id, variant, async=True) >>> result = thread.get() :param async bool :param str variant_id: ID of variant to update. (required) :param Variant variant: Attributes of variant to update. (required) :return: Variant If the method is called asynchronously, returns the request thread.
[ "Update", "Variant" ]
python
train
43.727273
iclab/centinel
centinel/primitives/http.py
https://github.com/iclab/centinel/blob/9a25dcf30c6a1db3c046f7ccb8ab8873e455c1a4/centinel/primitives/http.py#L244-L344
def get_requests_batch(input_list, results={}, delay_time=0.5, max_threads=100): """ This is a parallel version of the HTTP GET primitive. :param input_list: the input is a list of either dictionaries containing query information, or just domain names (and NOT URLs). :param delay_time: delay before starting each thread :param max_threads: maximum number of concurrent threads :return: results in dict format Note: the input list can look like this: [ { "host": "www.google.com", "path": "/", "headers": {}, "ssl": False, "url": "http://www.google.com/" }, "www.twitter.com", "www.youtube.com", { "host": "www.facebook.com", "path": "/", "headers": {}, "ssl": True, "url": "http://www.facebook.com" }, ... ] """ threads = [] thread_error = False thread_wait_timeout = 200 ind = 1 total_item_count = len(input_list) # randomly select one user agent for one input list user_agent = random.choice(user_agent_pool) for row in input_list: headers = {} path = "/" ssl = False theme = "http" if type(row) is dict: if "host" not in row: continue host = row["host"] if "path" in row: path = row["path"] if "headers" in row: if type(row["headers"]) is dict: headers = row["headers"] if "ssl" in row: ssl = row["ssl"] theme = "https" if "url" in row: url = row["url"] else: url = "%s://%s%s" % (theme, host, path) else: host = row url = "%s://%s%s" % (theme, host, path) wait_time = 0 while threading.active_count() > max_threads: time.sleep(1) wait_time += 1 if wait_time > thread_wait_timeout: thread_error = True break if thread_error: results["error"] = "Threads took too long to finish." break if "User-Agent" not in headers: headers["User-Agent"] = user_agent # add just a little bit of delay before starting the thread # to avoid overwhelming the connection. time.sleep(delay_time) log_prefix = "%d/%d: " % (ind, total_item_count) thread = threading.Thread(target=get_request, args=(host, path, headers, ssl, results, url, log_prefix)) ind += 1 thread.setDaemon(1) thread_open_success = False retries = 0 while not thread_open_success and retries < MAX_THREAD_START_RETRY: try: thread.start() threads.append(thread) thread_open_success = True except: retries += 1 time.sleep(THREAD_START_DELAY) logging.error("%sThread start failed for %s, retrying... (%d/%d)" % (log_prefix, url, retries, MAX_THREAD_START_RETRY)) if retries == MAX_THREAD_START_RETRY: logging.error("%sCan't start a new thread for %s after %d retries." % (log_prefix, url, retries)) for thread in threads: thread.join(thread_wait_timeout) return results
[ "def", "get_requests_batch", "(", "input_list", ",", "results", "=", "{", "}", ",", "delay_time", "=", "0.5", ",", "max_threads", "=", "100", ")", ":", "threads", "=", "[", "]", "thread_error", "=", "False", "thread_wait_timeout", "=", "200", "ind", "=", "1", "total_item_count", "=", "len", "(", "input_list", ")", "# randomly select one user agent for one input list", "user_agent", "=", "random", ".", "choice", "(", "user_agent_pool", ")", "for", "row", "in", "input_list", ":", "headers", "=", "{", "}", "path", "=", "\"/\"", "ssl", "=", "False", "theme", "=", "\"http\"", "if", "type", "(", "row", ")", "is", "dict", ":", "if", "\"host\"", "not", "in", "row", ":", "continue", "host", "=", "row", "[", "\"host\"", "]", "if", "\"path\"", "in", "row", ":", "path", "=", "row", "[", "\"path\"", "]", "if", "\"headers\"", "in", "row", ":", "if", "type", "(", "row", "[", "\"headers\"", "]", ")", "is", "dict", ":", "headers", "=", "row", "[", "\"headers\"", "]", "if", "\"ssl\"", "in", "row", ":", "ssl", "=", "row", "[", "\"ssl\"", "]", "theme", "=", "\"https\"", "if", "\"url\"", "in", "row", ":", "url", "=", "row", "[", "\"url\"", "]", "else", ":", "url", "=", "\"%s://%s%s\"", "%", "(", "theme", ",", "host", ",", "path", ")", "else", ":", "host", "=", "row", "url", "=", "\"%s://%s%s\"", "%", "(", "theme", ",", "host", ",", "path", ")", "wait_time", "=", "0", "while", "threading", ".", "active_count", "(", ")", ">", "max_threads", ":", "time", ".", "sleep", "(", "1", ")", "wait_time", "+=", "1", "if", "wait_time", ">", "thread_wait_timeout", ":", "thread_error", "=", "True", "break", "if", "thread_error", ":", "results", "[", "\"error\"", "]", "=", "\"Threads took too long to finish.\"", "break", "if", "\"User-Agent\"", "not", "in", "headers", ":", "headers", "[", "\"User-Agent\"", "]", "=", "user_agent", "# add just a little bit of delay before starting the thread", "# to avoid overwhelming the connection.", "time", ".", "sleep", "(", "delay_time", ")", "log_prefix", "=", "\"%d/%d: \"", "%", "(", "ind", ",", "total_item_count", ")", "thread", "=", "threading", ".", "Thread", "(", "target", "=", "get_request", ",", "args", "=", "(", "host", ",", "path", ",", "headers", ",", "ssl", ",", "results", ",", "url", ",", "log_prefix", ")", ")", "ind", "+=", "1", "thread", ".", "setDaemon", "(", "1", ")", "thread_open_success", "=", "False", "retries", "=", "0", "while", "not", "thread_open_success", "and", "retries", "<", "MAX_THREAD_START_RETRY", ":", "try", ":", "thread", ".", "start", "(", ")", "threads", ".", "append", "(", "thread", ")", "thread_open_success", "=", "True", "except", ":", "retries", "+=", "1", "time", ".", "sleep", "(", "THREAD_START_DELAY", ")", "logging", ".", "error", "(", "\"%sThread start failed for %s, retrying... (%d/%d)\"", "%", "(", "log_prefix", ",", "url", ",", "retries", ",", "MAX_THREAD_START_RETRY", ")", ")", "if", "retries", "==", "MAX_THREAD_START_RETRY", ":", "logging", ".", "error", "(", "\"%sCan't start a new thread for %s after %d retries.\"", "%", "(", "log_prefix", ",", "url", ",", "retries", ")", ")", "for", "thread", "in", "threads", ":", "thread", ".", "join", "(", "thread_wait_timeout", ")", "return", "results" ]
This is a parallel version of the HTTP GET primitive. :param input_list: the input is a list of either dictionaries containing query information, or just domain names (and NOT URLs). :param delay_time: delay before starting each thread :param max_threads: maximum number of concurrent threads :return: results in dict format Note: the input list can look like this: [ { "host": "www.google.com", "path": "/", "headers": {}, "ssl": False, "url": "http://www.google.com/" }, "www.twitter.com", "www.youtube.com", { "host": "www.facebook.com", "path": "/", "headers": {}, "ssl": True, "url": "http://www.facebook.com" }, ... ]
[ "This", "is", "a", "parallel", "version", "of", "the", "HTTP", "GET", "primitive", "." ]
python
train
32.90099
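A usage sketch following the mixed input format shown in the docstring; the import path is taken from the record and the hosts are examples.

from centinel.primitives.http import get_requests_batch

results = {}
input_list = [
    "www.twitter.com",
    {"host": "www.google.com", "path": "/", "ssl": False,
     "url": "http://www.google.com/"},
]
get_requests_batch(input_list, results=results, delay_time=0.1, max_threads=10)
print(results.keys())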
pantsbuild/pants
src/python/pants/engine/native.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/engine/native.py#L296-L298
def extern_equals(self, context_handle, val1, val2): """Return true if the given Handles are __eq__.""" return self._ffi.from_handle(val1[0]) == self._ffi.from_handle(val2[0])
[ "def", "extern_equals", "(", "self", ",", "context_handle", ",", "val1", ",", "val2", ")", ":", "return", "self", ".", "_ffi", ".", "from_handle", "(", "val1", "[", "0", "]", ")", "==", "self", ".", "_ffi", ".", "from_handle", "(", "val2", "[", "0", "]", ")" ]
Return true if the given Handles are __eq__.
[ "Return", "true", "if", "the", "given", "Handles", "are", "__eq__", "." ]
python
train
60.333333
westurner/pyrpo
pyrpo/pyrpo.py
https://github.com/westurner/pyrpo/blob/2a910af055dc405b761571a52ef87842397ddadf/pyrpo/pyrpo.py#L596-L606
def hgsub_report(self): """ Yields: str: .hgsubs line for this repository """ if self.relpath == '.': return yield "%s = [%s]%s" % ( self.fpath.lstrip('./'), self.label, self.remote_url)
[ "def", "hgsub_report", "(", "self", ")", ":", "if", "self", ".", "relpath", "==", "'.'", ":", "return", "yield", "\"%s = [%s]%s\"", "%", "(", "self", ".", "fpath", ".", "lstrip", "(", "'./'", ")", ",", "self", ".", "label", ",", "self", ".", "remote_url", ")" ]
Yields: str: .hgsubs line for this repository
[ "Yields", ":", "str", ":", ".", "hgsubs", "line", "for", "this", "repository" ]
python
train
25.090909
jrigden/pyPodcastParser
pyPodcastParser/Podcast.py
https://github.com/jrigden/pyPodcastParser/blob/b21e027bb56ec77986d76fc1990f4e420c6de869/pyPodcastParser/Podcast.py#L289-L294
def set_itunes_author_name(self): """Parses author name from itunes tags and sets value""" try: self.itunes_author_name = self.soup.find('itunes:author').string except AttributeError: self.itunes_author_name = None
[ "def", "set_itunes_author_name", "(", "self", ")", ":", "try", ":", "self", ".", "itunes_author_name", "=", "self", ".", "soup", ".", "find", "(", "'itunes:author'", ")", ".", "string", "except", "AttributeError", ":", "self", ".", "itunes_author_name", "=", "None" ]
Parses author name from itunes tags and sets value
[ "Parses", "author", "name", "from", "itunes", "tags", "and", "sets", "value" ]
python
train
42.833333
halcy/Mastodon.py
mastodon/Mastodon.py
https://github.com/halcy/Mastodon.py/blob/35c43562dd3d34d6ebf7a0f757c09e8fcccc957c/mastodon/Mastodon.py#L742-L752
def status(self, id): """ Fetch information about a single toot. Does not require authentication for publicly visible statuses. Returns a `toot dict`_. """ id = self.__unpack_id(id) url = '/api/v1/statuses/{0}'.format(str(id)) return self.__api_request('GET', url)
[ "def", "status", "(", "self", ",", "id", ")", ":", "id", "=", "self", ".", "__unpack_id", "(", "id", ")", "url", "=", "'/api/v1/statuses/{0}'", ".", "format", "(", "str", "(", "id", ")", ")", "return", "self", ".", "__api_request", "(", "'GET'", ",", "url", ")" ]
Fetch information about a single toot. Does not require authentication for publicly visible statuses. Returns a `toot dict`_.
[ "Fetch", "information", "about", "a", "single", "toot", "." ]
python
train
29.090909
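A hedged usage sketch for Mastodon.py; the token, instance URL and status id are placeholders.

from mastodon import Mastodon

api = Mastodon(access_token="<token>", api_base_url="https://mastodon.social")
toot = api.status(123456)        # returns a toot dict
print(toot["content"])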
twilio/twilio-python
twilio/rest/api/v2010/account/call/feedback.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/api/v2010/account/call/feedback.py#L259-L273
def _proxy(self): """ Generate an instance context for the instance, the context is capable of performing various actions. All instance actions are proxied to the context :returns: FeedbackContext for this FeedbackInstance :rtype: twilio.rest.api.v2010.account.call.feedback.FeedbackContext """ if self._context is None: self._context = FeedbackContext( self._version, account_sid=self._solution['account_sid'], call_sid=self._solution['call_sid'], ) return self._context
[ "def", "_proxy", "(", "self", ")", ":", "if", "self", ".", "_context", "is", "None", ":", "self", ".", "_context", "=", "FeedbackContext", "(", "self", ".", "_version", ",", "account_sid", "=", "self", ".", "_solution", "[", "'account_sid'", "]", ",", "call_sid", "=", "self", ".", "_solution", "[", "'call_sid'", "]", ",", ")", "return", "self", ".", "_context" ]
Generate an instance context for the instance, the context is capable of performing various actions. All instance actions are proxied to the context :returns: FeedbackContext for this FeedbackInstance :rtype: twilio.rest.api.v2010.account.call.feedback.FeedbackContext
[ "Generate", "an", "instance", "context", "for", "the", "instance", "the", "context", "is", "capable", "of", "performing", "various", "actions", ".", "All", "instance", "actions", "are", "proxied", "to", "the", "context" ]
python
train
39.666667
inasafe/inasafe
safe/gis/raster/contour.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gis/raster/contour.py#L53-L68
def gaussian_kernel(sigma, truncate=4.0): """Return Gaussian that truncates at the given number of std deviations. Adapted from https://github.com/nicjhan/gaussian-filter """ sigma = float(sigma) radius = int(truncate * sigma + 0.5) x, y = np.mgrid[-radius:radius + 1, -radius:radius + 1] sigma = sigma ** 2 k = 2 * np.exp(-0.5 * (x ** 2 + y ** 2) / sigma) k = k / np.sum(k) return k
[ "def", "gaussian_kernel", "(", "sigma", ",", "truncate", "=", "4.0", ")", ":", "sigma", "=", "float", "(", "sigma", ")", "radius", "=", "int", "(", "truncate", "*", "sigma", "+", "0.5", ")", "x", ",", "y", "=", "np", ".", "mgrid", "[", "-", "radius", ":", "radius", "+", "1", ",", "-", "radius", ":", "radius", "+", "1", "]", "sigma", "=", "sigma", "**", "2", "k", "=", "2", "*", "np", ".", "exp", "(", "-", "0.5", "*", "(", "x", "**", "2", "+", "y", "**", "2", ")", "/", "sigma", ")", "k", "=", "k", "/", "np", ".", "sum", "(", "k", ")", "return", "k" ]
Return Gaussian that truncates at the given number of std deviations. Adapted from https://github.com/nicjhan/gaussian-filter
[ "Return", "Gaussian", "that", "truncates", "at", "the", "given", "number", "of", "std", "deviations", "." ]
python
train
25.8125
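An illustrative call, assuming `gaussian_kernel` is already in scope (importing the full contour module would pull in heavier GIS dependencies); the shape follows from `radius = int(truncate * sigma + 0.5)` and the last line of the function guarantees the kernel sums to one:

k = gaussian_kernel(sigma=2.0)
print(k.shape)                     # (17, 17): radius = int(4.0 * 2.0 + 0.5) = 8
print(round(float(k.sum()), 6))    # 1.0, because of the k / np.sum(k) normalisation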
peri-source/peri
peri/conf.py
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/conf.py#L55-L61
def read_environment(): """ Read all environment variables to see if they contain PERI """ out = {} for k,v in iteritems(os.environ): if transform(k) in default_conf: out[transform(k)] = v return out
[ "def", "read_environment", "(", ")", ":", "out", "=", "{", "}", "for", "k", ",", "v", "in", "iteritems", "(", "os", ".", "environ", ")", ":", "if", "transform", "(", "k", ")", "in", "default_conf", ":", "out", "[", "transform", "(", "k", ")", "]", "=", "v", "return", "out" ]
Read all environment variables to see if they contain PERI
[ "Read", "all", "environment", "variables", "to", "see", "if", "they", "contain", "PERI" ]
python
valid
32.714286
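A hedged sketch of exercising this helper; which environment variables survive the filter depends on the module-level `transform` and `default_conf`, so the key used below is purely hypothetical:

import os
from peri import conf

os.environ["PERI-VERBOSE"] = "1"   # hypothetical key; it only counts if transform() maps it into default_conf
overrides = conf.read_environment()
print(overrides)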
spookey/photon
photon/util/locations.py
https://github.com/spookey/photon/blob/57212a26ce713ab7723910ee49e3d0ba1697799f/photon/util/locations.py#L105-L162
def search_location(loc, locations=None, critical=False, create_in=None, verbose=True): ''' Locates files with a twist: * Check the existence of a file using the full path in `loc` * Search for the filename `loc` in `locations` * Create it's enclosing folders if the file does not exist. \ use `create_in` :param loc: Filename to search :param locations: A list of possible locations to search within (can be a dictionary, see note below) :param critical: |appteardown| if file was not found :param create_in: If `loc` was not found, the folder `create_in` is created. If `locations` is a dictionary, `create_in` can also specify a key of `locations`. The value will be used then. :param verbose: Pass verbose flag to :func:`make_locations` :returns: The full path of `loc` in matched location .. note:: * |params_locations_dict| * |param_locations_none| ''' from photon.util.structures import to_list from photon.util.system import shell_notify if not locations: locations = get_locations() for p in reversed(sorted(to_list(locations))): f = _path.join(p, loc) if _path.exists(f): return f if _path.exists(_path.abspath(_path.expanduser(loc))): return _path.abspath(_path.expanduser(loc)) if critical: shell_notify('could not locate', state=True, more=dict( file=loc, locations=locations )) if create_in: if isinstance(locations, dict): create_in = locations.get(create_in, create_in) make_locations(locations=[create_in], verbose=verbose) return _path.join(create_in, loc)
[ "def", "search_location", "(", "loc", ",", "locations", "=", "None", ",", "critical", "=", "False", ",", "create_in", "=", "None", ",", "verbose", "=", "True", ")", ":", "from", "photon", ".", "util", ".", "structures", "import", "to_list", "from", "photon", ".", "util", ".", "system", "import", "shell_notify", "if", "not", "locations", ":", "locations", "=", "get_locations", "(", ")", "for", "p", "in", "reversed", "(", "sorted", "(", "to_list", "(", "locations", ")", ")", ")", ":", "f", "=", "_path", ".", "join", "(", "p", ",", "loc", ")", "if", "_path", ".", "exists", "(", "f", ")", ":", "return", "f", "if", "_path", ".", "exists", "(", "_path", ".", "abspath", "(", "_path", ".", "expanduser", "(", "loc", ")", ")", ")", ":", "return", "_path", ".", "abspath", "(", "_path", ".", "expanduser", "(", "loc", ")", ")", "if", "critical", ":", "shell_notify", "(", "'could not locate'", ",", "state", "=", "True", ",", "more", "=", "dict", "(", "file", "=", "loc", ",", "locations", "=", "locations", ")", ")", "if", "create_in", ":", "if", "isinstance", "(", "locations", ",", "dict", ")", ":", "create_in", "=", "locations", ".", "get", "(", "create_in", ",", "create_in", ")", "make_locations", "(", "locations", "=", "[", "create_in", "]", ",", "verbose", "=", "verbose", ")", "return", "_path", ".", "join", "(", "create_in", ",", "loc", ")" ]
Locates files with a twist: * Check the existence of a file using the full path in `loc` * Search for the filename `loc` in `locations` * Create it's enclosing folders if the file does not exist. \ use `create_in` :param loc: Filename to search :param locations: A list of possible locations to search within (can be a dictionary, see note below) :param critical: |appteardown| if file was not found :param create_in: If `loc` was not found, the folder `create_in` is created. If `locations` is a dictionary, `create_in` can also specify a key of `locations`. The value will be used then. :param verbose: Pass verbose flag to :func:`make_locations` :returns: The full path of `loc` in matched location .. note:: * |params_locations_dict| * |param_locations_none|
[ "Locates", "files", "with", "a", "twist", ":" ]
python
train
30.086207
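A usage sketch under the assumption that the caller supplies its own candidate folders; the filename and paths are placeholders:

from photon.util.locations import search_location

# Look for 'settings.yaml' in two candidate folders and fall back to creating the
# 'conf_dir' entry if nothing matches.
path = search_location(
    'settings.yaml',
    locations={'conf_dir': '~/.photon', 'share_dir': '/usr/share/photon'},
    create_in='conf_dir',
)
print(path)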
oceanprotocol/squid-py
squid_py/keeper/multi_event_listener.py
https://github.com/oceanprotocol/squid-py/blob/43a5b7431627e4c9ab7382ed9eb8153e96ed4483/squid_py/keeper/multi_event_listener.py#L33-L43
def make_event_filter(self, filter_key, filter_value): """Create a new event filter.""" event_filter = EventFilter( self.event_name, self.event, {filter_key: filter_value}, from_block=self.from_block, to_block=self.to_block ) event_filter.set_poll_interval(0.5) return event_filter
[ "def", "make_event_filter", "(", "self", ",", "filter_key", ",", "filter_value", ")", ":", "event_filter", "=", "EventFilter", "(", "self", ".", "event_name", ",", "self", ".", "event", ",", "{", "filter_key", ":", "filter_value", "}", ",", "from_block", "=", "self", ".", "from_block", ",", "to_block", "=", "self", ".", "to_block", ")", "event_filter", ".", "set_poll_interval", "(", "0.5", ")", "return", "event_filter" ]
Create a new event filter.
[ "Create", "a", "new", "event", "filter", "." ]
python
train
33.727273
caktus/django-timepiece
timepiece/entries/views.py
https://github.com/caktus/django-timepiece/blob/52515dec027664890efbc535429e1ba1ee152f40/timepiece/entries/views.py#L286-L315
def delete_entry(request, entry_id): """ Give the user the ability to delete a log entry, with a confirmation beforehand. If this method is invoked via a GET request, a form asking for a confirmation of intent will be presented to the user. If this method is invoked via a POST request, the entry will be deleted. """ try: entry = Entry.no_join.get(pk=entry_id, user=request.user) except Entry.DoesNotExist: message = 'No such entry found.' messages.info(request, message) url = request.GET.get('next', reverse('dashboard')) return HttpResponseRedirect(url) if request.method == 'POST': key = request.POST.get('key', None) if key and key == entry.delete_key: entry.delete() message = 'Deleted {0} for {1}.'.format(entry.activity.name, entry.project) messages.info(request, message) url = request.GET.get('next', reverse('dashboard')) return HttpResponseRedirect(url) else: message = 'You are not authorized to delete this entry!' messages.error(request, message) return render(request, 'timepiece/entry/delete.html', { 'entry': entry, })
[ "def", "delete_entry", "(", "request", ",", "entry_id", ")", ":", "try", ":", "entry", "=", "Entry", ".", "no_join", ".", "get", "(", "pk", "=", "entry_id", ",", "user", "=", "request", ".", "user", ")", "except", "Entry", ".", "DoesNotExist", ":", "message", "=", "'No such entry found.'", "messages", ".", "info", "(", "request", ",", "message", ")", "url", "=", "request", ".", "GET", ".", "get", "(", "'next'", ",", "reverse", "(", "'dashboard'", ")", ")", "return", "HttpResponseRedirect", "(", "url", ")", "if", "request", ".", "method", "==", "'POST'", ":", "key", "=", "request", ".", "POST", ".", "get", "(", "'key'", ",", "None", ")", "if", "key", "and", "key", "==", "entry", ".", "delete_key", ":", "entry", ".", "delete", "(", ")", "message", "=", "'Deleted {0} for {1}.'", ".", "format", "(", "entry", ".", "activity", ".", "name", ",", "entry", ".", "project", ")", "messages", ".", "info", "(", "request", ",", "message", ")", "url", "=", "request", ".", "GET", ".", "get", "(", "'next'", ",", "reverse", "(", "'dashboard'", ")", ")", "return", "HttpResponseRedirect", "(", "url", ")", "else", ":", "message", "=", "'You are not authorized to delete this entry!'", "messages", ".", "error", "(", "request", ",", "message", ")", "return", "render", "(", "request", ",", "'timepiece/entry/delete.html'", ",", "{", "'entry'", ":", "entry", ",", "}", ")" ]
Give the user the ability to delete a log entry, with a confirmation beforehand. If this method is invoked via a GET request, a form asking for a confirmation of intent will be presented to the user. If this method is invoked via a POST request, the entry will be deleted.
[ "Give", "the", "user", "the", "ability", "to", "delete", "a", "log", "entry", "with", "a", "confirmation", "beforehand", ".", "If", "this", "method", "is", "invoked", "via", "a", "GET", "request", "a", "form", "asking", "for", "a", "confirmation", "of", "intent", "will", "be", "presented", "to", "the", "user", ".", "If", "this", "method", "is", "invoked", "via", "a", "POST", "request", "the", "entry", "will", "be", "deleted", "." ]
python
train
40.366667
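Since this is an ordinary Django view, a hedged wiring sketch looks like the following; the URL pattern and name are illustrative and not taken from django-timepiece's own urls module:

from django.urls import path
from timepiece.entries.views import delete_entry

urlpatterns = [
    path('entry/<int:entry_id>/delete/', delete_entry, name='delete_entry'),
]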
aws/sagemaker-python-sdk
src/sagemaker/workflow/airflow.py
https://github.com/aws/sagemaker-python-sdk/blob/a9e724c7d3f5572b68c3903548c792a59d99799a/src/sagemaker/workflow/airflow.py#L60-L83
def prepare_amazon_algorithm_estimator(estimator, inputs, mini_batch_size=None): """ Set up amazon algorithm estimator, adding the required `feature_dim` hyperparameter from training data. Args: estimator (sagemaker.amazon.amazon_estimator.AmazonAlgorithmEstimatorBase): An estimator for a built-in Amazon algorithm to get information from and update. inputs: The training data. * (sagemaker.amazon.amazon_estimator.RecordSet) - A collection of Amazon :class:~`Record` objects serialized and stored in S3. For use with an estimator for an Amazon algorithm. * (list[sagemaker.amazon.amazon_estimator.RecordSet]) - A list of :class:~`sagemaker.amazon.amazon_estimator.RecordSet` objects, where each instance is a different channel of training data. """ if isinstance(inputs, list): for record in inputs: if isinstance(record, amazon_estimator.RecordSet) and record.channel == 'train': estimator.feature_dim = record.feature_dim break elif isinstance(inputs, amazon_estimator.RecordSet): estimator.feature_dim = inputs.feature_dim else: raise TypeError('Training data must be represented in RecordSet or list of RecordSets') estimator.mini_batch_size = mini_batch_size
[ "def", "prepare_amazon_algorithm_estimator", "(", "estimator", ",", "inputs", ",", "mini_batch_size", "=", "None", ")", ":", "if", "isinstance", "(", "inputs", ",", "list", ")", ":", "for", "record", "in", "inputs", ":", "if", "isinstance", "(", "record", ",", "amazon_estimator", ".", "RecordSet", ")", "and", "record", ".", "channel", "==", "'train'", ":", "estimator", ".", "feature_dim", "=", "record", ".", "feature_dim", "break", "elif", "isinstance", "(", "inputs", ",", "amazon_estimator", ".", "RecordSet", ")", ":", "estimator", ".", "feature_dim", "=", "inputs", ".", "feature_dim", "else", ":", "raise", "TypeError", "(", "'Training data must be represented in RecordSet or list of RecordSets'", ")", "estimator", ".", "mini_batch_size", "=", "mini_batch_size" ]
Set up amazon algorithm estimator, adding the required `feature_dim` hyperparameter from training data. Args: estimator (sagemaker.amazon.amazon_estimator.AmazonAlgorithmEstimatorBase): An estimator for a built-in Amazon algorithm to get information from and update. inputs: The training data. * (sagemaker.amazon.amazon_estimator.RecordSet) - A collection of Amazon :class:~`Record` objects serialized and stored in S3. For use with an estimator for an Amazon algorithm. * (list[sagemaker.amazon.amazon_estimator.RecordSet]) - A list of :class:~`sagemaker.amazon.amazon_estimator.RecordSet` objects, where each instance is a different channel of training data.
[ "Set", "up", "amazon", "algorithm", "estimator", "adding", "the", "required", "feature_dim", "hyperparameter", "from", "training", "data", "." ]
python
train
56.458333
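A hedged sketch of how the helper might be called while assembling an Airflow training config; the role ARN and training matrix are dummies, and `record_set` needs real AWS credentials to upload data, so this is illustrative only:

import numpy as np
from sagemaker.amazon.kmeans import KMeans

role_arn = "arn:aws:iam::123456789012:role/SageMakerRole"   # placeholder
estimator = KMeans(role=role_arn, train_instance_count=1,
                   train_instance_type="ml.c4.xlarge", k=10)
records = estimator.record_set(np.random.rand(100, 5).astype("float32"))
prepare_amazon_algorithm_estimator(estimator, records, mini_batch_size=100)
print(estimator.feature_dim, estimator.mini_batch_size)     # 5 100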
juiceinc/recipe
recipe/validators.py
https://github.com/juiceinc/recipe/blob/2e60c2242aeaea3029a2274b31bc3a937761e568/recipe/validators.py#L66-L72
def _normalize_coerce_to_format_with_lookup(self, v): """ Replace a format with a default """ try: return self.format_lookup.get(v, v) except TypeError: # v is something we can't lookup (like a list) return v
[ "def", "_normalize_coerce_to_format_with_lookup", "(", "self", ",", "v", ")", ":", "try", ":", "return", "self", ".", "format_lookup", ".", "get", "(", "v", ",", "v", ")", "except", "TypeError", ":", "# v is something we can't lookup (like a list)", "return", "v" ]
Replace a format with a default
[ "Replace", "a", "format", "with", "a", "default" ]
python
train
37.428571
useblocks/sphinxcontrib-needs
sphinxcontrib/needs/functions/functions.py
https://github.com/useblocks/sphinxcontrib-needs/blob/f49af4859a74e9fe76de5b9133c01335ac6ae191/sphinxcontrib/needs/functions/functions.py#L210-L272
def _analyze_func_string(func_string): """ Analyze given functiion string an extract: * function name * function arguments * function keyword arguments All given arguments must by of type string, int/float or list. :param func_string: string of the function :return: function name, arguments, keyword arguments """ func = ast.parse(func_string) try: func_call = func.body[0].value func_name = func_call.func.id except AttributeError: raise SphinxError("Given dynamic function string is not a valid python call. Got: {}".format(func_string)) func_args = [] for arg in func_call.args: if isinstance(arg, ast.Num): func_args.append(arg.n) elif isinstance(arg, ast.Str): func_args.append(arg.s) elif isinstance(arg, ast.BoolOp): func_args.append(arg.s) elif isinstance(arg, ast.List): arg_list = [] for element in arg.elts: if isinstance(element, ast.Num): arg_list.append(element.n) elif isinstance(element, ast.Str): arg_list.append(element.s) func_args.append(arg_list) else: raise FunctionParsingException() func_kargs = {} for keyword in func_call.keywords: kvalue = keyword.value kkey = keyword.arg if isinstance(kvalue, ast.Num): func_kargs[kkey] = kvalue.n elif isinstance(kvalue, ast.Str): func_kargs[kkey] = kvalue.s elif isinstance(kvalue, ast_boolean): # Check if Boolean if is_python3: func_kargs[kkey] = kvalue.value else: func_kargs[kkey] = kvalue.id elif isinstance(kvalue, ast.List): arg_list = [] for element in kvalue.elts: if isinstance(element, ast.Num): arg_list.append(element.n) elif isinstance(element, ast.Str): arg_list.append(element.s) func_kargs[kkey] = arg_list else: raise FunctionParsingException() return func_name, func_args, func_kargs
[ "def", "_analyze_func_string", "(", "func_string", ")", ":", "func", "=", "ast", ".", "parse", "(", "func_string", ")", "try", ":", "func_call", "=", "func", ".", "body", "[", "0", "]", ".", "value", "func_name", "=", "func_call", ".", "func", ".", "id", "except", "AttributeError", ":", "raise", "SphinxError", "(", "\"Given dynamic function string is not a valid python call. Got: {}\"", ".", "format", "(", "func_string", ")", ")", "func_args", "=", "[", "]", "for", "arg", "in", "func_call", ".", "args", ":", "if", "isinstance", "(", "arg", ",", "ast", ".", "Num", ")", ":", "func_args", ".", "append", "(", "arg", ".", "n", ")", "elif", "isinstance", "(", "arg", ",", "ast", ".", "Str", ")", ":", "func_args", ".", "append", "(", "arg", ".", "s", ")", "elif", "isinstance", "(", "arg", ",", "ast", ".", "BoolOp", ")", ":", "func_args", ".", "append", "(", "arg", ".", "s", ")", "elif", "isinstance", "(", "arg", ",", "ast", ".", "List", ")", ":", "arg_list", "=", "[", "]", "for", "element", "in", "arg", ".", "elts", ":", "if", "isinstance", "(", "element", ",", "ast", ".", "Num", ")", ":", "arg_list", ".", "append", "(", "element", ".", "n", ")", "elif", "isinstance", "(", "element", ",", "ast", ".", "Str", ")", ":", "arg_list", ".", "append", "(", "element", ".", "s", ")", "func_args", ".", "append", "(", "arg_list", ")", "else", ":", "raise", "FunctionParsingException", "(", ")", "func_kargs", "=", "{", "}", "for", "keyword", "in", "func_call", ".", "keywords", ":", "kvalue", "=", "keyword", ".", "value", "kkey", "=", "keyword", ".", "arg", "if", "isinstance", "(", "kvalue", ",", "ast", ".", "Num", ")", ":", "func_kargs", "[", "kkey", "]", "=", "kvalue", ".", "n", "elif", "isinstance", "(", "kvalue", ",", "ast", ".", "Str", ")", ":", "func_kargs", "[", "kkey", "]", "=", "kvalue", ".", "s", "elif", "isinstance", "(", "kvalue", ",", "ast_boolean", ")", ":", "# Check if Boolean", "if", "is_python3", ":", "func_kargs", "[", "kkey", "]", "=", "kvalue", ".", "value", "else", ":", "func_kargs", "[", "kkey", "]", "=", "kvalue", ".", "id", "elif", "isinstance", "(", "kvalue", ",", "ast", ".", "List", ")", ":", "arg_list", "=", "[", "]", "for", "element", "in", "kvalue", ".", "elts", ":", "if", "isinstance", "(", "element", ",", "ast", ".", "Num", ")", ":", "arg_list", ".", "append", "(", "element", ".", "n", ")", "elif", "isinstance", "(", "element", ",", "ast", ".", "Str", ")", ":", "arg_list", ".", "append", "(", "element", ".", "s", ")", "func_kargs", "[", "kkey", "]", "=", "arg_list", "else", ":", "raise", "FunctionParsingException", "(", ")", "return", "func_name", ",", "func_args", ",", "func_kargs" ]
Analyze given function string and extract:

* function name
* function arguments
* function keyword arguments

All given arguments must be of type string, int/float or list.

:param func_string: string of the function
:return: function name, arguments, keyword arguments
[ "Analyze", "given", "functiion", "string", "an", "extract", ":" ]
python
train
34.206349
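A concrete, hedged example of the values this parser yields for a typical dynamic-function string; the function name and arguments are invented for illustration:

from sphinxcontrib.needs.functions.functions import _analyze_func_string

name, args, kwargs = _analyze_func_string("copy('ID_1', 'title', option=True)")
print(name)     # 'copy'
print(args)     # ['ID_1', 'title']
print(kwargs)   # {'option': True}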
pypa/pipenv
pipenv/patched/pipfile/api.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/patched/pipfile/api.py#L157-L162
def load(klass, filename, inject_env=True): """Load a Pipfile from a given filename.""" p = PipfileParser(filename=filename) pipfile = klass(filename=filename) pipfile.data = p.parse(inject_env=inject_env) return pipfile
[ "def", "load", "(", "klass", ",", "filename", ",", "inject_env", "=", "True", ")", ":", "p", "=", "PipfileParser", "(", "filename", "=", "filename", ")", "pipfile", "=", "klass", "(", "filename", "=", "filename", ")", "pipfile", ".", "data", "=", "p", ".", "parse", "(", "inject_env", "=", "inject_env", ")", "return", "pipfile" ]
Load a Pipfile from a given filename.
[ "Load", "a", "Pipfile", "from", "a", "given", "filename", "." ]
python
train
42.5
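A short usage sketch against the standalone `pipfile` package (pipenv vendors this module as shown above); it assumes a Pipfile exists in the working directory:

from pipfile import Pipfile

pf = Pipfile.load(filename='Pipfile')
print(pf.data['default'])    # packages from the [packages] section
print(pf.data['develop'])    # packages from the [dev-packages] section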
numenta/htmresearch
htmresearch/frameworks/layers/laminar_network.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/layers/laminar_network.py#L50-L73
def createNetwork(networkConfig): """ Create and initialize the specified network instance. @param networkConfig: (dict) the configuration of this network. @return network: (Network) The actual network """ registerAllResearchRegions() network = Network() if networkConfig["networkType"] == "L4L2Column": return createL4L2Column(network, networkConfig, "_0") elif networkConfig["networkType"] == "MultipleL4L2Columns": return createMultipleL4L2Columns(network, networkConfig) elif networkConfig["networkType"] == "MultipleL4L2ColumnsWithTopology": return createMultipleL4L2ColumnsWithTopology(network, networkConfig) elif networkConfig["networkType"] == "L2456Columns": return createL2456Columns(network, networkConfig) elif networkConfig["networkType"] == "L4L2TMColumn": return createL4L2TMColumn(network, networkConfig, "_0") elif networkConfig["networkType"] == "CombinedSequenceColumn": return createCombinedSequenceColumn(network, networkConfig, "_0")
[ "def", "createNetwork", "(", "networkConfig", ")", ":", "registerAllResearchRegions", "(", ")", "network", "=", "Network", "(", ")", "if", "networkConfig", "[", "\"networkType\"", "]", "==", "\"L4L2Column\"", ":", "return", "createL4L2Column", "(", "network", ",", "networkConfig", ",", "\"_0\"", ")", "elif", "networkConfig", "[", "\"networkType\"", "]", "==", "\"MultipleL4L2Columns\"", ":", "return", "createMultipleL4L2Columns", "(", "network", ",", "networkConfig", ")", "elif", "networkConfig", "[", "\"networkType\"", "]", "==", "\"MultipleL4L2ColumnsWithTopology\"", ":", "return", "createMultipleL4L2ColumnsWithTopology", "(", "network", ",", "networkConfig", ")", "elif", "networkConfig", "[", "\"networkType\"", "]", "==", "\"L2456Columns\"", ":", "return", "createL2456Columns", "(", "network", ",", "networkConfig", ")", "elif", "networkConfig", "[", "\"networkType\"", "]", "==", "\"L4L2TMColumn\"", ":", "return", "createL4L2TMColumn", "(", "network", ",", "networkConfig", ",", "\"_0\"", ")", "elif", "networkConfig", "[", "\"networkType\"", "]", "==", "\"CombinedSequenceColumn\"", ":", "return", "createCombinedSequenceColumn", "(", "network", ",", "networkConfig", ",", "\"_0\"", ")" ]
Create and initialize the specified network instance. @param networkConfig: (dict) the configuration of this network. @return network: (Network) The actual network
[ "Create", "and", "initialize", "the", "specified", "network", "instance", "." ]
python
train
41.125
projectatomic/atomic-reactor
atomic_reactor/plugins/pre_resolve_composes.py
https://github.com/projectatomic/atomic-reactor/blob/fd31c01b964097210bf169960d051e5f04019a80/atomic_reactor/plugins/pre_resolve_composes.py#L281-L303
def resolve_signing_intent(self): """Determine the correct signing intent Regardless of what was requested, or provided as signing_intent plugin parameter, consult sigkeys of the actual composes used to guarantee information accuracy. """ all_signing_intents = [ self.odcs_config.get_signing_intent_by_keys(compose_info.get('sigkeys', [])) for compose_info in self.composes_info ] # Because composes_info may contain composes that were passed as # plugin parameters, add the parent signing intent to avoid the # overall signing intent from surpassing parent's. if self._parent_signing_intent: all_signing_intents.append(self._parent_signing_intent) # Calculate the least restrictive signing intent signing_intent = min(all_signing_intents, key=lambda x: x['restrictiveness']) self.log.info('Signing intent for build is %s', signing_intent['name']) self.compose_config.set_signing_intent(signing_intent['name'])
[ "def", "resolve_signing_intent", "(", "self", ")", ":", "all_signing_intents", "=", "[", "self", ".", "odcs_config", ".", "get_signing_intent_by_keys", "(", "compose_info", ".", "get", "(", "'sigkeys'", ",", "[", "]", ")", ")", "for", "compose_info", "in", "self", ".", "composes_info", "]", "# Because composes_info may contain composes that were passed as", "# plugin parameters, add the parent signing intent to avoid the", "# overall signing intent from surpassing parent's.", "if", "self", ".", "_parent_signing_intent", ":", "all_signing_intents", ".", "append", "(", "self", ".", "_parent_signing_intent", ")", "# Calculate the least restrictive signing intent", "signing_intent", "=", "min", "(", "all_signing_intents", ",", "key", "=", "lambda", "x", ":", "x", "[", "'restrictiveness'", "]", ")", "self", ".", "log", ".", "info", "(", "'Signing intent for build is %s'", ",", "signing_intent", "[", "'name'", "]", ")", "self", ".", "compose_config", ".", "set_signing_intent", "(", "signing_intent", "[", "'name'", "]", ")" ]
Determine the correct signing intent Regardless of what was requested, or provided as signing_intent plugin parameter, consult sigkeys of the actual composes used to guarantee information accuracy.
[ "Determine", "the", "correct", "signing", "intent" ]
python
train
45.26087
williballenthin/ida-settings
ida_settings/ui/ida_settings_viewer.py
https://github.com/williballenthin/ida-settings/blob/ddfeab5bd0b6f6f177d0d50f8078c585602b1d9e/ida_settings/ui/ida_settings_viewer.py#L19-L64
def PopulateForm(self): """ +-----------------------------------------------------------------------+ | +--- splitter ------------------------------------------------------+ | | | +-- list widget--------------+ +- IdaSettingsView -------------+ | | | | | | | | | | | | | - plugin name | | | | | | | | - plugin name | | | | | | | | - plugin name | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +----------------------------+ +-------------------------------+ | | | +-------------------------------------------------------------------+ | +-----------------------------------------------------------------------+ """ hbox = QtWidgets.QHBoxLayout(self.parent) self._splitter = QtWidgets.QSplitter(QtCore.Qt.Horizontal) self._plugin_list = QtWidgets.QListWidget() plugin_names = set([]) for scope, fn in (("idb", ida_settings.IDASettings.get_idb_plugin_names), ("directory", ida_settings.IDASettings.get_directory_plugin_names), ("user", ida_settings.IDASettings.get_user_plugin_names), ("system", ida_settings.IDASettings.get_system_plugin_names)): for plugin_name in fn(): plugin_names.add(plugin_name) for plugin_name in plugin_names: self._plugin_list.addItem(plugin_name) self._splitter.addWidget(self._plugin_list) hbox.addWidget(self._splitter) self.parent.setLayout(hbox) self._plugin_list.currentItemChanged.connect(self._handle_plugin_changed)
[ "def", "PopulateForm", "(", "self", ")", ":", "hbox", "=", "QtWidgets", ".", "QHBoxLayout", "(", "self", ".", "parent", ")", "self", ".", "_splitter", "=", "QtWidgets", ".", "QSplitter", "(", "QtCore", ".", "Qt", ".", "Horizontal", ")", "self", ".", "_plugin_list", "=", "QtWidgets", ".", "QListWidget", "(", ")", "plugin_names", "=", "set", "(", "[", "]", ")", "for", "scope", ",", "fn", "in", "(", "(", "\"idb\"", ",", "ida_settings", ".", "IDASettings", ".", "get_idb_plugin_names", ")", ",", "(", "\"directory\"", ",", "ida_settings", ".", "IDASettings", ".", "get_directory_plugin_names", ")", ",", "(", "\"user\"", ",", "ida_settings", ".", "IDASettings", ".", "get_user_plugin_names", ")", ",", "(", "\"system\"", ",", "ida_settings", ".", "IDASettings", ".", "get_system_plugin_names", ")", ")", ":", "for", "plugin_name", "in", "fn", "(", ")", ":", "plugin_names", ".", "add", "(", "plugin_name", ")", "for", "plugin_name", "in", "plugin_names", ":", "self", ".", "_plugin_list", ".", "addItem", "(", "plugin_name", ")", "self", ".", "_splitter", ".", "addWidget", "(", "self", ".", "_plugin_list", ")", "hbox", ".", "addWidget", "(", "self", ".", "_splitter", ")", "self", ".", "parent", ".", "setLayout", "(", "hbox", ")", "self", ".", "_plugin_list", ".", "currentItemChanged", ".", "connect", "(", "self", ".", "_handle_plugin_changed", ")" ]
+-----------------------------------------------------------------------+ | +--- splitter ------------------------------------------------------+ | | | +-- list widget--------------+ +- IdaSettingsView -------------+ | | | | | | | | | | | | | - plugin name | | | | | | | | - plugin name | | | | | | | | - plugin name | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +----------------------------+ +-------------------------------+ | | | +-------------------------------------------------------------------+ | +-----------------------------------------------------------------------+
[ "+", "-----------------------------------------------------------------------", "+", "|", "+", "---", "splitter", "------------------------------------------------------", "+", "|", "|", "|", "+", "--", "list", "widget", "--------------", "+", "+", "-", "IdaSettingsView", "-------------", "+", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "-", "plugin", "name", "|", "|", "|", "|", "|", "|", "|", "|", "-", "plugin", "name", "|", "|", "|", "|", "|", "|", "|", "|", "-", "plugin", "name", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "|", "+", "----------------------------", "+", "+", "-------------------------------", "+", "|", "|", "|", "+", "-------------------------------------------------------------------", "+", "|", "+", "-----------------------------------------------------------------------", "+" ]
python
test
61.913043
MisterY/gnucash-portfolio
gnucash_portfolio/scheduledtxaggregate.py
https://github.com/MisterY/gnucash-portfolio/blob/bfaad8345a5479d1cd111acee1939e25c2a638c2/gnucash_portfolio/scheduledtxaggregate.py#L173-L212
def handle_friday(next_date: Datum, period: str, mult: int, start_date: Datum): """ Extracted the calculation for when the next_day is Friday """ assert isinstance(next_date, Datum) assert isinstance(start_date, Datum) # Starting from line 220. tmp_sat = next_date.clone() tmp_sat.add_days(1) tmp_sun = next_date.clone() tmp_sun.add_days(2) if period == RecurrencePeriod.END_OF_MONTH.value: if (next_date.is_end_of_month() or tmp_sat.is_end_of_month() or tmp_sun.is_end_of_month()): next_date.add_months(1) else: next_date.add_months(mult - 1) else: if tmp_sat.get_day_name() == start_date.get_day_name(): next_date.add_days(1) next_date.add_months(mult) elif tmp_sun.get_day_name() == start_date.get_day_name(): next_date.add_days(2) next_date.add_months(mult) elif next_date.get_day() >= start_date.get_day(): next_date.add_months(mult) elif next_date.is_end_of_month(): next_date.add_months(mult) elif tmp_sat.is_end_of_month(): next_date.add_days(1) next_date.add_months(mult) elif tmp_sun.is_end_of_month(): next_date.add_days(2) next_date.add_months(mult) else: # /* one fewer month fwd because of the occurrence in this month */ next_date.subtract_months(1) return next_date
[ "def", "handle_friday", "(", "next_date", ":", "Datum", ",", "period", ":", "str", ",", "mult", ":", "int", ",", "start_date", ":", "Datum", ")", ":", "assert", "isinstance", "(", "next_date", ",", "Datum", ")", "assert", "isinstance", "(", "start_date", ",", "Datum", ")", "# Starting from line 220.", "tmp_sat", "=", "next_date", ".", "clone", "(", ")", "tmp_sat", ".", "add_days", "(", "1", ")", "tmp_sun", "=", "next_date", ".", "clone", "(", ")", "tmp_sun", ".", "add_days", "(", "2", ")", "if", "period", "==", "RecurrencePeriod", ".", "END_OF_MONTH", ".", "value", ":", "if", "(", "next_date", ".", "is_end_of_month", "(", ")", "or", "tmp_sat", ".", "is_end_of_month", "(", ")", "or", "tmp_sun", ".", "is_end_of_month", "(", ")", ")", ":", "next_date", ".", "add_months", "(", "1", ")", "else", ":", "next_date", ".", "add_months", "(", "mult", "-", "1", ")", "else", ":", "if", "tmp_sat", ".", "get_day_name", "(", ")", "==", "start_date", ".", "get_day_name", "(", ")", ":", "next_date", ".", "add_days", "(", "1", ")", "next_date", ".", "add_months", "(", "mult", ")", "elif", "tmp_sun", ".", "get_day_name", "(", ")", "==", "start_date", ".", "get_day_name", "(", ")", ":", "next_date", ".", "add_days", "(", "2", ")", "next_date", ".", "add_months", "(", "mult", ")", "elif", "next_date", ".", "get_day", "(", ")", ">=", "start_date", ".", "get_day", "(", ")", ":", "next_date", ".", "add_months", "(", "mult", ")", "elif", "next_date", ".", "is_end_of_month", "(", ")", ":", "next_date", ".", "add_months", "(", "mult", ")", "elif", "tmp_sat", ".", "is_end_of_month", "(", ")", ":", "next_date", ".", "add_days", "(", "1", ")", "next_date", ".", "add_months", "(", "mult", ")", "elif", "tmp_sun", ".", "is_end_of_month", "(", ")", ":", "next_date", ".", "add_days", "(", "2", ")", "next_date", ".", "add_months", "(", "mult", ")", "else", ":", "# /* one fewer month fwd because of the occurrence in this month */", "next_date", ".", "subtract_months", "(", "1", ")", "return", "next_date" ]
Extracted the calculation for when the next_day is Friday
[ "Extracted", "the", "calculation", "for", "when", "the", "next_day", "is", "Friday" ]
python
train
36.15
tensorflow/tensor2tensor
tensor2tensor/models/research/glow_ops.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/glow_ops.py#L804-L843
def temporal_latent_to_dist(name, x, hparams, output_channels=None): """Network that maps a time-indexed list of 3-D latents to a gaussian. Args: name: variable scope. x: List of 4-D Tensors indexed by time, (NHWC) hparams: tf.contrib.training.Hparams. output_channels: int, Number of channels of the output gaussian mean. Returns: dist: tfp.distributions.Normal """ _, _, width, _, res_channels = common_layers.shape_list(x) if output_channels is None: output_channels = res_channels dilation_rates = get_dilation_rates(hparams, width) with tf.variable_scope(name, reuse=tf.AUTO_REUSE): h = x for i in range(hparams.latent_encoder_depth): if hparams.latent_apply_dilations: h2 = dilated_conv_stack("dil_latent_3d_res_%d" % i, h, mid_channels=hparams.latent_encoder_width, output_channels=res_channels, dilation_rates=dilation_rates, activation=hparams.latent_activation, dropout=hparams.latent_dropout) else: h2 = conv_stack("latent_3d_res_%d" % i, h, mid_channels=hparams.latent_encoder_width, output_channels=res_channels, activation=hparams.latent_activation, dropout=hparams.latent_dropout) h += h2 # take last activation that should capture all context since padding is # on left. h = h[:, -1, :, :, :] h = conv("res_final", h, apply_actnorm=False, conv_init="zeros", output_channels=2*output_channels, filter_size=[1, 1]) mean, log_scale = h[:, :, :, 0::2], h[:, :, :, 1::2] return tfp.distributions.Normal(mean, tf.exp(log_scale))
[ "def", "temporal_latent_to_dist", "(", "name", ",", "x", ",", "hparams", ",", "output_channels", "=", "None", ")", ":", "_", ",", "_", ",", "width", ",", "_", ",", "res_channels", "=", "common_layers", ".", "shape_list", "(", "x", ")", "if", "output_channels", "is", "None", ":", "output_channels", "=", "res_channels", "dilation_rates", "=", "get_dilation_rates", "(", "hparams", ",", "width", ")", "with", "tf", ".", "variable_scope", "(", "name", ",", "reuse", "=", "tf", ".", "AUTO_REUSE", ")", ":", "h", "=", "x", "for", "i", "in", "range", "(", "hparams", ".", "latent_encoder_depth", ")", ":", "if", "hparams", ".", "latent_apply_dilations", ":", "h2", "=", "dilated_conv_stack", "(", "\"dil_latent_3d_res_%d\"", "%", "i", ",", "h", ",", "mid_channels", "=", "hparams", ".", "latent_encoder_width", ",", "output_channels", "=", "res_channels", ",", "dilation_rates", "=", "dilation_rates", ",", "activation", "=", "hparams", ".", "latent_activation", ",", "dropout", "=", "hparams", ".", "latent_dropout", ")", "else", ":", "h2", "=", "conv_stack", "(", "\"latent_3d_res_%d\"", "%", "i", ",", "h", ",", "mid_channels", "=", "hparams", ".", "latent_encoder_width", ",", "output_channels", "=", "res_channels", ",", "activation", "=", "hparams", ".", "latent_activation", ",", "dropout", "=", "hparams", ".", "latent_dropout", ")", "h", "+=", "h2", "# take last activation that should capture all context since padding is", "# on left.", "h", "=", "h", "[", ":", ",", "-", "1", ",", ":", ",", ":", ",", ":", "]", "h", "=", "conv", "(", "\"res_final\"", ",", "h", ",", "apply_actnorm", "=", "False", ",", "conv_init", "=", "\"zeros\"", ",", "output_channels", "=", "2", "*", "output_channels", ",", "filter_size", "=", "[", "1", ",", "1", "]", ")", "mean", ",", "log_scale", "=", "h", "[", ":", ",", ":", ",", ":", ",", "0", ":", ":", "2", "]", ",", "h", "[", ":", ",", ":", ",", ":", ",", "1", ":", ":", "2", "]", "return", "tfp", ".", "distributions", ".", "Normal", "(", "mean", ",", "tf", ".", "exp", "(", "log_scale", ")", ")" ]
Network that maps a time-indexed list of 3-D latents to a gaussian. Args: name: variable scope. x: List of 4-D Tensors indexed by time, (NHWC) hparams: tf.contrib.training.Hparams. output_channels: int, Number of channels of the output gaussian mean. Returns: dist: tfp.distributions.Normal
[ "Network", "that", "maps", "a", "time", "-", "indexed", "list", "of", "3", "-", "D", "latents", "to", "a", "gaussian", "." ]
python
train
44.35
fastai/fastai
fastai/vision/gan.py
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/vision/gan.py#L28-L39
def basic_generator(in_size:int, n_channels:int, noise_sz:int=100, n_features:int=64, n_extra_layers=0, **conv_kwargs): "A basic generator from `noise_sz` to images `n_channels` x `in_size` x `in_size`." cur_size, cur_ftrs = 4, n_features//2 while cur_size < in_size: cur_size *= 2; cur_ftrs *= 2 layers = [conv_layer(noise_sz, cur_ftrs, 4, 1, transpose=True, **conv_kwargs)] cur_size = 4 while cur_size < in_size // 2: layers.append(conv_layer(cur_ftrs, cur_ftrs//2, 4, 2, 1, transpose=True, **conv_kwargs)) cur_ftrs //= 2; cur_size *= 2 layers += [conv_layer(cur_ftrs, cur_ftrs, 3, 1, 1, transpose=True, **conv_kwargs) for _ in range(n_extra_layers)] layers += [conv2d_trans(cur_ftrs, n_channels, 4, 2, 1, bias=False), nn.Tanh()] return nn.Sequential(*layers)
[ "def", "basic_generator", "(", "in_size", ":", "int", ",", "n_channels", ":", "int", ",", "noise_sz", ":", "int", "=", "100", ",", "n_features", ":", "int", "=", "64", ",", "n_extra_layers", "=", "0", ",", "*", "*", "conv_kwargs", ")", ":", "cur_size", ",", "cur_ftrs", "=", "4", ",", "n_features", "//", "2", "while", "cur_size", "<", "in_size", ":", "cur_size", "*=", "2", "cur_ftrs", "*=", "2", "layers", "=", "[", "conv_layer", "(", "noise_sz", ",", "cur_ftrs", ",", "4", ",", "1", ",", "transpose", "=", "True", ",", "*", "*", "conv_kwargs", ")", "]", "cur_size", "=", "4", "while", "cur_size", "<", "in_size", "//", "2", ":", "layers", ".", "append", "(", "conv_layer", "(", "cur_ftrs", ",", "cur_ftrs", "//", "2", ",", "4", ",", "2", ",", "1", ",", "transpose", "=", "True", ",", "*", "*", "conv_kwargs", ")", ")", "cur_ftrs", "//=", "2", "cur_size", "*=", "2", "layers", "+=", "[", "conv_layer", "(", "cur_ftrs", ",", "cur_ftrs", ",", "3", ",", "1", ",", "1", ",", "transpose", "=", "True", ",", "*", "*", "conv_kwargs", ")", "for", "_", "in", "range", "(", "n_extra_layers", ")", "]", "layers", "+=", "[", "conv2d_trans", "(", "cur_ftrs", ",", "n_channels", ",", "4", ",", "2", ",", "1", ",", "bias", "=", "False", ")", ",", "nn", ".", "Tanh", "(", ")", "]", "return", "nn", ".", "Sequential", "(", "*", "layers", ")" ]
A basic generator from `noise_sz` to images `n_channels` x `in_size` x `in_size`.
[ "A", "basic", "generator", "from", "noise_sz", "to", "images", "n_channels", "x", "in_size", "x", "in_size", "." ]
python
train
66.916667
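A hedged usage sketch; it drives the generator with random noise via plain PyTorch, and the output shape follows from the doubling loop above (in_size=64 yields a 64x64 image):

import torch
from fastai.vision.gan import basic_generator

g = basic_generator(in_size=64, n_channels=3)   # default noise_sz=100, n_features=64
noise = torch.randn(16, 100, 1, 1)              # a batch of 16 noise vectors
fake = g(noise)
print(fake.shape)                               # torch.Size([16, 3, 64, 64])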
wummel/linkchecker
third_party/miniboa-r42/chat_demo.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/third_party/miniboa-r42/chat_demo.py#L37-L44
def on_disconnect(client): """ Sample on_disconnect function. Handles lost connections. """ print "-- Lost connection to %s" % client.addrport() CLIENT_LIST.remove(client) broadcast('%s leaves the conversation.\n' % client.addrport() )
[ "def", "on_disconnect", "(", "client", ")", ":", "print", "\"-- Lost connection to %s\"", "%", "client", ".", "addrport", "(", ")", "CLIENT_LIST", ".", "remove", "(", "client", ")", "broadcast", "(", "'%s leaves the conversation.\\n'", "%", "client", ".", "addrport", "(", ")", ")" ]
Sample on_disconnect function. Handles lost connections.
[ "Sample", "on_disconnect", "function", ".", "Handles", "lost", "connections", "." ]
python
train
32
myint/autoflake
autoflake.py
https://github.com/myint/autoflake/blob/68fea68646922b920d55975f9f2adaeafd84df4f/autoflake.py#L726-L728
def _split_comma_separated(string): """Return a set of strings.""" return set(text.strip() for text in string.split(',') if text.strip())
[ "def", "_split_comma_separated", "(", "string", ")", ":", "return", "set", "(", "text", ".", "strip", "(", ")", "for", "text", "in", "string", ".", "split", "(", "','", ")", "if", "text", ".", "strip", "(", ")", ")" ]
Return a set of strings.
[ "Return", "a", "set", "of", "strings", "." ]
python
test
47.666667
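For illustration, the helper simply strips whitespace and drops empty fields:

from autoflake import _split_comma_separated

print(_split_comma_separated('requests, six,, numpy '))
# {'numpy', 'requests', 'six'} (set order may vary)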
ninuxorg/nodeshot
nodeshot/networking/net/models/device.py
https://github.com/ninuxorg/nodeshot/blob/2466f0a55f522b2696026f196436ce7ba3f1e5c6/nodeshot/networking/net/models/device.py#L60-L99
def save(self, *args, **kwargs): """ Custom save method does the following: * automatically inherit node coordinates and elevation * save shortcuts if HSTORE is enabled """ custom_checks = kwargs.pop('custom_checks', True) super(Device, self).save(*args, **kwargs) if custom_checks is False: return changed = False if not self.location: self.location = self.node.point changed = True if not self.elev and self.node.elev: self.elev = self.node.elev changed = True original_user = self.shortcuts.get('user') if self.node.user: self.shortcuts['user'] = self.node.user if original_user != self.shortcuts.get('user'): changed = True if 'nodeshot.core.layers' in settings.INSTALLED_APPS: original_layer = self.shortcuts.get('layer') self.shortcuts['layer'] = self.node.layer if original_layer != self.shortcuts.get('layer'): changed = True if changed: self.save(custom_checks=False)
[ "def", "save", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "custom_checks", "=", "kwargs", ".", "pop", "(", "'custom_checks'", ",", "True", ")", "super", "(", "Device", ",", "self", ")", ".", "save", "(", "*", "args", ",", "*", "*", "kwargs", ")", "if", "custom_checks", "is", "False", ":", "return", "changed", "=", "False", "if", "not", "self", ".", "location", ":", "self", ".", "location", "=", "self", ".", "node", ".", "point", "changed", "=", "True", "if", "not", "self", ".", "elev", "and", "self", ".", "node", ".", "elev", ":", "self", ".", "elev", "=", "self", ".", "node", ".", "elev", "changed", "=", "True", "original_user", "=", "self", ".", "shortcuts", ".", "get", "(", "'user'", ")", "if", "self", ".", "node", ".", "user", ":", "self", ".", "shortcuts", "[", "'user'", "]", "=", "self", ".", "node", ".", "user", "if", "original_user", "!=", "self", ".", "shortcuts", ".", "get", "(", "'user'", ")", ":", "changed", "=", "True", "if", "'nodeshot.core.layers'", "in", "settings", ".", "INSTALLED_APPS", ":", "original_layer", "=", "self", ".", "shortcuts", ".", "get", "(", "'layer'", ")", "self", ".", "shortcuts", "[", "'layer'", "]", "=", "self", ".", "node", ".", "layer", "if", "original_layer", "!=", "self", ".", "shortcuts", ".", "get", "(", "'layer'", ")", ":", "changed", "=", "True", "if", "changed", ":", "self", ".", "save", "(", "custom_checks", "=", "False", ")" ]
Custom save method does the following: * automatically inherit node coordinates and elevation * save shortcuts if HSTORE is enabled
[ "Custom", "save", "method", "does", "the", "following", ":", "*", "automatically", "inherit", "node", "coordinates", "and", "elevation", "*", "save", "shortcuts", "if", "HSTORE", "is", "enabled" ]
python
train
28.3
sorgerlab/indra
indra/literature/pubmed_client.py
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/literature/pubmed_client.py#L303-L364
def get_metadata_from_xml_tree(tree, get_issns_from_nlm=False, get_abstracts=False, prepend_title=False, mesh_annotations=False): """Get metadata for an XML tree containing PubmedArticle elements. Documentation on the XML structure can be found at: - https://www.nlm.nih.gov/bsd/licensee/elements_descriptions.html - https://www.nlm.nih.gov/bsd/licensee/elements_alphabetical.html Parameters ---------- tree : xml.etree.ElementTree ElementTree containing one or more PubmedArticle elements. get_issns_from_nlm : boolean Look up the full list of ISSN number for the journal associated with the article, which helps to match articles to CrossRef search results. Defaults to False, since it slows down performance. get_abstracts : boolean Indicates whether to include the Pubmed abstract in the results. prepend_title : boolean If get_abstracts is True, specifies whether the article title should be prepended to the abstract text. mesh_annotations : boolean If True, extract mesh annotations from the pubmed entries and include in the returned data. If false, don't. Returns ------- dict of dicts Dictionary indexed by PMID. Each value is a dict containing the following fields: 'doi', 'title', 'authors', 'journal_title', 'journal_abbrev', 'journal_nlm_id', 'issn_list', 'page'. """ # Iterate over the articles and build the results dict results = {} pm_articles = tree.findall('./PubmedArticle') for art_ix, pm_article in enumerate(pm_articles): medline_citation = pm_article.find('./MedlineCitation') article_info = _get_article_info(medline_citation, pm_article.find('PubmedData')) journal_info = _get_journal_info(medline_citation, get_issns_from_nlm) context_info = _get_annotations(medline_citation) # Build the result result = {} result.update(article_info) result.update(journal_info) result.update(context_info) # Get the abstracts if requested if get_abstracts: abstract = _abstract_from_article_element( medline_citation.find('Article'), prepend_title=prepend_title ) result['abstract'] = abstract # Add to dict results[article_info['pmid']] = result return results
[ "def", "get_metadata_from_xml_tree", "(", "tree", ",", "get_issns_from_nlm", "=", "False", ",", "get_abstracts", "=", "False", ",", "prepend_title", "=", "False", ",", "mesh_annotations", "=", "False", ")", ":", "# Iterate over the articles and build the results dict", "results", "=", "{", "}", "pm_articles", "=", "tree", ".", "findall", "(", "'./PubmedArticle'", ")", "for", "art_ix", ",", "pm_article", "in", "enumerate", "(", "pm_articles", ")", ":", "medline_citation", "=", "pm_article", ".", "find", "(", "'./MedlineCitation'", ")", "article_info", "=", "_get_article_info", "(", "medline_citation", ",", "pm_article", ".", "find", "(", "'PubmedData'", ")", ")", "journal_info", "=", "_get_journal_info", "(", "medline_citation", ",", "get_issns_from_nlm", ")", "context_info", "=", "_get_annotations", "(", "medline_citation", ")", "# Build the result", "result", "=", "{", "}", "result", ".", "update", "(", "article_info", ")", "result", ".", "update", "(", "journal_info", ")", "result", ".", "update", "(", "context_info", ")", "# Get the abstracts if requested", "if", "get_abstracts", ":", "abstract", "=", "_abstract_from_article_element", "(", "medline_citation", ".", "find", "(", "'Article'", ")", ",", "prepend_title", "=", "prepend_title", ")", "result", "[", "'abstract'", "]", "=", "abstract", "# Add to dict", "results", "[", "article_info", "[", "'pmid'", "]", "]", "=", "result", "return", "results" ]
Get metadata for an XML tree containing PubmedArticle elements. Documentation on the XML structure can be found at: - https://www.nlm.nih.gov/bsd/licensee/elements_descriptions.html - https://www.nlm.nih.gov/bsd/licensee/elements_alphabetical.html Parameters ---------- tree : xml.etree.ElementTree ElementTree containing one or more PubmedArticle elements. get_issns_from_nlm : boolean Look up the full list of ISSN number for the journal associated with the article, which helps to match articles to CrossRef search results. Defaults to False, since it slows down performance. get_abstracts : boolean Indicates whether to include the Pubmed abstract in the results. prepend_title : boolean If get_abstracts is True, specifies whether the article title should be prepended to the abstract text. mesh_annotations : boolean If True, extract mesh annotations from the pubmed entries and include in the returned data. If false, don't. Returns ------- dict of dicts Dictionary indexed by PMID. Each value is a dict containing the following fields: 'doi', 'title', 'authors', 'journal_title', 'journal_abbrev', 'journal_nlm_id', 'issn_list', 'page'.
[ "Get", "metadata", "for", "an", "XML", "tree", "containing", "PubmedArticle", "elements", "." ]
python
train
40.080645
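A hedged end-to-end sketch; it assumes a PubMed EFetch result saved to disk, since the helper only needs an ElementTree containing PubmedArticle elements:

import xml.etree.ElementTree as ET
from indra.literature.pubmed_client import get_metadata_from_xml_tree

tree = ET.parse('pubmed_efetch_result.xml')   # hypothetical file
metadata = get_metadata_from_xml_tree(tree, get_abstracts=True, prepend_title=True)
for pmid, info in metadata.items():
    print(pmid, info['title'])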
glitchassassin/lackey
lackey/__init__.py
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/__init__.py#L204-L214
def inputText(message="", title="Lackey Input", lines=9, width=20, text=""): """ Creates a textarea dialog with the specified message and default text. Returns the entered value. """ root = tk.Tk() input_text = tk.StringVar() input_text.set(text) PopupTextarea(root, message, title, lines, width, input_text) root.focus_force() root.mainloop() return str(input_text.get())
[ "def", "inputText", "(", "message", "=", "\"\"", ",", "title", "=", "\"Lackey Input\"", ",", "lines", "=", "9", ",", "width", "=", "20", ",", "text", "=", "\"\"", ")", ":", "root", "=", "tk", ".", "Tk", "(", ")", "input_text", "=", "tk", ".", "StringVar", "(", ")", "input_text", ".", "set", "(", "text", ")", "PopupTextarea", "(", "root", ",", "message", ",", "title", ",", "lines", ",", "width", ",", "input_text", ")", "root", ".", "focus_force", "(", ")", "root", ".", "mainloop", "(", ")", "return", "str", "(", "input_text", ".", "get", "(", ")", ")" ]
Creates a textarea dialog with the specified message and default text. Returns the entered value.
[ "Creates", "a", "textarea", "dialog", "with", "the", "specified", "message", "and", "default", "text", "." ]
python
train
36.272727
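A minimal call, assuming a desktop session where Tk can open a window; Lackey exposes the helper at package level:

import lackey

notes = lackey.inputText("Describe the defect:", title="Bug report",
                         lines=5, width=40, text="steps to reproduce...")
print(notes)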
explosion/spaCy
spacy/lang/de/syntax_iterators.py
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/lang/de/syntax_iterators.py#L7-L46
def noun_chunks(obj): """ Detect base noun phrases from a dependency parse. Works on both Doc and Span. """ # this iterator extracts spans headed by NOUNs starting from the left-most # syntactic dependent until the NOUN itself for close apposition and # measurement construction, the span is sometimes extended to the right of # the NOUN. Example: "eine Tasse Tee" (a cup (of) tea) returns "eine Tasse Tee" # and not just "eine Tasse", same for "das Thema Familie". labels = [ "sb", "oa", "da", "nk", "mo", "ag", "ROOT", "root", "cj", "pd", "og", "app", ] doc = obj.doc # Ensure works on both Doc and Span. np_label = doc.vocab.strings.add("NP") np_deps = set(doc.vocab.strings.add(label) for label in labels) close_app = doc.vocab.strings.add("nk") rbracket = 0 for i, word in enumerate(obj): if i < rbracket: continue if word.pos in (NOUN, PROPN, PRON) and word.dep in np_deps: rbracket = word.i + 1 # try to extend the span to the right # to capture close apposition/measurement constructions for rdep in doc[word.i].rights: if rdep.pos in (NOUN, PROPN) and rdep.dep == close_app: rbracket = rdep.i + 1 yield word.left_edge.i, rbracket, np_label
[ "def", "noun_chunks", "(", "obj", ")", ":", "# this iterator extracts spans headed by NOUNs starting from the left-most", "# syntactic dependent until the NOUN itself for close apposition and", "# measurement construction, the span is sometimes extended to the right of", "# the NOUN. Example: \"eine Tasse Tee\" (a cup (of) tea) returns \"eine Tasse Tee\"", "# and not just \"eine Tasse\", same for \"das Thema Familie\".", "labels", "=", "[", "\"sb\"", ",", "\"oa\"", ",", "\"da\"", ",", "\"nk\"", ",", "\"mo\"", ",", "\"ag\"", ",", "\"ROOT\"", ",", "\"root\"", ",", "\"cj\"", ",", "\"pd\"", ",", "\"og\"", ",", "\"app\"", ",", "]", "doc", "=", "obj", ".", "doc", "# Ensure works on both Doc and Span.", "np_label", "=", "doc", ".", "vocab", ".", "strings", ".", "add", "(", "\"NP\"", ")", "np_deps", "=", "set", "(", "doc", ".", "vocab", ".", "strings", ".", "add", "(", "label", ")", "for", "label", "in", "labels", ")", "close_app", "=", "doc", ".", "vocab", ".", "strings", ".", "add", "(", "\"nk\"", ")", "rbracket", "=", "0", "for", "i", ",", "word", "in", "enumerate", "(", "obj", ")", ":", "if", "i", "<", "rbracket", ":", "continue", "if", "word", ".", "pos", "in", "(", "NOUN", ",", "PROPN", ",", "PRON", ")", "and", "word", ".", "dep", "in", "np_deps", ":", "rbracket", "=", "word", ".", "i", "+", "1", "# try to extend the span to the right", "# to capture close apposition/measurement constructions", "for", "rdep", "in", "doc", "[", "word", ".", "i", "]", ".", "rights", ":", "if", "rdep", ".", "pos", "in", "(", "NOUN", ",", "PROPN", ")", "and", "rdep", ".", "dep", "==", "close_app", ":", "rbracket", "=", "rdep", ".", "i", "+", "1", "yield", "word", ".", "left_edge", ".", "i", ",", "rbracket", ",", "np_label" ]
Detect base noun phrases from a dependency parse. Works on both Doc and Span.
[ "Detect", "base", "noun", "phrases", "from", "a", "dependency", "parse", ".", "Works", "on", "both", "Doc", "and", "Span", "." ]
python
train
34.875
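A hedged illustration; spaCy normally wires this iterator in as `doc.noun_chunks` for German pipelines, but it can also be called directly on a parsed Doc (the small German model is assumed to be installed):

import spacy
from spacy.lang.de.syntax_iterators import noun_chunks

nlp = spacy.load("de_core_news_sm")
doc = nlp("Der Entwickler trinkt eine Tasse Tee im Büro.")
for start, end, label in noun_chunks(doc):
    print(doc[start:end].text)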
chaoss/grimoirelab-elk
grimoire_elk/enriched/study_ceres_onion.py
https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/enriched/study_ceres_onion.py#L133-L180
def write(self, items): """Write items into ElasticSearch. :param items: Pandas DataFrame """ if self._read_only: raise IOError("Cannot write, Connector created as Read Only") if len(items) == 0: logger.info(self.__log_prefix + " Nothing to write") return # Uploading info to the new ES rows = items.to_dict("index") docs = [] for row_index in rows.keys(): row = rows[row_index] item_id = row[self.AUTHOR_ORG] + '_' + row[self.PROJECT] + '_' \ + row[self.TIMEFRAME] + '_' + row[self.AUTHOR_UUID] item_id = item_id.replace(' ', '').lower() doc = { "_index": self._es_index, "_type": "item", "_id": item_id, "_source": row } docs.append(doc) # TODO uncomment following lines for incremental version # # Delete old data if exists to ensure refreshing in case of deleted commits # timeframe = docs[0]['_source']['timeframe'] # org = docs[0]['_source']['author_org_name'] # project = docs[0]['_source']['project'] # s = Search(using=self._es_conn, index=self._es_index) # s = s.filter('term', project=project) # s = s.filter('term', author_org_name=org) # s = s.filter('term', timeframe=timeframe) # response = s.execute() # # if response.hits.total > 0: # response = s.delete() # logger.info("[Onion] Deleted " + str(response.deleted) + " items for refreshing: " + timeframe + " " # + org + " " + project) # TODO exception and error handling helpers.bulk(self._es_conn, docs) logger.info(self.__log_prefix + " Written: " + str(len(docs)))
[ "def", "write", "(", "self", ",", "items", ")", ":", "if", "self", ".", "_read_only", ":", "raise", "IOError", "(", "\"Cannot write, Connector created as Read Only\"", ")", "if", "len", "(", "items", ")", "==", "0", ":", "logger", ".", "info", "(", "self", ".", "__log_prefix", "+", "\" Nothing to write\"", ")", "return", "# Uploading info to the new ES", "rows", "=", "items", ".", "to_dict", "(", "\"index\"", ")", "docs", "=", "[", "]", "for", "row_index", "in", "rows", ".", "keys", "(", ")", ":", "row", "=", "rows", "[", "row_index", "]", "item_id", "=", "row", "[", "self", ".", "AUTHOR_ORG", "]", "+", "'_'", "+", "row", "[", "self", ".", "PROJECT", "]", "+", "'_'", "+", "row", "[", "self", ".", "TIMEFRAME", "]", "+", "'_'", "+", "row", "[", "self", ".", "AUTHOR_UUID", "]", "item_id", "=", "item_id", ".", "replace", "(", "' '", ",", "''", ")", ".", "lower", "(", ")", "doc", "=", "{", "\"_index\"", ":", "self", ".", "_es_index", ",", "\"_type\"", ":", "\"item\"", ",", "\"_id\"", ":", "item_id", ",", "\"_source\"", ":", "row", "}", "docs", ".", "append", "(", "doc", ")", "# TODO uncomment following lines for incremental version", "# # Delete old data if exists to ensure refreshing in case of deleted commits", "# timeframe = docs[0]['_source']['timeframe']", "# org = docs[0]['_source']['author_org_name']", "# project = docs[0]['_source']['project']", "# s = Search(using=self._es_conn, index=self._es_index)", "# s = s.filter('term', project=project)", "# s = s.filter('term', author_org_name=org)", "# s = s.filter('term', timeframe=timeframe)", "# response = s.execute()", "#", "# if response.hits.total > 0:", "# response = s.delete()", "# logger.info(\"[Onion] Deleted \" + str(response.deleted) + \" items for refreshing: \" + timeframe + \" \"", "# + org + \" \" + project)", "# TODO exception and error handling", "helpers", ".", "bulk", "(", "self", ".", "_es_conn", ",", "docs", ")", "logger", ".", "info", "(", "self", ".", "__log_prefix", "+", "\" Written: \"", "+", "str", "(", "len", "(", "docs", ")", ")", ")" ]
Write items into ElasticSearch. :param items: Pandas DataFrame
[ "Write", "items", "into", "ElasticSearch", "." ]
python
train
37.9375
GuiltyTargets/ppi-network-annotation
src/ppi_network_annotation/model/filtered_network.py
https://github.com/GuiltyTargets/ppi-network-annotation/blob/4d7b6713485f2d0a0957e6457edc1b1b5a237460/src/ppi_network_annotation/model/filtered_network.py#L27-L41
def get_upregulated_genes_network(self) -> Graph: """Get the graph of up-regulated genes. :return Graph: Graph of up-regulated genes. """ logger.info("In get_upregulated_genes_network()") deg_graph = self.graph.copy() # deep copy graph not_diff_expr = self.graph.vs(up_regulated_eq=False) # delete genes which are not differentially expressed or have no connections to others deg_graph.delete_vertices(not_diff_expr.indices) deg_graph.delete_vertices(deg_graph.vs.select(_degree_eq=0)) return deg_graph
[ "def", "get_upregulated_genes_network", "(", "self", ")", "->", "Graph", ":", "logger", ".", "info", "(", "\"In get_upregulated_genes_network()\"", ")", "deg_graph", "=", "self", ".", "graph", ".", "copy", "(", ")", "# deep copy graph", "not_diff_expr", "=", "self", ".", "graph", ".", "vs", "(", "up_regulated_eq", "=", "False", ")", "# delete genes which are not differentially expressed or have no connections to others", "deg_graph", ".", "delete_vertices", "(", "not_diff_expr", ".", "indices", ")", "deg_graph", ".", "delete_vertices", "(", "deg_graph", ".", "vs", ".", "select", "(", "_degree_eq", "=", "0", ")", ")", "return", "deg_graph" ]
Get the graph of up-regulated genes. :return Graph: Graph of up-regulated genes.
[ "Get", "the", "graph", "of", "up", "-", "regulated", "genes", "." ]
python
train
38.2
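The function above leans on igraph's attribute-based vertex selection (vs(up_regulated_eq=False)) and structural selection (_degree_eq=0). A self-contained sketch of the same filtering on a four-gene toy graph; it assumes python-igraph is installed and the data is invented:

import igraph

g = igraph.Graph(edges=[(0, 1), (1, 2), (2, 3)])
g.vs["up_regulated"] = [True, True, False, False]

deg_graph = g.copy()                                    # work on a deep copy
not_up = deg_graph.vs.select(up_regulated_eq=False)     # genes failing the filter
deg_graph.delete_vertices(not_up.indices)
deg_graph.delete_vertices(deg_graph.vs.select(_degree_eq=0))  # drop isolated genes

print(deg_graph.vcount())   # 2: the connected, up-regulated pair

Deleting the isolated vertices afterwards matters because removing the non-up-regulated genes can leave up-regulated genes with no remaining edges.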
spulec/moto
moto/batch/models.py
https://github.com/spulec/moto/blob/4a286c4bc288933bb023396e2784a6fdbb966bc9/moto/batch/models.py#L669-L716
def _validate_compute_resources(self, cr): """ Checks contents of sub dictionary for managed clusters :param cr: computeResources :type cr: dict """ for param in ('instanceRole', 'maxvCpus', 'minvCpus', 'instanceTypes', 'securityGroupIds', 'subnets', 'type'): if param not in cr: raise InvalidParameterValueException('computeResources must contain {0}'.format(param)) if self.iam_backend.get_role_by_arn(cr['instanceRole']) is None: raise InvalidParameterValueException('could not find instanceRole {0}'.format(cr['instanceRole'])) if cr['maxvCpus'] < 0: raise InvalidParameterValueException('maxVCpus must be positive') if cr['minvCpus'] < 0: raise InvalidParameterValueException('minVCpus must be positive') if cr['maxvCpus'] < cr['minvCpus']: raise InvalidParameterValueException('maxVCpus must be greater than minvCpus') if len(cr['instanceTypes']) == 0: raise InvalidParameterValueException('At least 1 instance type must be provided') for instance_type in cr['instanceTypes']: if instance_type == 'optimal': pass # Optimal should pick from latest of current gen elif instance_type not in EC2_INSTANCE_TYPES: raise InvalidParameterValueException('Instance type {0} does not exist'.format(instance_type)) for sec_id in cr['securityGroupIds']: if self.ec2_backend.get_security_group_from_id(sec_id) is None: raise InvalidParameterValueException('security group {0} does not exist'.format(sec_id)) if len(cr['securityGroupIds']) == 0: raise InvalidParameterValueException('At least 1 security group must be provided') for subnet_id in cr['subnets']: try: self.ec2_backend.get_subnet(subnet_id) except InvalidSubnetIdError: raise InvalidParameterValueException('subnet {0} does not exist'.format(subnet_id)) if len(cr['subnets']) == 0: raise InvalidParameterValueException('At least 1 subnet must be provided') if cr['type'] not in ('EC2', 'SPOT'): raise InvalidParameterValueException('computeResources.type must be either EC2 | SPOT') if cr['type'] == 'SPOT': raise InternalFailure('SPOT NOT SUPPORTED YET')
[ "def", "_validate_compute_resources", "(", "self", ",", "cr", ")", ":", "for", "param", "in", "(", "'instanceRole'", ",", "'maxvCpus'", ",", "'minvCpus'", ",", "'instanceTypes'", ",", "'securityGroupIds'", ",", "'subnets'", ",", "'type'", ")", ":", "if", "param", "not", "in", "cr", ":", "raise", "InvalidParameterValueException", "(", "'computeResources must contain {0}'", ".", "format", "(", "param", ")", ")", "if", "self", ".", "iam_backend", ".", "get_role_by_arn", "(", "cr", "[", "'instanceRole'", "]", ")", "is", "None", ":", "raise", "InvalidParameterValueException", "(", "'could not find instanceRole {0}'", ".", "format", "(", "cr", "[", "'instanceRole'", "]", ")", ")", "if", "cr", "[", "'maxvCpus'", "]", "<", "0", ":", "raise", "InvalidParameterValueException", "(", "'maxVCpus must be positive'", ")", "if", "cr", "[", "'minvCpus'", "]", "<", "0", ":", "raise", "InvalidParameterValueException", "(", "'minVCpus must be positive'", ")", "if", "cr", "[", "'maxvCpus'", "]", "<", "cr", "[", "'minvCpus'", "]", ":", "raise", "InvalidParameterValueException", "(", "'maxVCpus must be greater than minvCpus'", ")", "if", "len", "(", "cr", "[", "'instanceTypes'", "]", ")", "==", "0", ":", "raise", "InvalidParameterValueException", "(", "'At least 1 instance type must be provided'", ")", "for", "instance_type", "in", "cr", "[", "'instanceTypes'", "]", ":", "if", "instance_type", "==", "'optimal'", ":", "pass", "# Optimal should pick from latest of current gen", "elif", "instance_type", "not", "in", "EC2_INSTANCE_TYPES", ":", "raise", "InvalidParameterValueException", "(", "'Instance type {0} does not exist'", ".", "format", "(", "instance_type", ")", ")", "for", "sec_id", "in", "cr", "[", "'securityGroupIds'", "]", ":", "if", "self", ".", "ec2_backend", ".", "get_security_group_from_id", "(", "sec_id", ")", "is", "None", ":", "raise", "InvalidParameterValueException", "(", "'security group {0} does not exist'", ".", "format", "(", "sec_id", ")", ")", "if", "len", "(", "cr", "[", "'securityGroupIds'", "]", ")", "==", "0", ":", "raise", "InvalidParameterValueException", "(", "'At least 1 security group must be provided'", ")", "for", "subnet_id", "in", "cr", "[", "'subnets'", "]", ":", "try", ":", "self", ".", "ec2_backend", ".", "get_subnet", "(", "subnet_id", ")", "except", "InvalidSubnetIdError", ":", "raise", "InvalidParameterValueException", "(", "'subnet {0} does not exist'", ".", "format", "(", "subnet_id", ")", ")", "if", "len", "(", "cr", "[", "'subnets'", "]", ")", "==", "0", ":", "raise", "InvalidParameterValueException", "(", "'At least 1 subnet must be provided'", ")", "if", "cr", "[", "'type'", "]", "not", "in", "(", "'EC2'", ",", "'SPOT'", ")", ":", "raise", "InvalidParameterValueException", "(", "'computeResources.type must be either EC2 | SPOT'", ")", "if", "cr", "[", "'type'", "]", "==", "'SPOT'", ":", "raise", "InternalFailure", "(", "'SPOT NOT SUPPORTED YET'", ")" ]
Checks contents of sub dictionary for managed clusters :param cr: computeResources :type cr: dict
[ "Checks", "contents", "of", "sub", "dictionary", "for", "managed", "clusters" ]
python
train
49.833333
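The validator above is a chain of presence, range, cross-field and membership checks, each raising a parameter-value error with a formatted message. A stripped-down, library-free sketch of that pattern (a hypothetical subset of the fields, plain ValueError instead of moto's exception classes):

def validate_compute_resources(cr):
    # Presence checks first, so the later lookups cannot raise KeyError.
    for param in ("maxvCpus", "minvCpus", "subnets"):
        if param not in cr:
            raise ValueError("computeResources must contain {0}".format(param))
    # Range checks.
    if cr["maxvCpus"] < 0 or cr["minvCpus"] < 0:
        raise ValueError("vCPU counts must not be negative")
    # Cross-field check.
    if cr["maxvCpus"] < cr["minvCpus"]:
        raise ValueError("maxvCpus must be greater than minvCpus")
    # Non-empty collection check.
    if len(cr["subnets"]) == 0:
        raise ValueError("At least 1 subnet must be provided")

validate_compute_resources({"maxvCpus": 4, "minvCpus": 1, "subnets": ["subnet-1"]})  # passes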
skelsec/minidump
minidump/minidumpreader.py
https://github.com/skelsec/minidump/blob/0c4dcabe6f11d7a403440919ffa9e3c9889c5212/minidump/minidumpreader.py#L172-L184
def find_all(self, pattern): """ Searches for all occurrences of a pattern in the current memory segment, returns all occurrences as a list """ pos = [] last_found = -1 while True: last_found = self.current_segment.data.find(pattern, last_found + 1) if last_found == -1: break pos.append(last_found + self.current_segment.start_address) return pos
[ "def", "find_all", "(", "self", ",", "pattern", ")", ":", "pos", "=", "[", "]", "last_found", "=", "-", "1", "while", "True", ":", "last_found", "=", "self", ".", "current_segment", ".", "data", ".", "find", "(", "pattern", ",", "last_found", "+", "1", ")", "if", "last_found", "==", "-", "1", ":", "break", "pos", ".", "append", "(", "last_found", "+", "self", ".", "current_segment", ".", "start_address", ")", "return", "pos" ]
Searches for all occurrences of a pattern in the current memory segment, returns all occurrences as a list
[ "Searches", "for", "all", "occurrences", "of", "a", "pattern", "in", "the", "current", "memory", "segment", "returns", "all", "occurrences", "as", "a", "list" ]
python
train
28.153846
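find_all is the usual "find, then resume one byte past the previous hit" loop, with every offset rebased onto the segment's start address. The same loop on a plain bytes buffer, with made-up data and base address:

def find_all(data, pattern, base_address=0):
    positions = []
    last_found = -1
    while True:
        last_found = data.find(pattern, last_found + 1)
        if last_found == -1:
            break
        positions.append(last_found + base_address)
    return positions

print(find_all(b"abcabcabc", b"abc", base_address=0x1000))
# [4096, 4099, 4102]

Because the search resumes at last_found + 1 rather than past the whole pattern, overlapping occurrences are reported as well.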
ibis-project/ibis
ibis/clickhouse/client.py
https://github.com/ibis-project/ibis/blob/1e39a5fd9ef088b45c155e8a5f541767ee8ef2e7/ibis/clickhouse/client.py#L266-L294
def list_tables(self, like=None, database=None): """ List tables in the current (or indicated) database. Like the SHOW TABLES command in the clickhouse-shell. Parameters ---------- like : string, default None e.g. 'foo*' to match all tables starting with 'foo' database : string, default None If not passed, uses the current/default database Returns ------- tables : list of strings """ statement = 'SHOW TABLES' if database: statement += " FROM `{0}`".format(database) if like: m = fully_qualified_re.match(like) if m: database, quoted, unquoted = m.groups() like = quoted or unquoted return self.list_tables(like=like, database=database) statement += " LIKE '{0}'".format(like) data, _, _ = self.raw_sql(statement, results=True) return data[0]
[ "def", "list_tables", "(", "self", ",", "like", "=", "None", ",", "database", "=", "None", ")", ":", "statement", "=", "'SHOW TABLES'", "if", "database", ":", "statement", "+=", "\" FROM `{0}`\"", ".", "format", "(", "database", ")", "if", "like", ":", "m", "=", "fully_qualified_re", ".", "match", "(", "like", ")", "if", "m", ":", "database", ",", "quoted", ",", "unquoted", "=", "m", ".", "groups", "(", ")", "like", "=", "quoted", "or", "unquoted", "return", "self", ".", "list_tables", "(", "like", "=", "like", ",", "database", "=", "database", ")", "statement", "+=", "\" LIKE '{0}'\"", ".", "format", "(", "like", ")", "data", ",", "_", ",", "_", "=", "self", ".", "raw_sql", "(", "statement", ",", "results", "=", "True", ")", "return", "data", "[", "0", "]" ]
List tables in the current (or indicated) database. Like the SHOW TABLES command in the clickhouse-shell. Parameters ---------- like : string, default None e.g. 'foo*' to match all tables starting with 'foo' database : string, default None If not passed, uses the current/default database Returns ------- tables : list of strings
[ "List", "tables", "in", "the", "current", "(", "or", "indicated", ")", "database", ".", "Like", "the", "SHOW", "TABLES", "command", "in", "the", "clickhouse", "-", "shell", "." ]
python
train
33.241379
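list_tables grows the SHOW TABLES statement piece by piece and, when the LIKE pattern is itself fully qualified (database.table), recurses with the two parts split out. A sketch of just that statement-building logic; the regex below is a simplified stand-in for ibis's fully_qualified_re, which also handles backtick quoting:

import re

# Simplified stand-in: matches "db.table" into ("db", "table").
fully_qualified_re = re.compile(r"^(\w+)\.(\w+)$")

def build_show_tables(like=None, database=None):
    statement = "SHOW TABLES"
    if database:
        statement += " FROM `{0}`".format(database)
    if like:
        m = fully_qualified_re.match(like)
        if m:
            database, like = m.groups()
            return build_show_tables(like=like, database=database)
        statement += " LIKE '{0}'".format(like)
    return statement

print(build_show_tables(like="tmp.events"))
# SHOW TABLES FROM `tmp` LIKE 'events'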
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/json_format.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/json_format.py#L285-L299
def _ValueMessageToJsonObject(self, message): """Converts Value message according to Proto3 JSON Specification.""" which = message.WhichOneof('kind') # If the Value message is not set treat as null_value when serialize # to JSON. The parse back result will be different from original message. if which is None or which == 'null_value': return None if which == 'list_value': return self._ListValueMessageToJsonObject(message.list_value) if which == 'struct_value': value = message.struct_value else: value = getattr(message, which) oneof_descriptor = message.DESCRIPTOR.fields_by_name[which] return self._FieldToJsonObject(oneof_descriptor, value)
[ "def", "_ValueMessageToJsonObject", "(", "self", ",", "message", ")", ":", "which", "=", "message", ".", "WhichOneof", "(", "'kind'", ")", "# If the Value message is not set treat as null_value when serialize", "# to JSON. The parse back result will be different from original message.", "if", "which", "is", "None", "or", "which", "==", "'null_value'", ":", "return", "None", "if", "which", "==", "'list_value'", ":", "return", "self", ".", "_ListValueMessageToJsonObject", "(", "message", ".", "list_value", ")", "if", "which", "==", "'struct_value'", ":", "value", "=", "message", ".", "struct_value", "else", ":", "value", "=", "getattr", "(", "message", ",", "which", ")", "oneof_descriptor", "=", "message", ".", "DESCRIPTOR", ".", "fields_by_name", "[", "which", "]", "return", "self", ".", "_FieldToJsonObject", "(", "oneof_descriptor", ",", "value", ")" ]
Converts Value message according to Proto3 JSON Specification.
[ "Converts", "Value", "message", "according", "to", "Proto3", "JSON", "Specification", "." ]
python
train
46.4
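The converter above dispatches on WhichOneof('kind') of the well-known Value message. A short demonstration of that dispatch point using the real struct_pb2 types; it only assumes the protobuf package is installed:

from google.protobuf import struct_pb2

print(struct_pb2.Value(string_value="hello").WhichOneof("kind"))   # string_value

nested = struct_pb2.Value(
    list_value=struct_pb2.ListValue(values=[struct_pb2.Value(number_value=1.5)]))
print(nested.WhichOneof("kind"))                                   # list_value

unset = struct_pb2.Value()
print(unset.WhichOneof("kind"))  # None -> treated as JSON null, as the comment above notes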
twoolie/NBT
nbt/region.py
https://github.com/twoolie/NBT/blob/b06dd6cc8117d2788da1d8416e642d58bad45762/nbt/region.py#L754-L759
def _classname(self): """Return the fully qualified class name.""" if self.__class__.__module__ in (None,): return self.__class__.__name__ else: return "%s.%s" % (self.__class__.__module__, self.__class__.__name__)
[ "def", "_classname", "(", "self", ")", ":", "if", "self", ".", "__class__", ".", "__module__", "in", "(", "None", ",", ")", ":", "return", "self", ".", "__class__", ".", "__name__", "else", ":", "return", "\"%s.%s\"", "%", "(", "self", ".", "__class__", ".", "__module__", ",", "self", ".", "__class__", ".", "__name__", ")" ]
Return the fully qualified class name.
[ "Return", "the", "fully", "qualified", "class", "name", "." ]
python
train
42.833333
oauthlib/oauthlib
oauthlib/oauth1/rfc5849/__init__.py
https://github.com/oauthlib/oauthlib/blob/30321dd3c0ca784d3508a1970cf90d9f76835c79/oauthlib/oauth1/rfc5849/__init__.py#L188-L223
def _render(self, request, formencode=False, realm=None): """Render a signed request according to signature type Returns a 3-tuple containing the request URI, headers, and body. If the formencode argument is True and the body contains parameters, it is escaped and returned as a valid formencoded string. """ # TODO what if there are body params on a header-type auth? # TODO what if there are query params on a body-type auth? uri, headers, body = request.uri, request.headers, request.body # TODO: right now these prepare_* methods are very narrow in scope--they # only affect their little thing. In some cases (for example, with # header auth) it might be advantageous to allow these methods to touch # other parts of the request, like the headers—so the prepare_headers # method could also set the Content-Type header to x-www-form-urlencoded # like the spec requires. This would be a fundamental change though, and # I'm not sure how I feel about it. if self.signature_type == SIGNATURE_TYPE_AUTH_HEADER: headers = parameters.prepare_headers( request.oauth_params, request.headers, realm=realm) elif self.signature_type == SIGNATURE_TYPE_BODY and request.decoded_body is not None: body = parameters.prepare_form_encoded_body( request.oauth_params, request.decoded_body) if formencode: body = urlencode(body) headers['Content-Type'] = 'application/x-www-form-urlencoded' elif self.signature_type == SIGNATURE_TYPE_QUERY: uri = parameters.prepare_request_uri_query( request.oauth_params, request.uri) else: raise ValueError('Unknown signature type specified.') return uri, headers, body
[ "def", "_render", "(", "self", ",", "request", ",", "formencode", "=", "False", ",", "realm", "=", "None", ")", ":", "# TODO what if there are body params on a header-type auth?", "# TODO what if there are query params on a body-type auth?", "uri", ",", "headers", ",", "body", "=", "request", ".", "uri", ",", "request", ".", "headers", ",", "request", ".", "body", "# TODO: right now these prepare_* methods are very narrow in scope--they", "# only affect their little thing. In some cases (for example, with", "# header auth) it might be advantageous to allow these methods to touch", "# other parts of the request, like the headers—so the prepare_headers", "# method could also set the Content-Type header to x-www-form-urlencoded", "# like the spec requires. This would be a fundamental change though, and", "# I'm not sure how I feel about it.", "if", "self", ".", "signature_type", "==", "SIGNATURE_TYPE_AUTH_HEADER", ":", "headers", "=", "parameters", ".", "prepare_headers", "(", "request", ".", "oauth_params", ",", "request", ".", "headers", ",", "realm", "=", "realm", ")", "elif", "self", ".", "signature_type", "==", "SIGNATURE_TYPE_BODY", "and", "request", ".", "decoded_body", "is", "not", "None", ":", "body", "=", "parameters", ".", "prepare_form_encoded_body", "(", "request", ".", "oauth_params", ",", "request", ".", "decoded_body", ")", "if", "formencode", ":", "body", "=", "urlencode", "(", "body", ")", "headers", "[", "'Content-Type'", "]", "=", "'application/x-www-form-urlencoded'", "elif", "self", ".", "signature_type", "==", "SIGNATURE_TYPE_QUERY", ":", "uri", "=", "parameters", ".", "prepare_request_uri_query", "(", "request", ".", "oauth_params", ",", "request", ".", "uri", ")", "else", ":", "raise", "ValueError", "(", "'Unknown signature type specified.'", ")", "return", "uri", ",", "headers", ",", "body" ]
Render a signed request according to signature type Returns a 3-tuple containing the request URI, headers, and body. If the formencode argument is True and the body contains parameters, it is escaped and returned as a valid formencoded string.
[ "Render", "a", "signed", "request", "according", "to", "signature", "type" ]
python
train
51.527778
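Depending on the configured signature type, _render places the oauth parameters in the Authorization header, the form-encoded body, or the query string. A rough, dependency-free sketch of the body branch only, using urlencode the way the snippet does; the parameter values are invented and the real prepare_form_encoded_body does more than this:

from urllib.parse import urlencode

oauth_params = [
    ("oauth_consumer_key", "key"),
    ("oauth_nonce", "abc123"),
    ("oauth_signature", "c2ln"),
]
decoded_body = [("status", "hello world")]

# SIGNATURE_TYPE_BODY with formencode=True: merge, then re-encode the form body.
body = urlencode(decoded_body + oauth_params)
headers = {"Content-Type": "application/x-www-form-urlencoded"}

print(body)
# status=hello+world&oauth_consumer_key=key&oauth_nonce=abc123&oauth_signature=c2ln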
gwpy/gwpy
gwpy/table/io/fetch.py
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/table/io/fetch.py#L67-L95
def get_fetcher(data_format, data_class): """Return the :meth:`~EventTable.fetch` function for the given format Parameters ---------- data_format : `str` name of the format data_class : `type` the class that the fetcher returns Raises ------ astropy.io.registry.IORegistryError if not registration is found matching ``data_format`` """ # this is a copy of astropy.io.regsitry.get_reader fetchers = [(fmt, cls) for fmt, cls in _FETCHERS if fmt == data_format] for fetch_fmt, fetch_cls in fetchers: if io_registry._is_best_match(data_class, fetch_cls, fetchers): return _FETCHERS[(fetch_fmt, fetch_cls)][0] else: formats = [fmt for fmt, cls in _FETCHERS if io_registry._is_best_match(fmt, cls, fetchers)] formatstr = '\n'.join(sorted(formats)) raise IORegistryError( "No fetcher definer for format '{0}' and class '{1}'.\n" "The available formats are:\n{2}".format( data_format, data_class.__name__, formatstr))
[ "def", "get_fetcher", "(", "data_format", ",", "data_class", ")", ":", "# this is a copy of astropy.io.regsitry.get_reader", "fetchers", "=", "[", "(", "fmt", ",", "cls", ")", "for", "fmt", ",", "cls", "in", "_FETCHERS", "if", "fmt", "==", "data_format", "]", "for", "fetch_fmt", ",", "fetch_cls", "in", "fetchers", ":", "if", "io_registry", ".", "_is_best_match", "(", "data_class", ",", "fetch_cls", ",", "fetchers", ")", ":", "return", "_FETCHERS", "[", "(", "fetch_fmt", ",", "fetch_cls", ")", "]", "[", "0", "]", "else", ":", "formats", "=", "[", "fmt", "for", "fmt", ",", "cls", "in", "_FETCHERS", "if", "io_registry", ".", "_is_best_match", "(", "fmt", ",", "cls", ",", "fetchers", ")", "]", "formatstr", "=", "'\\n'", ".", "join", "(", "sorted", "(", "formats", ")", ")", "raise", "IORegistryError", "(", "\"No fetcher definer for format '{0}' and class '{1}'.\\n\"", "\"The available formats are:\\n{2}\"", ".", "format", "(", "data_format", ",", "data_class", ".", "__name__", ",", "formatstr", ")", ")" ]
Return the :meth:`~EventTable.fetch` function for the given format

    Parameters
    ----------
    data_format : `str`
        name of the format

    data_class : `type`
        the class that the fetcher returns

    Raises
    ------
    astropy.io.registry.IORegistryError
        if no registration is found matching ``data_format``
[ "Return", "the", ":", "meth", ":", "~EventTable", ".", "fetch", "function", "for", "the", "given", "format" ]
python
train
36.655172
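get_fetcher mirrors astropy's reader lookup: filter the (format, class) registry, return the matching callable, and otherwise raise with the list of formats that would have worked. A toy version of that lookup built on a plain dict; the names here are invented for illustration and astropy's best-match logic is omitted:

_FETCHERS = {}

def register_fetcher(data_format, data_class, func):
    _FETCHERS[(data_format, data_class)] = func

def get_fetcher(data_format, data_class):
    try:
        return _FETCHERS[(data_format, data_class)]
    except KeyError:
        formats = sorted(fmt for fmt, cls in _FETCHERS if cls is data_class)
        raise LookupError(
            "No fetcher defined for format '{0}' and class '{1}'.\n"
            "The available formats are:\n{2}".format(
                data_format, data_class.__name__, "\n".join(formats)))

class EventTable(object):
    pass

register_fetcher("hacr", EventTable, lambda: "rows")
print(get_fetcher("hacr", EventTable)())   # rows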
mfcloud/python-zvm-sdk
zvmsdk/smtclient.py
https://github.com/mfcloud/python-zvm-sdk/blob/de9994ceca764f5460ce51bd74237986341d8e3c/zvmsdk/smtclient.py#L2125-L2132
def get_guest_connection_status(self, userid): '''Get guest vm connection status.''' rd = ' '.join(('getvm', userid, 'isreachable')) results = self._request(rd) if results['rs'] == 1: return True else: return False
[ "def", "get_guest_connection_status", "(", "self", ",", "userid", ")", ":", "rd", "=", "' '", ".", "join", "(", "(", "'getvm'", ",", "userid", ",", "'isreachable'", ")", ")", "results", "=", "self", ".", "_request", "(", "rd", ")", "if", "results", "[", "'rs'", "]", "==", "1", ":", "return", "True", "else", ":", "return", "False" ]
Get guest vm connection status.
[ "Get", "guest", "vm", "connection", "status", "." ]
python
train
33.875
pytroll/posttroll
posttroll/listener.py
https://github.com/pytroll/posttroll/blob/8e811a0544b5182c4a72aed074b2ff8c4324e94d/posttroll/listener.py#L105-L115
def create_subscriber(self): '''Create a subscriber instance using specified addresses and message types. ''' if self.subscriber is None: if self.topics: self.subscriber = NSSubscriber(self.services, self.topics, addr_listener=True, addresses=self.addresses, nameserver=self.nameserver) self.recv = self.subscriber.start().recv
[ "def", "create_subscriber", "(", "self", ")", ":", "if", "self", ".", "subscriber", "is", "None", ":", "if", "self", ".", "topics", ":", "self", ".", "subscriber", "=", "NSSubscriber", "(", "self", ".", "services", ",", "self", ".", "topics", ",", "addr_listener", "=", "True", ",", "addresses", "=", "self", ".", "addresses", ",", "nameserver", "=", "self", ".", "nameserver", ")", "self", ".", "recv", "=", "self", ".", "subscriber", ".", "start", "(", ")", ".", "recv" ]
Create a subscriber instance using specified addresses and message types.
[ "Create", "a", "subscriber", "instance", "using", "specified", "addresses", "and", "message", "types", "." ]
python
train
48.545455
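create_subscriber is a lazy initializer: the NSSubscriber is only built on first call, and the receive callable is rebound each time. A dependency-free sketch of that guard pattern; the subscriber class below is a stand-in, not the posttroll API:

class Listener(object):
    def __init__(self, topics):
        self.topics = topics
        self.subscriber = None
        self.recv = None

    def create_subscriber(self):
        if self.subscriber is None and self.topics:
            self.subscriber = StubSubscriber(self.topics)   # stand-in for NSSubscriber(...)
        self.recv = self.subscriber.start().recv

class StubSubscriber(object):
    def __init__(self, topics):
        self.topics = topics
    def start(self):
        return self
    def recv(self, timeout=None):
        return "message on %s" % self.topics[0]

listener = Listener(["/topic/one"])
listener.create_subscriber()
print(listener.recv())   # message on /topic/one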
cggh/scikit-allel
allel/model/ndarray.py
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L2358-L2404
def count_alleles(self, max_allele=None, subpop=None): """Count the number of calls of each allele per variant. Parameters ---------- max_allele : int, optional The highest allele index to count. Alleles greater than this index will be ignored. subpop : array_like, int, optional Indices of haplotypes to include. Returns ------- ac : AlleleCountsArray, int, shape (n_variants, n_alleles) Examples -------- >>> import allel >>> h = allel.HaplotypeArray([[0, 0, 0, 1], ... [0, 1, 1, 1], ... [0, 2, -1, -1]], dtype='i1') >>> ac = h.count_alleles() >>> ac <AlleleCountsArray shape=(3, 3) dtype=int32> 3 1 0 1 3 0 1 0 1 """ # check inputs subpop = _normalize_subpop_arg(subpop, self.shape[1]) # determine alleles to count if max_allele is None: max_allele = self.max() # use optimisations values = memoryview_safe(self.values) if subpop is None: ac = haplotype_array_count_alleles(values, max_allele) else: ac = haplotype_array_count_alleles_subpop(values, max_allele, subpop) return AlleleCountsArray(ac, copy=False)
[ "def", "count_alleles", "(", "self", ",", "max_allele", "=", "None", ",", "subpop", "=", "None", ")", ":", "# check inputs", "subpop", "=", "_normalize_subpop_arg", "(", "subpop", ",", "self", ".", "shape", "[", "1", "]", ")", "# determine alleles to count", "if", "max_allele", "is", "None", ":", "max_allele", "=", "self", ".", "max", "(", ")", "# use optimisations", "values", "=", "memoryview_safe", "(", "self", ".", "values", ")", "if", "subpop", "is", "None", ":", "ac", "=", "haplotype_array_count_alleles", "(", "values", ",", "max_allele", ")", "else", ":", "ac", "=", "haplotype_array_count_alleles_subpop", "(", "values", ",", "max_allele", ",", "subpop", ")", "return", "AlleleCountsArray", "(", "ac", ",", "copy", "=", "False", ")" ]
Count the number of calls of each allele per variant. Parameters ---------- max_allele : int, optional The highest allele index to count. Alleles greater than this index will be ignored. subpop : array_like, int, optional Indices of haplotypes to include. Returns ------- ac : AlleleCountsArray, int, shape (n_variants, n_alleles) Examples -------- >>> import allel >>> h = allel.HaplotypeArray([[0, 0, 0, 1], ... [0, 1, 1, 1], ... [0, 2, -1, -1]], dtype='i1') >>> ac = h.count_alleles() >>> ac <AlleleCountsArray shape=(3, 3) dtype=int32> 3 1 0 1 3 0 1 0 1
[ "Count", "the", "number", "of", "calls", "of", "each", "allele", "per", "variant", "." ]
python
train
28.531915
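count_alleles reduces a (variants x haplotypes) matrix to per-variant allele counts, skipping missing calls (-1); the docstring above already shows the library call. A plain numpy sketch of the same reduction, assuming only numpy rather than scikit-allel's optimised kernels:

import numpy as np

h = np.array([[0, 0, 0, 1],
              [0, 1, 1, 1],
              [0, 2, -1, -1]], dtype="i1")

max_allele = h.max()                                    # 2
ac = np.zeros((h.shape[0], max_allele + 1), dtype="i4")
for i, row in enumerate(h):
    called = row[row >= 0]                              # drop missing (-1) calls
    ac[i] = np.bincount(called, minlength=max_allele + 1)

print(ac)
# [[3 1 0]
#  [1 3 0]
#  [1 0 1]]

The output matches the AlleleCountsArray shown in the docstring's example.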
ontio/ontology-python-sdk
ontology/io/binary_reader.py
https://github.com/ontio/ontology-python-sdk/blob/ac88bdda941896c5d2ced08422a9c5179d3f9b19/ontology/io/binary_reader.py#L375-L388
def read_hashes(self): """ Read Hash values from the stream. Returns: list: a list of hash values. Each value is of the bytearray type. """ var_len = self.read_var_int() items = [] for _ in range(0, var_len): ba = bytearray(self.read_bytes(32)) ba.reverse() items.append(ba.hex()) return items
[ "def", "read_hashes", "(", "self", ")", ":", "var_len", "=", "self", ".", "read_var_int", "(", ")", "items", "=", "[", "]", "for", "_", "in", "range", "(", "0", ",", "var_len", ")", ":", "ba", "=", "bytearray", "(", "self", ".", "read_bytes", "(", "32", ")", ")", "ba", ".", "reverse", "(", ")", "items", ".", "append", "(", "ba", ".", "hex", "(", ")", ")", "return", "items" ]
Read Hash values from the stream. Returns: list: a list of hash values. Each value is of the bytearray type.
[ "Read", "Hash", "values", "from", "the", "stream", "." ]
python
train
28.071429
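Each hash is read as 32 raw bytes and then byte-reversed before hex-encoding, a common convention when hashes are serialized in little-endian order. The reverse-and-hex step on a made-up 32-byte value, with no stream class involved:

raw = bytes(range(32))      # stand-in for self.read_bytes(32)

ba = bytearray(raw)
ba.reverse()                # flip the byte order for display
print(ba.hex())
# 1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100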
pyopenapi/pyswagger
pyswagger/scanner/v1_2/validate.py
https://github.com/pyopenapi/pyswagger/blob/333c4ca08e758cd2194943d9904a3eda3fe43977/pyswagger/scanner/v1_2/validate.py#L115-L122
def _validate_granttype(self, path, obj, _): """ make sure either implicit or authorization_code is defined """ errs = [] if not obj.implicit and not obj.authorization_code: errs.append('Either implicit or authorization_code should be defined.') return path, obj.__class__.__name__, errs
[ "def", "_validate_granttype", "(", "self", ",", "path", ",", "obj", ",", "_", ")", ":", "errs", "=", "[", "]", "if", "not", "obj", ".", "implicit", "and", "not", "obj", ".", "authorization_code", ":", "errs", ".", "append", "(", "'Either implicit or authorization_code should be defined.'", ")", "return", "path", ",", "obj", ".", "__class__", ".", "__name__", ",", "errs" ]
make sure either implicit or authorization_code is defined
[ "make", "sure", "either", "implicit", "or", "authorization_code", "is", "defined" ]
python
train
40.75