Dataset schema (column name, type, range):

    _id               stringlengths   2 - 7
    title             stringlengths   1 - 88
    partition         stringclasses   3 values
    text              stringlengths   75 - 19.8k
    language          stringclasses   1 value
    meta_information  dict
q280400
QtKillRing.rotate
test
def rotate(self):
    """ Rotate the kill ring, then yank back the new top.
    """
    if self._prev_yank:
        text = self._ring.rotate()
        if text:
            self._skip_cursor = True
            cursor = self._text_edit.textCursor()
            cursor.movePosition(QtGui.QTextCursor.Left,
                                QtGui.QTextCursor.KeepAnchor,
                                n=len(self._prev_yank))
            cursor.insertText(text)
            self._prev_yank = text
python
{ "resource": "" }
q280401
patch_pyzmq
test
def patch_pyzmq():
    """backport a few patches from newer pyzmq

    These can be removed as we bump our minimum pyzmq version
    """
    import zmq

    # ioloop.install, introduced in pyzmq 2.1.7
    from zmq.eventloop import ioloop

    def install():
        import tornado.ioloop
        tornado.ioloop.IOLoop = ioloop.IOLoop

    if not hasattr(ioloop, 'install'):
        ioloop.install = install

    # fix missing DEALER/ROUTER aliases in pyzmq < 2.1.9
    if not hasattr(zmq, 'DEALER'):
        zmq.DEALER = zmq.XREQ
    if not hasattr(zmq, 'ROUTER'):
        zmq.ROUTER = zmq.XREP

    # fallback on stdlib json if jsonlib is selected, because jsonlib breaks things.
    # jsonlib support is removed from pyzmq >= 2.2.0
    from zmq.utils import jsonapi
    if jsonapi.jsonmod.__name__ == 'jsonlib':
        import json
        jsonapi.jsonmod = json
python
{ "resource": "" }
q280402
parser_from_schema
test
def parser_from_schema(schema_url, require_version=True):
    """ Returns an XSD-schema-enabled lxml parser from a WSDL or XSD

    `schema_url` can of course be a local path via a file:// url
    """
    schema_tree = etree.parse(schema_url)

    def get_version(element, getter):
        try:
            return getter(element)
        except VersionNotFound:
            if require_version:
                raise
            else:
                return None

    root = schema_tree.getroot()
    if root.tag == '{%s}definitions' % namespaces.WSDL:
        # wsdl should contain an embedded schema
        schema_el = schema_tree.find('wsdl:types/xs:schema', namespaces=NS_MAP)
        version = get_version(root, version_from_wsdl)
    else:
        schema_el = root
        version = get_version(schema_el, version_from_schema)
    schema = etree.XMLSchema(schema_el)
    return objectify.makeparser(schema=schema), version
python
{ "resource": "" }
q280403
AuthenticatedHandler.ws_url
test
def ws_url(self):
    """websocket url matching the current request

    turns http[s]://host[:port] into ws[s]://host[:port]
    """
    proto = self.request.protocol.replace('http', 'ws')
    host = self.application.ipython_app.websocket_host  # default to config value
    if host == '':
        host = self.request.host  # get from request
    return "%s://%s" % (proto, host)
python
{ "resource": "" }
q280404
ZMQStreamHandler._reserialize_reply
test
def _reserialize_reply(self, msg_list):
    """Reserialize a reply message using JSON.

    This takes the msg list from the ZMQ socket, unserializes it using
    self.session and then serializes the result using JSON. This method
    should be used by self._on_zmq_reply to build messages that can
    be sent back to the browser.
    """
    idents, msg_list = self.session.feed_identities(msg_list)
    msg = self.session.unserialize(msg_list)
    try:
        msg['header'].pop('date')
    except KeyError:
        pass
    try:
        msg['parent_header'].pop('date')
    except KeyError:
        pass
    msg.pop('buffers')
    return jsonapi.dumps(msg, default=date_default)
python
{ "resource": "" }
q280405
AuthenticatedZMQStreamHandler._inject_cookie_message
test
def _inject_cookie_message(self, msg):
    """Inject the first message, which is the document cookie,
    for authentication."""
    if isinstance(msg, unicode):
        # Cookie constructor doesn't accept unicode strings for some reason
        msg = msg.encode('utf8', 'replace')
    try:
        self.request._cookies = Cookie.SimpleCookie(msg)
    except:
        logging.warn("couldn't parse cookie string: %s", msg, exc_info=True)
python
{ "resource": "" }
q280406
IOPubHandler.start_hb
test
def start_hb(self, callback):
    """Start the heartbeating and call the callback if the kernel dies."""
    if not self._beating:
        self._kernel_alive = True

        def ping_or_dead():
            self.hb_stream.flush()
            if self._kernel_alive:
                self._kernel_alive = False
                self.hb_stream.send(b'ping')
                # flush stream to force immediate socket send
                self.hb_stream.flush()
            else:
                try:
                    callback()
                except:
                    pass
                finally:
                    self.stop_hb()

        def beat_received(msg):
            self._kernel_alive = True

        self.hb_stream.on_recv(beat_received)
        loop = ioloop.IOLoop.instance()
        self._hb_periodic_callback = ioloop.PeriodicCallback(
            ping_or_dead, self.time_to_dead * 1000, loop)
        loop.add_timeout(time.time() + self.first_beat, self._really_start_hb)
        self._beating = True
python
{ "resource": "" }
q280407
IOPubHandler._really_start_hb
test
def _really_start_hb(self):
    """callback for delayed heartbeat start

    Only start the hb loop if we haven't been closed during the wait.
    """
    if self._beating and not self.hb_stream.closed():
        self._hb_periodic_callback.start()
python
{ "resource": "" }
q280408
IOPubHandler.stop_hb
test
def stop_hb(self):
    """Stop the heartbeating and cancel all related callbacks."""
    if self._beating:
        self._beating = False
        self._hb_periodic_callback.stop()
        if not self.hb_stream.closed():
            self.hb_stream.on_recv(None)
python
{ "resource": "" }
q280409
Demo.fload
test
def fload(self):
    """Load file object."""
    # read data and parse into blocks
    if hasattr(self, 'fobj') and self.fobj is not None:
        self.fobj.close()
    if hasattr(self.src, "read"):
        # It seems to be a file or a file-like object
        self.fobj = self.src
    else:
        # Assume it's a string or something that can be converted to one
        self.fobj = open(self.fname)
python
{ "resource": "" }
q280410
Demo._get_index
test
def _get_index(self, index):
    """Get the current block index, validating and checking status.

    Returns None if the demo is finished"""
    if index is None:
        if self.finished:
            print >>io.stdout, 'Demo finished. Use <demo_name>.reset() if you want to rerun it.'
            return None
        index = self.block_index
    else:
        self._validate_index(index)
    return index
python
{ "resource": "" }
q280411
Demo.seek
test
def seek(self, index):
    """Move the current seek pointer to the given block.

    You can use negative indices to seek from the end, with identical
    semantics to those of Python lists."""
    if index < 0:
        index = self.nblocks + index
    self._validate_index(index)
    self.block_index = index
    self.finished = False
python
{ "resource": "" }
q280412
Demo.edit
test
def edit(self, index=None):
    """Edit a block.

    If no number is given, use the last block executed.

    This edits the in-memory copy of the demo, it does NOT modify the
    original source file.  If you want to do that, simply open the file in
    an editor and use reload() when you make changes to the file.  This
    method is meant to let you change a block during a demonstration for
    explanatory purposes, without damaging your original script."""

    index = self._get_index(index)
    if index is None:
        return
    # decrease the index by one (unless we're at the very beginning), so
    # that the default demo.edit() call opens up the block we've last run
    if index > 0:
        index -= 1

    filename = self.shell.mktempfile(self.src_blocks[index])
    self.shell.hooks.editor(filename, 1)
    new_block = file_read(filename)
    # update the source and colored block
    self.src_blocks[index] = new_block
    self.src_blocks_colored[index] = self.ip_colorize(new_block)
    self.block_index = index
    # call to run with the newly edited index
    self()
python
{ "resource": "" }
q280413
Demo.show
test
def show(self, index=None):
    """Show a single block on screen"""

    index = self._get_index(index)
    if index is None:
        return

    print >>io.stdout, self.marquee('<%s> block # %s (%s remaining)' %
                                    (self.title, index, self.nblocks - index - 1))
    print >>io.stdout, (self.src_blocks_colored[index])
    sys.stdout.flush()
python
{ "resource": "" }
q280414
Demo.show_all
test
def show_all(self):
    """Show entire demo on screen, block by block"""

    fname = self.title
    title = self.title
    nblocks = self.nblocks
    silent = self._silent
    marquee = self.marquee
    for index, block in enumerate(self.src_blocks_colored):
        if silent[index]:
            print >>io.stdout, marquee('<%s> SILENT block # %s (%s remaining)' %
                                       (title, index, nblocks - index - 1))
        else:
            print >>io.stdout, marquee('<%s> block # %s (%s remaining)' %
                                       (title, index, nblocks - index - 1))
        print >>io.stdout, block,
    sys.stdout.flush()
python
{ "resource": "" }
q280415
series
test
def series(collection, method, prints=15, *args, **kwargs):
    '''
    Processes a collection in series

    Parameters
    ----------
    collection : list
        list of Record objects
    method : method
        to call on each Record
    prints : int
        number of timer prints to the screen

    Returns
    -------
    collection : list
        list of Record objects after going through method called

    If more than one collection is given, the function is called with an
    argument list consisting of the corresponding item of each collection,
    substituting None for missing values when not all collections have the
    same length.  If the function is None, return the original collection
    (or a list of tuples if multiple collections).

    Example
    -------
    adding 2 to every number in a range

    >>> import turntable
    >>> collection = range(100)
    >>> method = lambda x: x + 2
    >>> collection = turntable.spin.series(collection, method)
    '''
    if 'verbose' in kwargs.keys():
        verbose = kwargs['verbose']
    else:
        verbose = True
    results = []
    timer = turntable.utils.Timer(nLoops=len(collection),
                                  numPrints=prints, verbose=verbose)
    for subject in collection:
        results.append(method(subject, *args, **kwargs))
        timer.loop()
    timer.fin()
    return results
python
{ "resource": "" }
q280416
batch
test
def batch(collection, method, processes=None, batch_size=None, quiet=False,
          kwargs_to_dump=None, args=None, **kwargs):
    '''Processes a collection in parallel batches, each batch processed in
    series on a single process.

    Running batches in parallel can be more efficient than splitting a list
    across cores as in spin.parallel, because parallel processing has high
    IO requirements.

    Parameters
    ----------
    collection : list
        i.e. list of Record objects
    method : method
        to call on each Record
    processes : int
        number of processes to run on [defaults to number of cores on machine]
    batch_size : int
        length of each batch [defaults to number of elements / number of processes]

    Returns
    -------
    collection : list
        list of Record objects after going through method called

    Example
    -------
    adding 2 to every number in a range

    >>> import turntable
    >>> collection = range(100)
    >>> def jam(record):
    >>>     return record + 2
    >>> collection = turntable.spin.batch(collection, jam)

    Note
    ----
    lambda functions do not work in parallel
    '''
    if processes is None:
        # default to the number of processes, not exceeding 20 or the
        # number of subjects
        processes = min(mp.cpu_count(), 20, len(collection))
    if batch_size is None:
        # floor divide rounds down to nearest int
        batch_size = max(len(collection) // processes, 1)
    print 'size of each batch =', batch_size
    mod = len(collection) % processes
    # batch_list is a list of cars broken into batch-size chunks
    batch_list = [collection[x:x + batch_size]
                  for x in xrange(0, len(collection) - mod, batch_size)]
    # remainder handling
    if mod != 0:
        batch_list[len(batch_list) - 1] += collection[-mod:]
    print 'number of batches =', len(batch_list)
    # New args
    if args is None:
        args = method
    else:
        if isinstance(args, tuple) == False:
            args = (args,)
        args = (method,) + args
    # Applying the mp method w/ or w/o dumping using the custom operator
    # method
    if kwargs_to_dump is None:
        res = parallel(batch_list, new_function_batch, processes=processes,
                       args=args, **kwargs)
    else:
        res = process_dump(batch_list, new_function_batch, kwargs_to_dump,
                           processes=processes, args=args, **kwargs)
    returnList = []
    for l in res:
        returnList += l
    # toc = time.time()
    # elapsed = toc-tic
    # if quiet is False:
    #     if processes is None:
    #         print "Total Elapsed time: %s :-)" % str(elapsed)
    #     else:
    #         print "Total Elapsed time: %s on %s processes :-)" % \
    #             (str(elapsed), str(processes))
    return returnList
python
{ "resource": "" }
q280417
thread
test
def thread(function, sequence, cores=None, runSeries=False, quiet=False):
    '''sets up the threadpool with map for parallel processing'''
    # Make the Pool of workers
    if cores is None:
        pool = ThreadPool()
    else:
        pool = ThreadPool(cores)
    # Operate on the list of subjects with the requested function
    # in the split threads
    tic = time.time()
    if runSeries is False:
        try:
            results = pool.map(function, sequence)
            # close the pool and wait for the work to finish
            pool.close()
            pool.join()
        except:
            print 'thread Failed... running in series :-('
            results = series(sequence, function)
    else:
        results = series(sequence, function)
    toc = time.time()
    elapsed = toc - tic
    if quiet is False:
        if cores is None:
            print "Elapsed time: %s :-)\n" % str(elapsed)
        else:
            print "Elapsed time: %s on %s threads :-)\n" % (str(elapsed),
                                                            str(cores))
    # Notes:
    # import functools
    # abc = map(functools.partial(sb.dist, distName = 'weibull'), wbldfList)
    return results
python
{ "resource": "" }
q280418
parallel
test
def parallel(collection, method, processes=None, args=None, **kwargs):
    '''Processes a collection in parallel.

    Parameters
    ----------
    collection : list
        i.e. list of Record objects
    method : method
        to call on each Record
    processes : int
        number of processes to run on [defaults to number of cores on machine]
    batch_size : int
        length of each batch [defaults to number of elements / number of processes]

    Returns
    -------
    collection : list
        list of Record objects after going through method called

    Example
    -------
    adding 2 to every number in a range

    >>> import turntable
    >>> collection = range(100)
    >>> def jam(record):
    >>>     return record + 2
    >>> collection = turntable.spin.parallel(collection, jam)

    Note
    ----
    lambda functions do not work in parallel
    '''
    if processes is None:
        # default to the number of cores, not exceeding 20
        processes = min(mp.cpu_count(), 20)
    print "Running parallel process on " + str(processes) + " cores. :-)"
    pool = mp.Pool(processes=processes)
    PROC = []
    tic = time.time()
    for main_arg in collection:
        if args is None:
            ARGS = (main_arg,)
        else:
            if isinstance(args, tuple) == False:
                args = (args,)
            ARGS = (main_arg,) + args
        PROC.append(pool.apply_async(method, args=ARGS, kwds=kwargs))
    #RES = [p.get() for p in PROC]
    RES = []
    for p in PROC:
        try:
            RES.append(p.get())
        except Exception as e:
            print "shit happens..."
            print e
            RES.append(None)
    pool.close()
    pool.join()
    toc = time.time()
    elapsed = toc - tic
    print "Elapsed time: %s on %s processes :-)\n" % (str(elapsed),
                                                      str(processes))
    return RES
python
{ "resource": "" }
q280419
with_it
test
def with_it(obj):
    '''
    wrap calls to func in a `with obj` block.

    example:

    ``` py
    @with_it(Lock())
    def func():
        pass
    ```
    '''
    def _wrap(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            with obj:
                return func(*args, **kwargs)
        return wrapper
    return _wrap
python
{ "resource": "" }
q280420
with_objattrs
test
def with_objattrs(*names):
    '''
    like `with_objattr`, but enters the contexts one by one.
    '''
    def _wrap(func):
        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            with contextlib.ExitStack() as stack:
                for name in names:
                    stack.enter_context(getattr(self, name))
                return func(self, *args, **kwargs)
        return wrapper
    return _wrap
python
{ "resource": "" }
q280421
tbsource
test
def tbsource(tb, context=6):
    """Get source from a traceback object.

    A tuple of two things is returned: a list of lines of context from
    the source code, and the index of the current line within that list.
    The optional second argument specifies the number of lines of context
    to return, which are centered around the current line.

    .. Note ::
       This is adapted from inspect.py in the python 2.4 standard library,
       since a bug in the 2.3 version of inspect prevents it from correctly
       locating source lines in a traceback frame.
    """
    lineno = tb.tb_lineno
    frame = tb.tb_frame

    if context > 0:
        start = lineno - 1 - context//2
        log.debug("lineno: %s start: %s", lineno, start)
        try:
            lines, dummy = inspect.findsource(frame)
        except IOError:
            lines, index = [''], 0
        else:
            all_lines = lines
            start = max(start, 1)
            start = max(0, min(start, len(lines) - context))
            lines = lines[start:start+context]
            index = lineno - 1 - start

            # python 2.5 compat: if previous line ends in a continuation,
            # decrement start by 1 to match 2.4 behavior
            if sys.version_info >= (2, 5) and index > 0:
                while lines[index-1].strip().endswith('\\'):
                    start -= 1
                    lines = all_lines[start:start+context]
    else:
        lines, index = [''], 0
    log.debug("tbsource lines '''%s''' around index %s", lines, index)
    return (lines, index)
python
{ "resource": "" }
q280422
find_inspectable_lines
test
def find_inspectable_lines(lines, pos):
    """Find lines in home that are inspectable.

    Walk back from the err line up to 3 lines, but don't walk back over
    changes in indent level.

    Walk forward up to 3 lines, counting \ separated lines as 1. Don't walk
    over changes in indent level (unless part of an extended line)
    """
    cnt = re.compile(r'\\[\s\n]*$')
    df = re.compile(r':[\s\n]*$')
    ind = re.compile(r'^(\s*)')
    toinspect = []
    home = lines[pos]
    home_indent = ind.match(home).groups()[0]

    before = lines[max(pos-3, 0):pos]
    before.reverse()
    after = lines[pos+1:min(pos+4, len(lines))]

    for line in before:
        if ind.match(line).groups()[0] == home_indent:
            toinspect.append(line)
        else:
            break
    toinspect.reverse()
    toinspect.append(home)
    home_pos = len(toinspect)-1

    continued = cnt.search(home)
    for line in after:
        if ((continued or ind.match(line).groups()[0] == home_indent)
                and not df.search(line)):
            toinspect.append(line)
            continued = cnt.search(line)
        else:
            break
    log.debug("Inspecting lines '''%s''' around %s", toinspect, home_pos)
    return toinspect, home_pos
python
{ "resource": "" }
q280423
countdown
test
def countdown(name, date, description='', id='', granularity='sec',
              start=None, progressbar=False, progressbar_inversed=False,
              showpct=False):
    '''
    Create a countdown.
    '''
    end_date = dateparse.parse_datetime(date)
    end = dateformat.format(end_date, 'U')
    content = '<div class="name">' + name + '</div>'
    content += '<div class="description">' + description + '</div>'
    if progressbar:
        if not start:
            raise Exception('For progressbar, start date is required.')
        parsed_date = datetime.datetime.combine(
            dateparse.parse_date(start), datetime.time())
        start_date = dateparse.parse_datetime(start) or parsed_date
        now = datetime.datetime.now()
        pct = (now - start_date).total_seconds() /\
            (end_date - start_date).total_seconds()
        pct = int(pct * 100)
        if progressbar_inversed:
            pct = 100 - pct
        # Note: the output is for bootstrap!
        bar = '<div class="progress progress-striped active">'
        bar += ('<div class="progress-bar" role="progressbar" '
                'aria-valuenow="{pct}" aria-valuemin="0" aria-valuemax="100" '
                'style="width: {pct}%">')
        bar += '<span class="sr-only">{pct}% Complete</span>'
        bar += '</div>'
        bar += '</div>'
        if showpct:
            bar += '<div class="percentage">{pct}%</div>'
        bar = bar.format(pct=pct)
        content += bar
    content += '<div class="counter"></div>'
    attr = {
        'class': 'countdownbox',
        'data-datetime': end,
        'data-granularity': granularity
    }
    if id:
        attr['id'] = id
    return html.tag('div', content, attr)
python
{ "resource": "" }
q280424
cleanup
test
def cleanup(controller, engines):
    """Cleanup routine to shut down all subprocesses we opened."""
    import signal, time

    print('Starting cleanup')
    print('Stopping engines...')
    for e in engines:
        e.send_signal(signal.SIGINT)
    print('Stopping controller...')
    # so it can shut down its queues
    controller.send_signal(signal.SIGINT)
    time.sleep(0.1)
    print('Killing controller...')
    controller.kill()
    print('Cleanup done')
python
{ "resource": "" }
q280425
ConditionalModifier.pre_call
test
def pre_call(self, ctxt, pre_mod, post_mod, action):
    """
    A modifier hook function.  This is called in priority order prior
    to invoking the ``Action`` for the step.  This allows a modifier
    to alter the context, or to take over subsequent action invocation.

    :param ctxt: The context object.
    :param pre_mod: A list of the modifiers preceding this modifier
                    in the list of modifiers that is applicable to
                    the action.  This list is in priority order.
    :param post_mod: A list of the modifiers following this modifier
                     in the list of modifiers that is applicable to
                     the action.  This list is in priority order.
    :param action: The action that will be performed.

    :returns: A ``None`` return value indicates that the modifier is
              taking no action.  A non-``None`` return value should
              consist of a ``StepResult`` object; this will suspend
              further ``pre_call()`` processing and proceed to the
              ``post_call()`` processing.

    This implementation returns a ``StepResult`` with state
    ``SKIPPED`` if the condition does not evaluate to ``True``.
    """

    # Check the condition
    if not self.condition(ctxt):
        return steps.StepResult(state=steps.SKIPPED)

    return None
python
{ "resource": "" }
q280426
IgnoreErrorsModifier.post_call
test
def post_call(self, ctxt, result, action, post_mod, pre_mod):
    """
    A modifier hook function.  This is called in reverse-priority
    order after invoking the ``Action`` for the step.  This allows a
    modifier to inspect or alter the result of the step.

    :param ctxt: The context object.
    :param result: The result of the action.  This will be a
                   ``StepResult`` object.
    :param action: The action that was performed.
    :param post_mod: A list of modifiers following this modifier in
                     the list of modifiers that is applicable to the
                     action.  This list is in priority order.
    :param pre_mod: A list of modifiers preceding this modifier in
                    the list of modifiers that is applicable to the
                    action.  This list is in priority order.

    :returns: The result for the action, optionally modified.  If
              the result is not modified, ``result`` must be returned
              unchanged.

    This implementation alters the ``ignore`` property of the
    ``result`` object to match the configured value.
    """

    # Set the ignore state
    result.ignore = self.config

    return result
python
{ "resource": "" }
q280427
save_ids
test
def save_ids(f, self, *args, **kwargs):
    """Keep our history and outstanding attributes up to date after a method call."""
    n_previous = len(self.client.history)
    try:
        ret = f(self, *args, **kwargs)
    finally:
        nmsgs = len(self.client.history) - n_previous
        msg_ids = self.client.history[-nmsgs:]
        self.history.extend(msg_ids)
        map(self.outstanding.add, msg_ids)
    return ret
python
{ "resource": "" }
q280428
sync_results
test
def sync_results(f, self, *args, **kwargs):
    """sync relevant results from self.client to our results attribute."""
    ret = f(self, *args, **kwargs)
    delta = self.outstanding.difference(self.client.outstanding)
    completed = self.outstanding.intersection(delta)
    self.outstanding = self.outstanding.difference(completed)
    return ret
python
{ "resource": "" }
q280429
spin_after
test
def spin_after(f, self, *args, **kwargs):
    """call spin after the method."""
    ret = f(self, *args, **kwargs)
    self.spin()
    return ret
python
{ "resource": "" }
q280430
BlockingSubSocketChannel.get_msgs
test
def get_msgs(self):
    """Get all messages that are currently ready."""
    msgs = []
    while True:
        try:
            msgs.append(self.get_msg(block=False))
        except Empty:
            break
    return msgs
python
{ "resource": "" }
q280431
BlockingStdInSocketChannel.get_msg
test
def get_msg(self, block=True, timeout=None):
    "Gets a message if there is one that is ready."
    return self._in_queue.get(block, timeout)
python
{ "resource": "" }
q280432
prop
test
def prop(func=None, *,
         field=_UNSET,
         get: bool = True,
         set: bool = True,
         del_: bool = False,
         default=_UNSET,
         types: tuple = _UNSET):
    '''
    `prop` is a sugar for `property`.

    ``` py
    @prop
    def value(self):
        pass

    # equals:

    @property
    def value(self):
        return self._value

    @value.setter
    def value(self, val):
        self._value = val
    ```
    '''
    def wrap(func):
        if not callable(func):
            raise TypeError
        prop_name = func.__name__
        key = field
        if key is _UNSET:
            key = '_' + prop_name

        fget, fset, fdel = None, None, None

        if get:
            def fget(self):
                try:
                    return self.__dict__[key]
                except KeyError:
                    if default is not _UNSET:
                        return default
                    raise AttributeError(
                        f"'{type(self).__name__}' object has no attribute '{key}'")

        if set:
            def fset(self, val):
                if types is not _UNSET and not isinstance(val, types):
                    if isinstance(types, tuple):
                        types_name = tuple(x.__name__ for x in types)
                    else:
                        types_name = types.__name__
                    raise TypeError(
                        f'type of {type(self).__name__}.{prop_name} must be {types_name}; '
                        f'got {type(val).__name__} instead')
                self.__dict__[key] = val

        if del_:
            def fdel(self):
                del self.__dict__[key]

        return property(fget, fset, fdel, func.__doc__)

    return wrap(func) if func else wrap
python
{ "resource": "" }
q280433
get_onlys
test
def get_onlys(*fields):
    '''
    `get_onlys` is a sugar for multi-`property`.

    ``` py
    name, age = get_onlys('_name', '_age')

    # equals:

    @property
    def name(self):
        return getattr(self, '_name')

    @property
    def age(self):
        return getattr(self, '_age')
    ```
    '''
    return tuple(property(lambda self, f=f: getattr(self, f)) for f in fields)
python
{ "resource": "" }
q280434
parse
test
def parse(url):
    """Parses a database URL."""
    config = {}

    if not isinstance(url, six.string_types):
        url = ''

    url = urlparse.urlparse(url)

    # Remove query strings.
    path = url.path[1:]
    path = path.split('?', 2)[0]

    # Update with environment configuration.
    config.update({
        'NAME': path,
        'USER': url.username,
        'PASSWORD': url.password,
        'HOST': url.hostname,
        'PORT': url.port,
    })

    if url.scheme in SCHEMES:
        config['ENGINE'] = SCHEMES[url.scheme]

    return config
python
{ "resource": "" }
q280435
module_list
test
def module_list(path):
    """
    Return the list containing the names of the modules available in the given
    folder.
    """
    # sys.path has the cwd as an empty string, but isdir/listdir need it as '.'
    if path == '':
        path = '.'

    if os.path.isdir(path):
        folder_list = os.listdir(path)
    elif path.endswith('.egg'):
        try:
            folder_list = [f for f in zipimporter(path)._files]
        except:
            folder_list = []
    else:
        folder_list = []

    if not folder_list:
        return []

    # A few local constants to be used in loops below
    isfile = os.path.isfile
    pjoin = os.path.join
    basename = os.path.basename

    def is_importable_file(path):
        """Returns True if the provided path is a valid importable module"""
        name, extension = os.path.splitext(path)
        return import_re.match(path) and py3compat.isidentifier(name)

    # Now find actual path matches for packages or modules
    folder_list = [p for p in folder_list
                   if isfile(pjoin(path, p, '__init__.py'))
                   or is_importable_file(p)]

    return [basename(p).split('.')[0] for p in folder_list]
python
{ "resource": "" }
q280436
get_root_modules
test
def get_root_modules():
    """
    Returns a list containing the names of all the modules available in the
    folders of the pythonpath.
    """
    ip = get_ipython()

    if 'rootmodules' in ip.db:
        return ip.db['rootmodules']

    t = time()
    store = False
    modules = list(sys.builtin_module_names)
    for path in sys.path:
        modules += module_list(path)
        if time() - t >= TIMEOUT_STORAGE and not store:
            store = True
            print("\nCaching the list of root modules, please wait!")
            print("(This will only be done once - type '%rehashx' to "
                  "reset cache!)\n")
            sys.stdout.flush()
        if time() - t > TIMEOUT_GIVEUP:
            print("This is taking too long, we give up.\n")
            ip.db['rootmodules'] = []
            return []

    modules = set(modules)
    if '__init__' in modules:
        modules.remove('__init__')
    modules = list(modules)
    if store:
        ip.db['rootmodules'] = modules
    return modules
python
{ "resource": "" }
q280437
quick_completer
test
def quick_completer(cmd, completions):
    """ Easily create a trivial completer for a command.

    Takes either a list of completions, or all completions in string (that will
    be split on whitespace).

    Example::

        [d:\ipython]|1> import ipy_completers
        [d:\ipython]|2> ipy_completers.quick_completer('foo', ['bar','baz'])
        [d:\ipython]|3> foo b<TAB>
        bar baz
        [d:\ipython]|3> foo ba
    """

    if isinstance(completions, basestring):
        completions = completions.split()

    def do_complete(self, event):
        return completions

    get_ipython().set_hook('complete_command', do_complete, str_key=cmd)
python
{ "resource": "" }
q280438
module_completion
test
def module_completion(line):
    """
    Returns a list containing the completion possibilities for an import line.

    The line looks like this :
    'import xml.d'
    'from xml.dom import'
    """

    words = line.split(' ')
    nwords = len(words)

    # from whatever <tab> -> 'import '
    if nwords == 3 and words[0] == 'from':
        return ['import ']

    # 'from xy<tab>' or 'import xy<tab>'
    if nwords < 3 and (words[0] in ['import', 'from']):
        if nwords == 1:
            return get_root_modules()
        mod = words[1].split('.')
        if len(mod) < 2:
            return get_root_modules()
        completion_list = try_import('.'.join(mod[:-1]), True)
        return ['.'.join(mod[:-1] + [el]) for el in completion_list]

    # 'from xyz import abc<tab>'
    if nwords >= 3 and words[0] == 'from':
        mod = words[1]
        return try_import(mod)
python
{ "resource": "" }
q280439
magic_run_completer
test
def magic_run_completer(self, event):
    """Complete files that end in .py or .ipy for the %run command.
    """
    comps = arg_split(event.line, strict=False)
    relpath = (len(comps) > 1 and comps[-1] or '').strip("'\"")

    #print("\nev=", event)  # dbg
    #print("rp=", relpath)  # dbg
    #print('comps=', comps)  # dbg

    lglob = glob.glob
    isdir = os.path.isdir
    relpath, tilde_expand, tilde_val = expand_user(relpath)
    dirs = [f.replace('\\', '/') + "/" for f in lglob(relpath + '*') if isdir(f)]

    # Find if the user has already typed the first filename, after which we
    # should complete on all files, since after the first one other files may
    # be arguments to the input script.
    if filter(magic_run_re.match, comps):
        pys = [f.replace('\\', '/') for f in lglob('*')]
    else:
        pys = [f.replace('\\', '/')
               for f in lglob(relpath + '*.py') + lglob(relpath + '*.ipy') +
               lglob(relpath + '*.pyw')]
    #print('run comp:', dirs+pys)  # dbg
    return [compress_user(p, tilde_expand, tilde_val) for p in dirs + pys]
python
{ "resource": "" }
q280440
cd_completer
test
def cd_completer(self, event):
    """Completer function for cd, which only returns directories."""
    ip = get_ipython()
    relpath = event.symbol

    #print(event)  # dbg
    if event.line.endswith('-b') or ' -b ' in event.line:
        # return only bookmark completions
        bkms = self.db.get('bookmarks', None)
        if bkms:
            return bkms.keys()
        else:
            return []

    if event.symbol == '-':
        width_dh = str(len(str(len(ip.user_ns['_dh']) + 1)))
        # jump in directory history by number
        fmt = '-%0' + width_dh + 'd [%s]'
        ents = [fmt % (i, s) for i, s in enumerate(ip.user_ns['_dh'])]
        if len(ents) > 1:
            return ents
        return []

    if event.symbol.startswith('--'):
        return ["--" + os.path.basename(d) for d in ip.user_ns['_dh']]

    # Expand ~ in path and normalize directory separators.
    relpath, tilde_expand, tilde_val = expand_user(relpath)
    relpath = relpath.replace('\\', '/')

    found = []
    for d in [f.replace('\\', '/') + '/' for f in glob.glob(relpath + '*')
              if os.path.isdir(f)]:
        if ' ' in d:
            # we don't want to deal with any of that, complex code
            # for this is elsewhere
            raise TryNext

        found.append(d)

    if not found:
        if os.path.isdir(relpath):
            return [compress_user(relpath, tilde_expand, tilde_val)]

        # if no completions so far, try bookmarks
        bks = self.db.get('bookmarks', {}).iterkeys()
        bkmatches = [s for s in bks if s.startswith(event.symbol)]
        if bkmatches:
            return bkmatches

        raise TryNext

    return [compress_user(p, tilde_expand, tilde_val) for p in found]
python
{ "resource": "" }
q280441
Xunit._quoteattr
test
def _quoteattr(self, attr):
    """Escape an XML attribute. Value can be unicode."""
    attr = xml_safe(attr)
    if isinstance(attr, unicode) and not UNICODE_STRINGS:
        attr = attr.encode(self.encoding)
    return saxutils.quoteattr(attr)
python
{ "resource": "" }
q280442
Xunit.configure
test
def configure(self, options, config):
    """Configures the xunit plugin."""
    Plugin.configure(self, options, config)
    self.config = config
    if self.enabled:
        self.stats = {'errors': 0,
                      'failures': 0,
                      'passes': 0,
                      'skipped': 0}
        self.errorlist = []
        self.error_report_file = codecs.open(options.xunit_file, 'w',
                                             self.encoding, 'replace')
python
{ "resource": "" }
q280443
Xunit.report
test
def report(self, stream):
    """Writes an Xunit-formatted XML file

    The file includes a report of test errors and failures.
    """
    self.stats['encoding'] = self.encoding
    self.stats['total'] = (self.stats['errors'] + self.stats['failures']
                           + self.stats['passes'] + self.stats['skipped'])
    self.error_report_file.write(
        u'<?xml version="1.0" encoding="%(encoding)s"?>'
        u'<testsuite name="nosetests" tests="%(total)d" '
        u'errors="%(errors)d" failures="%(failures)d" '
        u'skip="%(skipped)d">' % self.stats)
    self.error_report_file.write(u''.join([self._forceUnicode(e)
                                           for e in self.errorlist]))
    self.error_report_file.write(u'</testsuite>')
    self.error_report_file.close()
    if self.config.verbosity > 1:
        stream.writeln("-" * 70)
        stream.writeln("XML: %s" % self.error_report_file.name)
python
{ "resource": "" }
q280444
Xunit.addError
test
def addError(self, test, err, capt=None):
    """Add error output to Xunit report.
    """
    taken = self._timeTaken()

    if issubclass(err[0], SkipTest):
        type = 'skipped'
        self.stats['skipped'] += 1
    else:
        type = 'error'
        self.stats['errors'] += 1
    tb = ''.join(traceback.format_exception(*err))
    id = test.id()
    self.errorlist.append(
        '<testcase classname=%(cls)s name=%(name)s time="%(taken).3f">'
        '<%(type)s type=%(errtype)s message=%(message)s><![CDATA[%(tb)s]]>'
        '</%(type)s></testcase>' %
        {'cls': self._quoteattr(id_split(id)[0]),
         'name': self._quoteattr(id_split(id)[-1]),
         'taken': taken,
         'type': type,
         'errtype': self._quoteattr(nice_classname(err[0])),
         'message': self._quoteattr(exc_message(err)),
         'tb': escape_cdata(tb),
         })
python
{ "resource": "" }
q280445
Xunit.addFailure
test
def addFailure(self, test, err, capt=None, tb_info=None):
    """Add failure output to Xunit report.
    """
    taken = self._timeTaken()
    tb = ''.join(traceback.format_exception(*err))
    self.stats['failures'] += 1
    id = test.id()
    self.errorlist.append(
        '<testcase classname=%(cls)s name=%(name)s time="%(taken).3f">'
        '<failure type=%(errtype)s message=%(message)s><![CDATA[%(tb)s]]>'
        '</failure></testcase>' %
        {'cls': self._quoteattr(id_split(id)[0]),
         'name': self._quoteattr(id_split(id)[-1]),
         'taken': taken,
         'errtype': self._quoteattr(nice_classname(err[0])),
         'message': self._quoteattr(exc_message(err)),
         'tb': escape_cdata(tb),
         })
python
{ "resource": "" }
q280446
Xunit.addSuccess
test
def addSuccess(self, test, capt=None):
    """Add success output to Xunit report.
    """
    taken = self._timeTaken()
    self.stats['passes'] += 1
    id = test.id()
    self.errorlist.append(
        '<testcase classname=%(cls)s name=%(name)s '
        'time="%(taken).3f" />' %
        {'cls': self._quoteattr(id_split(id)[0]),
         'name': self._quoteattr(id_split(id)[-1]),
         'taken': taken,
         })
python
{ "resource": "" }
q280447
twobin
test
def twobin(loads):
    """Pick two at random, use the LRU of the two.

    The content of loads is ignored.

    Assumes LRU ordering of loads, with oldest first.
    """
    n = len(loads)
    a = randint(0, n - 1)
    b = randint(0, n - 1)
    return min(a, b)
python
{ "resource": "" }
q280448
weighted
test
def weighted(loads):
    """Pick two at random using inverse load as weight.

    Return the less loaded of the two.
    """
    # weight 0 a million times more than 1:
    weights = 1. / (1e-6 + numpy.array(loads))
    sums = weights.cumsum()
    t = sums[-1]
    x = random() * t
    y = random() * t
    idx = 0
    idy = 0
    while sums[idx] < x:
        idx += 1
    while sums[idy] < y:
        idy += 1
    if weights[idy] > weights[idx]:
        return idy
    else:
        return idx
python
{ "resource": "" }
q280449
TaskScheduler._register_engine
test
def _register_engine(self, uid):
    """New engine with ident `uid` became available."""
    # head of the line:
    self.targets.insert(0, uid)
    self.loads.insert(0, 0)

    # initialize sets
    self.completed[uid] = set()
    self.failed[uid] = set()
    self.pending[uid] = {}

    # rescan the graph:
    self.update_graph(None)
python
{ "resource": "" }
q280450
TaskScheduler._unregister_engine
test
def _unregister_engine(self, uid):
    """Existing engine with ident `uid` became unavailable."""
    if len(self.targets) == 1:
        # this was our only engine
        pass

    # handle any potentially finished tasks:
    self.engine_stream.flush()

    # don't pop destinations, because they might be used later
    # map(self.destinations.pop, self.completed.pop(uid))
    # map(self.destinations.pop, self.failed.pop(uid))

    # prevent this engine from receiving work
    idx = self.targets.index(uid)
    self.targets.pop(idx)
    self.loads.pop(idx)

    # wait 5 seconds before cleaning up pending jobs, since the results might
    # still be incoming
    if self.pending[uid]:
        dc = ioloop.DelayedCallback(lambda: self.handle_stranded_tasks(uid),
                                    5000, self.loop)
        dc.start()
    else:
        self.completed.pop(uid)
        self.failed.pop(uid)
python
{ "resource": "" }
q280451
TaskScheduler.handle_stranded_tasks
test
def handle_stranded_tasks(self, engine):
    """Deal with jobs resident in an engine that died."""
    lost = self.pending[engine]
    for msg_id in lost.keys():
        if msg_id not in self.pending[engine]:
            # prevent double-handling of messages
            continue

        raw_msg = lost[msg_id].raw_msg
        idents, msg = self.session.feed_identities(raw_msg, copy=False)
        parent = self.session.unpack(msg[1].bytes)
        idents = [engine, idents[0]]

        # build fake error reply
        try:
            raise error.EngineError("Engine %r died while running task %r"
                                    % (engine, msg_id))
        except:
            content = error.wrap_exception()
        # build fake header
        header = dict(
            status='error',
            engine=engine,
            date=datetime.now(),
        )
        msg = self.session.msg('apply_reply', content, parent=parent,
                               subheader=header)
        raw_reply = map(zmq.Message, self.session.serialize(msg, ident=idents))
        # and dispatch it
        self.dispatch_result(raw_reply)

    # finally scrub completed/failed lists
    self.completed.pop(engine)
    self.failed.pop(engine)
python
{ "resource": "" }
q280452
TaskScheduler.dispatch_submission
test
def dispatch_submission(self, raw_msg):
    """Dispatch job submission to appropriate handlers."""
    # ensure targets up to date:
    self.notifier_stream.flush()
    try:
        idents, msg = self.session.feed_identities(raw_msg, copy=False)
        msg = self.session.unserialize(msg, content=False, copy=False)
    except Exception:
        self.log.error("task::Invalid task msg: %r" % raw_msg, exc_info=True)
        return

    # send to monitor
    self.mon_stream.send_multipart([b'intask'] + raw_msg, copy=False)

    header = msg['header']
    msg_id = header['msg_id']
    self.all_ids.add(msg_id)

    # get targets as a set of bytes objects
    # from a list of unicode objects
    targets = header.get('targets', [])
    targets = map(cast_bytes, targets)
    targets = set(targets)

    retries = header.get('retries', 0)
    self.retries[msg_id] = retries

    # time dependencies
    after = header.get('after', None)
    if after:
        after = Dependency(after)
        if after.all:
            if after.success:
                after = Dependency(after.difference(self.all_completed),
                                   success=after.success,
                                   failure=after.failure,
                                   all=after.all,
                                   )
            if after.failure:
                after = Dependency(after.difference(self.all_failed),
                                   success=after.success,
                                   failure=after.failure,
                                   all=after.all,
                                   )
        if after.check(self.all_completed, self.all_failed):
            # recast as empty set, if `after` already met,
            # to prevent unnecessary set comparisons
            after = MET
    else:
        after = MET

    # location dependencies
    follow = Dependency(header.get('follow', []))

    # turn timeouts into datetime objects:
    timeout = header.get('timeout', None)
    if timeout:
        # cast to float, because jsonlib returns floats as decimal.Decimal,
        # which timedelta does not accept
        timeout = datetime.now() + timedelta(0, float(timeout), 0)

    job = Job(msg_id=msg_id, raw_msg=raw_msg, idents=idents, msg=msg,
              header=header, targets=targets, after=after, follow=follow,
              timeout=timeout,
              )

    # validate and reduce dependencies:
    for dep in after, follow:
        if not dep:  # empty dependency
            continue
        # check valid:
        if msg_id in dep or dep.difference(self.all_ids):
            self.depending[msg_id] = job
            return self.fail_unreachable(msg_id, error.InvalidDependency)
        # check if unreachable:
        if dep.unreachable(self.all_completed, self.all_failed):
            self.depending[msg_id] = job
            return self.fail_unreachable(msg_id)

    if after.check(self.all_completed, self.all_failed):
        # time deps already met, try to run
        if not self.maybe_run(job):
            # can't run yet
            if msg_id not in self.all_failed:
                # could have failed as unreachable
                self.save_unmet(job)
    else:
        self.save_unmet(job)
python
{ "resource": "" }
q280453
TaskScheduler.audit_timeouts
test
def audit_timeouts(self):
    """Audit all waiting tasks for expired timeouts."""
    now = datetime.now()
    for msg_id in self.depending.keys():
        # must recheck, in case one failure cascaded to another:
        if msg_id in self.depending:
            job = self.depending[msg_id]
            if job.timeout and job.timeout < now:
                self.fail_unreachable(msg_id, error.TaskTimeout)
python
{ "resource": "" }
q280454
TaskScheduler.fail_unreachable
test
def fail_unreachable(self, msg_id, why=error.ImpossibleDependency):
    """a task has become unreachable, send a reply with an
    ImpossibleDependency error."""
    if msg_id not in self.depending:
        self.log.error("msg %r already failed!", msg_id)
        return
    job = self.depending.pop(msg_id)
    for mid in job.dependents:
        if mid in self.graph:
            self.graph[mid].remove(msg_id)

    try:
        raise why()
    except:
        content = error.wrap_exception()

    self.all_done.add(msg_id)
    self.all_failed.add(msg_id)

    msg = self.session.send(self.client_stream, 'apply_reply', content,
                            parent=job.header, ident=job.idents)
    self.session.send(self.mon_stream, msg, ident=[b'outtask'] + job.idents)

    self.update_graph(msg_id, success=False)
python
{ "resource": "" }
q280455
TaskScheduler.maybe_run
test
def maybe_run(self, job):
    """check location dependencies, and run if they are met."""
    msg_id = job.msg_id
    self.log.debug("Attempting to assign task %s", msg_id)
    if not self.targets:
        # no engines, definitely can't run
        return False

    if job.follow or job.targets or job.blacklist or self.hwm:
        # we need a can_run filter
        def can_run(idx):
            # check hwm
            if self.hwm and self.loads[idx] == self.hwm:
                return False
            target = self.targets[idx]
            # check blacklist
            if target in job.blacklist:
                return False
            # check targets
            if job.targets and target not in job.targets:
                return False
            # check follow
            return job.follow.check(self.completed[target], self.failed[target])

        indices = filter(can_run, range(len(self.targets)))

        if not indices:
            # couldn't run
            if job.follow.all:
                # check follow for impossibility
                dests = set()
                relevant = set()
                if job.follow.success:
                    relevant = self.all_completed
                if job.follow.failure:
                    relevant = relevant.union(self.all_failed)
                for m in job.follow.intersection(relevant):
                    dests.add(self.destinations[m])
                if len(dests) > 1:
                    self.depending[msg_id] = job
                    self.fail_unreachable(msg_id)
                    return False
            if job.targets:
                # check blacklist+targets for impossibility
                job.targets.difference_update(job.blacklist)
                if not job.targets or not job.targets.intersection(self.targets):
                    self.depending[msg_id] = job
                    self.fail_unreachable(msg_id)
                    return False
            return False
    else:
        indices = None

    self.submit_task(job, indices)
    return True
python
{ "resource": "" }
q280456
TaskScheduler.save_unmet
test
def save_unmet(self, job):
    """Save a message for later submission when its dependencies are met."""
    msg_id = job.msg_id
    self.depending[msg_id] = job
    # track the ids in follow or after, but not those already finished
    for dep_id in job.after.union(job.follow).difference(self.all_done):
        if dep_id not in self.graph:
            self.graph[dep_id] = set()
        self.graph[dep_id].add(msg_id)
python
{ "resource": "" }
q280457
TaskScheduler.submit_task
test
def submit_task(self, job, indices=None):
    """Submit a task to any of a subset of our targets."""
    if indices:
        loads = [self.loads[i] for i in indices]
    else:
        loads = self.loads
    idx = self.scheme(loads)
    if indices:
        idx = indices[idx]
    target = self.targets[idx]
    # print (target, map(str, msg[:3]))
    # send job to the engine
    self.engine_stream.send(target, flags=zmq.SNDMORE, copy=False)
    self.engine_stream.send_multipart(job.raw_msg, copy=False)
    # update load
    self.add_job(idx)
    self.pending[target][job.msg_id] = job
    # notify Hub
    content = dict(msg_id=job.msg_id, engine_id=target.decode('ascii'))
    self.session.send(self.mon_stream, 'task_destination', content=content,
                      ident=[b'tracktask', self.ident])
python
{ "resource": "" }
q280458
TaskScheduler.dispatch_result
test
def dispatch_result(self, raw_msg):
    """dispatch method for result replies"""
    try:
        idents, msg = self.session.feed_identities(raw_msg, copy=False)
        msg = self.session.unserialize(msg, content=False, copy=False)
        engine = idents[0]
        try:
            idx = self.targets.index(engine)
        except ValueError:
            pass  # skip load-update for dead engines
        else:
            self.finish_job(idx)
    except Exception:
        self.log.error("task::Invalid result: %r", raw_msg, exc_info=True)
        return

    header = msg['header']
    parent = msg['parent_header']
    if header.get('dependencies_met', True):
        success = (header['status'] == 'ok')
        msg_id = parent['msg_id']
        retries = self.retries[msg_id]
        if not success and retries > 0:
            # failed
            self.retries[msg_id] = retries - 1
            self.handle_unmet_dependency(idents, parent)
        else:
            del self.retries[msg_id]
            # relay to client and update graph
            self.handle_result(idents, parent, raw_msg, success)
            # send to Hub monitor
            self.mon_stream.send_multipart([b'outtask'] + raw_msg, copy=False)
    else:
        self.handle_unmet_dependency(idents, parent)
python
{ "resource": "" }
q280459
TaskScheduler.handle_result
test
def handle_result(self, idents, parent, raw_msg, success=True):
    """handle a real task result, either success or failure"""
    # first, relay result to client
    engine = idents[0]
    client = idents[1]
    # swap_ids for ROUTER-ROUTER mirror
    raw_msg[:2] = [client, engine]
    # print (map(str, raw_msg[:4]))
    self.client_stream.send_multipart(raw_msg, copy=False)
    # now, update our data structures
    msg_id = parent['msg_id']
    self.pending[engine].pop(msg_id)
    if success:
        self.completed[engine].add(msg_id)
        self.all_completed.add(msg_id)
    else:
        self.failed[engine].add(msg_id)
        self.all_failed.add(msg_id)
    self.all_done.add(msg_id)
    self.destinations[msg_id] = engine

    self.update_graph(msg_id, success)
python
{ "resource": "" }
q280460
TaskScheduler.handle_unmet_dependency
test
def handle_unmet_dependency(self, idents, parent):
    """handle an unmet dependency"""
    engine = idents[0]
    msg_id = parent['msg_id']

    job = self.pending[engine].pop(msg_id)
    job.blacklist.add(engine)

    if job.blacklist == job.targets:
        self.depending[msg_id] = job
        self.fail_unreachable(msg_id)
    elif not self.maybe_run(job):
        # resubmit failed
        if msg_id not in self.all_failed:
            # put it back in our dependency tree
            self.save_unmet(job)

    if self.hwm:
        try:
            idx = self.targets.index(engine)
        except ValueError:
            pass  # skip load-update for dead engines
        else:
            if self.loads[idx] == self.hwm - 1:
                self.update_graph(None)
python
{ "resource": "" }
q280461
TaskScheduler.update_graph
test
def update_graph(self, dep_id=None, success=True):
    """dep_id just finished. Update our dependency graph and submit any
    jobs that just became runnable.

    Called with dep_id=None to update the entire graph for hwm, but
    without finishing a task.
    """
    # print ("\n\n***********")
    # pprint (dep_id)
    # pprint (self.graph)
    # pprint (self.depending)
    # pprint (self.all_completed)
    # pprint (self.all_failed)
    # print ("\n\n***********\n\n")

    # update any jobs that depended on the dependency
    jobs = self.graph.pop(dep_id, [])

    # recheck *all* jobs if
    # a) we have HWM and an engine just became no longer full
    # or b) dep_id was given as None
    if dep_id is None or self.hwm and any([load == self.hwm - 1
                                           for load in self.loads]):
        jobs = self.depending.keys()

    for msg_id in sorted(jobs,
                         key=lambda msg_id: self.depending[msg_id].timestamp):
        job = self.depending[msg_id]

        if job.after.unreachable(self.all_completed, self.all_failed)\
                or job.follow.unreachable(self.all_completed, self.all_failed):
            self.fail_unreachable(msg_id)

        elif job.after.check(self.all_completed, self.all_failed):
            # time deps met, maybe run
            if self.maybe_run(job):
                self.depending.pop(msg_id)
                for mid in job.dependents:
                    if mid in self.graph:
                        self.graph[mid].remove(msg_id)
python
{ "resource": "" }
q280462
Logger.logstart
test
def logstart(self, logfname=None, loghead=None, logmode=None,
             log_output=False, timestamp=False, log_raw_input=False):
    """Generate a new log-file with a default header.

    Raises RuntimeError if the log has already been started"""

    if self.logfile is not None:
        raise RuntimeError('Log file is already active: %s' %
                           self.logfname)

    # The parameters can override constructor defaults
    if logfname is not None: self.logfname = logfname
    if loghead is not None: self.loghead = loghead
    if logmode is not None: self.logmode = logmode

    # Parameters not part of the constructor
    self.timestamp = timestamp
    self.log_output = log_output
    self.log_raw_input = log_raw_input

    # init depending on the log mode requested
    isfile = os.path.isfile
    logmode = self.logmode

    if logmode == 'append':
        self.logfile = io.open(self.logfname, 'a', encoding='utf-8')

    elif logmode == 'backup':
        if isfile(self.logfname):
            backup_logname = self.logfname + '~'
            # Manually remove any old backup, since os.rename may fail
            # under Windows.
            if isfile(backup_logname):
                os.remove(backup_logname)
            os.rename(self.logfname, backup_logname)
        self.logfile = io.open(self.logfname, 'w', encoding='utf-8')

    elif logmode == 'global':
        self.logfname = os.path.join(self.home_dir, self.logfname)
        self.logfile = io.open(self.logfname, 'a', encoding='utf-8')

    elif logmode == 'over':
        if isfile(self.logfname):
            os.remove(self.logfname)
        self.logfile = io.open(self.logfname, 'w', encoding='utf-8')

    elif logmode == 'rotate':
        if isfile(self.logfname):
            if isfile(self.logfname + '.001~'):
                old = glob.glob(self.logfname + '.*~')
                old.sort()
                old.reverse()
                for f in old:
                    root, ext = os.path.splitext(f)
                    num = int(ext[1:-1]) + 1
                    os.rename(f, root + '.' + `num`.zfill(3) + '~')
            os.rename(self.logfname, self.logfname + '.001~')
        self.logfile = io.open(self.logfname, 'w', encoding='utf-8')

    if logmode != 'append':
        self.logfile.write(self.loghead)

    self.logfile.flush()
    self.log_active = True
python
{ "resource": "" }
q280463
Logger.logstate
test
def logstate(self):
    """Print a status message about the logger."""
    if self.logfile is None:
        print 'Logging has not been activated.'
    else:
        state = self.log_active and 'active' or 'temporarily suspended'
        print 'Filename       :', self.logfname
        print 'Mode           :', self.logmode
        print 'Output logging :', self.log_output
        print 'Raw input log  :', self.log_raw_input
        print 'Timestamping   :', self.timestamp
        print 'State          :', state
python
{ "resource": "" }
q280464
Logger.log
test
def log(self, line_mod, line_ori):
    """Write the sources to a log.

    Inputs:

    - line_mod: possibly modified input, such as the transformations made
      by input prefilters or input handlers of various kinds. This should
      always be valid Python.

    - line_ori: unmodified input line from the user. This is not
      necessarily valid Python.
    """

    # Write the log line, but decide which one according to the
    # log_raw_input flag, set when the log is started.
    if self.log_raw_input:
        self.log_write(line_ori)
    else:
        self.log_write(line_mod)
python
{ "resource": "" }
q280465
Logger.log_write
test
def log_write(self, data, kind='input'):
    """Write data to the log file, if active"""
    #print 'data: %r' % data  # dbg
    if self.log_active and data:
        write = self.logfile.write
        if kind == 'input':
            if self.timestamp:
                write(str_to_unicode(time.strftime('# %a, %d %b %Y %H:%M:%S\n',
                                                   time.localtime())))
            write(data)
        elif kind == 'output' and self.log_output:
            odata = u'\n'.join([u'#[Out]# %s' % s
                                for s in data.splitlines()])
            write(u'%s\n' % odata)
        self.logfile.flush()
python
{ "resource": "" }
q280466
Logger.logstop
test
def logstop(self):
    """Fully stop logging and close log file.

    In order to start logging again, a new logstart() call needs to be
    made, possibly (though not necessarily) with a new filename, mode and
    other options."""
    if self.logfile is not None:
        self.logfile.close()
        self.logfile = None
    else:
        print "Logging hadn't been started."
    self.log_active = False
python
{ "resource": "" }
q280467
new_worksheet
test
def new_worksheet(name=None, cells=None):
    """Create a worksheet by name with a list of cells."""
    ws = NotebookNode()
    if name is not None:
        ws.name = unicode(name)
    if cells is None:
        ws.cells = []
    else:
        ws.cells = list(cells)
    return ws
python
{ "resource": "" }
q280468
StrDispatch.add_s
test
def add_s(self, s, obj, priority=0):
    """ Adds a target 'string' for dispatching """
    chain = self.strs.get(s, CommandChainDispatcher())
    chain.add(obj, priority)
    self.strs[s] = chain
python
{ "resource": "" }
q280469
StrDispatch.add_re
test
def add_re(self, regex, obj, priority=0):
    """ Adds a target regexp for dispatching """
    chain = self.regexs.get(regex, CommandChainDispatcher())
    chain.add(obj, priority)
    self.regexs[regex] = chain
python
{ "resource": "" }
q280470
StrDispatch.dispatch
test
def dispatch(self, key):
    """ Get a seq of Commandchain objects that match key """
    if key in self.strs:
        yield self.strs[key]

    for r, obj in self.regexs.items():
        if re.match(r, key):
            yield obj
        else:
            #print "nomatch", key  # dbg
            pass
python
{ "resource": "" }
q280471
StrDispatch.flat_matches
test
def flat_matches(self, key):
    """ Yield all 'value' targets, without priority """
    for val in self.dispatch(key):
        for el in val:
            yield el[1]  # only value, no priority
    return
python
{ "resource": "" }
q280472
NotebookManager._notebook_dir_changed
test
def _notebook_dir_changed(self, name, old, new):
    """do a bit of validation of the notebook dir"""
    if os.path.exists(new) and not os.path.isdir(new):
        raise TraitError("notebook dir %r is not a directory" % new)
    if not os.path.exists(new):
        self.log.info("Creating notebook dir %s", new)
        try:
            os.mkdir(new)
        except:
            raise TraitError("Couldn't create notebook dir %r" % new)
python
{ "resource": "" }
q280473
NotebookManager.list_notebooks
test
def list_notebooks(self):
    """List all notebooks in the notebook dir.

    This returns a list of dicts of the form::

        dict(notebook_id=notebook,name=name)
    """
    names = glob.glob(os.path.join(self.notebook_dir,
                                   '*' + self.filename_ext))
    names = [os.path.splitext(os.path.basename(name))[0]
             for name in names]

    data = []
    for name in names:
        if name not in self.rev_mapping:
            notebook_id = self.new_notebook_id(name)
        else:
            notebook_id = self.rev_mapping[name]
        data.append(dict(notebook_id=notebook_id, name=name))
    data = sorted(data, key=lambda item: item['name'])
    return data
python
{ "resource": "" }
q280474
NotebookManager.new_notebook_id
test
def new_notebook_id(self, name):
    """Generate a new notebook_id for a name and store its mappings."""
    # TODO: the following will give stable urls for notebooks, but unless
    # the notebooks are immediately redirected to their new urls when their
    # filename changes, nasty inconsistencies result.  So for now it's
    # disabled and instead we use a random uuid4() call.  But we leave the
    # logic here so that we can later reactivate it, when the necessary
    # url redirection code is written.
    #notebook_id = unicode(uuid.uuid5(uuid.NAMESPACE_URL,
    #                 'file://'+self.get_path_by_name(name).encode('utf-8')))

    notebook_id = unicode(uuid.uuid4())

    self.mapping[notebook_id] = name
    self.rev_mapping[name] = notebook_id
    return notebook_id
python
{ "resource": "" }
q280475
NotebookManager.delete_notebook_id
test
def delete_notebook_id(self, notebook_id):
    """Delete a notebook's id only. This doesn't delete the actual notebook."""
    name = self.mapping[notebook_id]
    del self.mapping[notebook_id]
    del self.rev_mapping[name]
python
{ "resource": "" }
q280476
NotebookManager.notebook_exists
test
def notebook_exists(self, notebook_id):
    """Does a notebook exist?"""
    if notebook_id not in self.mapping:
        return False
    path = self.get_path_by_name(self.mapping[notebook_id])
    return os.path.isfile(path)
python
{ "resource": "" }
q280477
NotebookManager.find_path
test
def find_path(self, notebook_id):
    """Return a full path to a notebook given its notebook_id."""
    try:
        name = self.mapping[notebook_id]
    except KeyError:
        raise web.HTTPError(404, u'Notebook does not exist: %s' % notebook_id)
    return self.get_path_by_name(name)
python
{ "resource": "" }
q280478
NotebookManager.get_path_by_name
test
def get_path_by_name(self, name):
    """Return a full path to a notebook given its name."""
    filename = name + self.filename_ext
    path = os.path.join(self.notebook_dir, filename)
    return path
python
{ "resource": "" }
q280479
NotebookManager.get_notebook
test
def get_notebook(self, notebook_id, format=u'json'):
    """Get the representation of a notebook in format by notebook_id."""
    format = unicode(format)
    if format not in self.allowed_formats:
        raise web.HTTPError(415, u'Invalid notebook format: %s' % format)
    last_modified, nb = self.get_notebook_object(notebook_id)
    kwargs = {}
    if format == 'json':
        # don't split lines for sending over the wire, because it
        # should match the Python in-memory format.
        kwargs['split_lines'] = False
    data = current.writes(nb, format, **kwargs)
    name = nb.metadata.get('name', 'notebook')
    return last_modified, name, data
python
{ "resource": "" }
q280480
NotebookManager.get_notebook_object
test
def get_notebook_object(self, notebook_id):
    """Get the NotebookNode representation of a notebook by notebook_id."""
    path = self.find_path(notebook_id)
    if not os.path.isfile(path):
        raise web.HTTPError(404, u'Notebook does not exist: %s' % notebook_id)
    info = os.stat(path)
    last_modified = datetime.datetime.utcfromtimestamp(info.st_mtime)
    with open(path, 'r') as f:
        s = f.read()
        try:
            # v1 and v2 and json in the .ipynb files.
            nb = current.reads(s, u'json')
        except:
            raise web.HTTPError(500, u'Unreadable JSON notebook.')
    # Always use the filename as the notebook name.
    nb.metadata.name = os.path.splitext(os.path.basename(path))[0]
    return last_modified, nb
python
{ "resource": "" }
q280481
NotebookManager.save_new_notebook
test
def save_new_notebook(self, data, name=None, format=u'json'):
    """Save a new notebook and return its notebook_id.

    If a name is passed in, it overrides any values in the notebook data
    and the value in the data is updated to use that value.
    """
    if format not in self.allowed_formats:
        raise web.HTTPError(415, u'Invalid notebook format: %s' % format)

    try:
        nb = current.reads(data.decode('utf-8'), format)
    except:
        raise web.HTTPError(400, u'Invalid JSON data')

    if name is None:
        try:
            name = nb.metadata.name
        except AttributeError:
            raise web.HTTPError(400, u'Missing notebook name')
    nb.metadata.name = name

    notebook_id = self.new_notebook_id(name)
    self.save_notebook_object(notebook_id, nb)
    return notebook_id
python
{ "resource": "" }
q280482
NotebookManager.save_notebook
test
def save_notebook(self, notebook_id, data, name=None, format=u'json'):
        """Save an existing notebook by notebook_id."""
        if format not in self.allowed_formats:
            raise web.HTTPError(415, u'Invalid notebook format: %s' % format)

        try:
            nb = current.reads(data.decode('utf-8'), format)
        except:
            raise web.HTTPError(400, u'Invalid JSON data')

        if name is not None:
            nb.metadata.name = name
        self.save_notebook_object(notebook_id, nb)
python
{ "resource": "" }
q280483
NotebookManager.save_notebook_object
test
def save_notebook_object(self, notebook_id, nb):
        """Save an existing notebook object by notebook_id."""
        if notebook_id not in self.mapping:
            raise web.HTTPError(404, u'Notebook does not exist: %s' % notebook_id)
        old_name = self.mapping[notebook_id]
        try:
            new_name = nb.metadata.name
        except AttributeError:
            raise web.HTTPError(400, u'Missing notebook name')
        path = self.get_path_by_name(new_name)
        try:
            with open(path, 'w') as f:
                current.write(nb, f, u'json')
        except Exception as e:
            raise web.HTTPError(400, u'Unexpected error while saving notebook: %s' % e)
        # save .py script as well
        if self.save_script:
            pypath = os.path.splitext(path)[0] + '.py'
            try:
                with io.open(pypath, 'w', encoding='utf-8') as f:
                    current.write(nb, f, u'py')
            except Exception as e:
                raise web.HTTPError(400, u'Unexpected error while saving notebook as script: %s' % e)
        if old_name != new_name:
            old_path = self.get_path_by_name(old_name)
            if os.path.isfile(old_path):
                os.unlink(old_path)
            if self.save_script:
                old_pypath = os.path.splitext(old_path)[0] + '.py'
                if os.path.isfile(old_pypath):
                    os.unlink(old_pypath)
            self.mapping[notebook_id] = new_name
            self.rev_mapping[new_name] = notebook_id
            del self.rev_mapping[old_name]
python
{ "resource": "" }
q280484
NotebookManager.delete_notebook
test
def delete_notebook(self, notebook_id):
        """Delete notebook by notebook_id."""
        path = self.find_path(notebook_id)
        if not os.path.isfile(path):
            raise web.HTTPError(404, u'Notebook does not exist: %s' % notebook_id)
        os.unlink(path)
        self.delete_notebook_id(notebook_id)
python
{ "resource": "" }
q280485
NotebookManager.new_notebook
test
def new_notebook(self):
        """Create a new notebook and return its notebook_id."""
        path, name = self.increment_filename('Untitled')
        notebook_id = self.new_notebook_id(name)
        metadata = current.new_metadata(name=name)
        nb = current.new_notebook(metadata=metadata)
        with open(path, 'w') as f:
            current.write(nb, f, u'json')
        return notebook_id
python
{ "resource": "" }
q280486
NotebookManager.copy_notebook
test
def copy_notebook(self, notebook_id):
        """Copy an existing notebook and return its notebook_id."""
        last_mod, nb = self.get_notebook_object(notebook_id)
        name = nb.metadata.name + '-Copy'
        path, name = self.increment_filename(name)
        nb.metadata.name = name
        notebook_id = self.new_notebook_id(name)
        self.save_notebook_object(notebook_id, nb)
        return notebook_id
python
{ "resource": "" }
q280487
phys_tokens
test
def phys_tokens(toks):
    """Return all physical tokens, even line continuations.

    tokenize.generate_tokens() doesn't return a token for the backslash that
    continues lines.  This wrapper provides those tokens so that we can
    re-create a faithful representation of the original source.

    Returns the same values as generate_tokens()
    """
    last_line = None
    last_lineno = -1
    last_ttype = None
    for ttype, ttext, (slineno, scol), (elineno, ecol), ltext in toks:
        if last_lineno != elineno:
            if last_line and last_line.endswith("\\\n"):
                # We are at the beginning of a new line, and the last line
                # ended with a backslash.  We probably have to inject a
                # backslash token into the stream.  Unfortunately, there's
                # more to figure out.  This code::
                #
                #   usage = """\
                #   HEY THERE
                #   """
                #
                # triggers this condition, but the token text is::
                #
                #   '"""\\\nHEY THERE\n"""'
                #
                # so we need to figure out if the backslash is already in the
                # string token or not.
                inject_backslash = True
                if last_ttype == tokenize.COMMENT:
                    # Comments like this \
                    # should never result in a new token.
                    inject_backslash = False
                elif ttype == token.STRING:
                    if "\n" in ttext and ttext.split('\n', 1)[0][-1] == '\\':
                        # It's a multiline string and the first line ends with
                        # a backslash, so we don't need to inject another.
                        inject_backslash = False
                if inject_backslash:
                    # Figure out what column the backslash is in.
                    ccol = len(last_line.split("\n")[-2]) - 1
                    # Yield the token, with a fake token type.
                    yield (
                        99999, "\\\n",
                        (slineno, ccol), (slineno, ccol+2),
                        last_line
                        )
            last_line = ltext
            last_ttype = ttype
        yield ttype, ttext, (slineno, scol), (elineno, ecol), ltext
        last_lineno = elineno
python
{ "resource": "" }
q280488
source_token_lines
test
def source_token_lines(source):
    """Generate a series of lines, one for each line in `source`.

    Each line is a list of pairs, each pair is a token::

        [('key', 'def'), ('ws', ' '), ('nam', 'hello'), ('op', '('), ... ]

    Each pair has a token class, and the token text.

    If you concatenate all the token texts, and then join them with newlines,
    you should have your original `source` back, with two differences:
    trailing whitespace is not preserved, and a final line with no newline
    is indistinguishable from a final line with a newline.
    """
    ws_tokens = set([token.INDENT, token.DEDENT, token.NEWLINE, tokenize.NL])
    line = []
    col = 0
    source = source.expandtabs(8).replace('\r\n', '\n')
    tokgen = generate_tokens(source)
    for ttype, ttext, (_, scol), (_, ecol), _ in phys_tokens(tokgen):
        mark_start = True
        for part in re.split('(\n)', ttext):
            if part == '\n':
                yield line
                line = []
                col = 0
                mark_end = False
            elif part == '':
                mark_end = False
            elif ttype in ws_tokens:
                mark_end = False
            else:
                if mark_start and scol > col:
                    line.append(("ws", " " * (scol - col)))
                    mark_start = False
                tok_class = tokenize.tok_name.get(ttype, 'xx').lower()[:3]
                if ttype == token.NAME and keyword.iskeyword(ttext):
                    tok_class = "key"
                line.append((tok_class, part))
                mark_end = True
            scol = 0
        if mark_end:
            col = ecol

    if line:
        yield line
python
{ "resource": "" }
q280489
load_default_config
test
def load_default_config(ipython_dir=None):
    """Load the default config file from the default ipython_dir.

    This is useful for embedded shells.
    """
    if ipython_dir is None:
        ipython_dir = get_ipython_dir()
    profile_dir = os.path.join(ipython_dir, 'profile_default')
    cl = PyFileConfigLoader(default_config_file_name, profile_dir)
    try:
        config = cl.load_config()
    except ConfigFileNotFound:
        # no config found
        config = Config()
    return config
python
{ "resource": "" }
q280490
TerminalIPythonApp._classes_default
test
def _classes_default(self):
        """This has to be in a method, for TerminalIPythonApp to be available."""
        return [
            InteractiveShellApp, # ShellApp comes before TerminalApp, because
            self.__class__,      # it will also affect subclasses (e.g. QtConsole)
            TerminalInteractiveShell,
            PromptManager,
            HistoryManager,
            ProfileDir,
            PlainTextFormatter,
            IPCompleter,
            ScriptMagics,
        ]
python
{ "resource": "" }
q280491
TerminalIPythonApp.parse_command_line
test
def parse_command_line(self, argv=None):
        """override to allow old '-pylab' flag with deprecation warning"""
        argv = sys.argv[1:] if argv is None else argv

        if '-pylab' in argv:
            # deprecated `-pylab` given,
            # warn and transform into current syntax
            argv = argv[:] # copy, don't clobber
            idx = argv.index('-pylab')
            warn.warn("`-pylab` flag has been deprecated.\n"
                "    Use `--pylab` instead, or `--pylab=foo` to specify a backend.")
            sub = '--pylab'
            if len(argv) > idx+1:
                # check for gui arg, as in '-pylab qt'
                gui = argv[idx+1]
                if gui in ('wx', 'qt', 'qt4', 'gtk', 'auto'):
                    sub = '--pylab='+gui
                    argv.pop(idx+1)
            argv[idx] = sub

        return super(TerminalIPythonApp, self).parse_command_line(argv)
python
{ "resource": "" }
q280492
TerminalIPythonApp.initialize
test
def initialize(self, argv=None):
        """Do actions after construct, but before starting the app."""
        super(TerminalIPythonApp, self).initialize(argv)
        if self.subapp is not None:
            # don't bother initializing further, starting subapp
            return
        if not self.ignore_old_config:
            check_for_old_config(self.ipython_dir)
        # print self.extra_args
        if self.extra_args and not self.something_to_run:
            self.file_to_run = self.extra_args[0]
        self.init_path()
        # create the shell
        self.init_shell()
        # and draw the banner
        self.init_banner()
        # Now a variety of things that happen after the banner is printed.
        self.init_gui_pylab()
        self.init_extensions()
        self.init_code()
python
{ "resource": "" }
q280493
TerminalIPythonApp.init_shell
test
def init_shell(self):
        """initialize the InteractiveShell instance"""
        # Create an InteractiveShell instance.
        # shell.display_banner should always be False for the terminal
        # based app, because we call shell.show_banner() by hand below
        # so the banner shows *before* all extension loading stuff.
        self.shell = TerminalInteractiveShell.instance(config=self.config,
                        display_banner=False, profile_dir=self.profile_dir,
                        ipython_dir=self.ipython_dir)
        self.shell.configurables.append(self)
python
{ "resource": "" }
q280494
TerminalIPythonApp.init_banner
test
def init_banner(self):
        """optionally display the banner"""
        if self.display_banner and self.interact:
            self.shell.show_banner()
        # Make sure there is a space below the banner.
        if self.log_level <= logging.INFO:
            print
python
{ "resource": "" }
q280495
repr_type
test
def repr_type(obj):
    """ Return a string representation of a value and its type for readable
    error messages.
    """
    the_type = type(obj)
    if (not py3compat.PY3) and the_type is InstanceType:
        # Old-style class.
        the_type = obj.__class__
    msg = '%r %r' % (obj, the_type)
    return msg
python
{ "resource": "" }
q280496
parse_notifier_name
test
def parse_notifier_name(name):
    """Convert the name argument to a list of names.

    Examples
    --------
    >>> parse_notifier_name('a')
    ['a']
    >>> parse_notifier_name(['a','b'])
    ['a', 'b']
    >>> parse_notifier_name(None)
    ['anytrait']
    """
    if isinstance(name, str):
        return [name]
    elif name is None:
        return ['anytrait']
    elif isinstance(name, (list, tuple)):
        for n in name:
            assert isinstance(n, str), "names must be strings"
        return name
python
{ "resource": "" }
q280497
TraitType.set_default_value
test
def set_default_value(self, obj):
        """Set the default value on a per instance basis.

        This method is called by :meth:`instance_init` to create and
        validate the default value.  The creation and validation of
        default values must be delayed until the parent :class:`HasTraits`
        class has been instantiated.
        """
        # Check for a deferred initializer defined in the same class as the
        # trait declaration or above.
        mro = type(obj).mro()
        meth_name = '_%s_default' % self.name
        for cls in mro[:mro.index(self.this_class)+1]:
            if meth_name in cls.__dict__:
                break
        else:
            # We didn't find one. Do static initialization.
            dv = self.get_default_value()
            newdv = self._validate(obj, dv)
            obj._trait_values[self.name] = newdv
            return
        # Complete the dynamic initialization.
        obj._trait_dyn_inits[self.name] = cls.__dict__[meth_name]
python
{ "resource": "" }
q280498
HasTraits.on_trait_change
test
def on_trait_change(self, handler, name=None, remove=False):
        """Set up a handler to be called when a trait changes.

        This is used to set up dynamic notifications of trait changes.

        Static handlers can be created by creating methods on a HasTraits
        subclass with the naming convention '_[traitname]_changed'.  Thus,
        to create a static handler for the trait 'a', create the method
        _a_changed(self, name, old, new) (fewer arguments can be used, see
        below).

        Parameters
        ----------
        handler : callable
            A callable that is called when a trait changes.  Its
            signature can be handler(), handler(name), handler(name, new)
            or handler(name, old, new).
        name : list, str, None
            If None, the handler will apply to all traits.  If a list
            of str, handler will apply to all names in the list.  If a
            str, the handler will apply just to that name.
        remove : bool
            If False (the default), then install the handler.  If True
            then uninstall it.
        """
        if remove:
            names = parse_notifier_name(name)
            for n in names:
                self._remove_notifiers(handler, n)
        else:
            names = parse_notifier_name(name)
            for n in names:
                self._add_notifiers(handler, n)
python
{ "resource": "" }
q280499
HasTraits.class_traits
test
def class_traits(cls, **metadata):
        """Get a list of all the traits of this class.

        This method is just like the :meth:`traits` method, but is unbound.

        The TraitTypes returned don't know anything about the values
        that the various HasTrait's instances are holding.

        This follows the same algorithm as traits does and does not allow
        for any simple way of specifying merely that a metadata name
        exists, but has any value.  This is because get_metadata returns
        None if a metadata key doesn't exist.
        """
        traits = dict([memb for memb in getmembers(cls) if \
                     isinstance(memb[1], TraitType)])

        if len(metadata) == 0:
            return traits

        for meta_name, meta_eval in metadata.items():
            if type(meta_eval) is not FunctionType:
                metadata[meta_name] = _SimpleTest(meta_eval)

        result = {}
        for name, trait in traits.items():
            for meta_name, meta_eval in metadata.items():
                if not meta_eval(trait.get_metadata(meta_name)):
                    break
            else:
                result[name] = trait

        return result
python
{ "resource": "" }