Dataset columns:
  _id               string (length 2-7)
  title             string (length 1-88)
  partition         string (3 classes)
  text              string (length 75-19.8k)
  language          string (1 class)
  meta_information  dict
q279400
InteractiveShell.init_user_ns
test
def init_user_ns(self):
    """Initialize all user-visible namespaces to their minimum defaults.

    Certain history lists are also initialized here, as they effectively
    act as user namespaces.

    Notes
    -----
    All data structures here are only filled in, they are NOT reset by this
    method. If they were not empty before, data will simply be added to
    them.
    """
    # This function works in two parts: first we put a few things in
    # user_ns, and we sync those contents into user_ns_hidden so that these
    # initial variables aren't shown by %who. After the sync, we add the
    # rest of what we *do* want the user to see with %who even on a new
    # session (probably nothing, so they really only see their own stuff)

    # The user dict must *always* have a __builtin__ reference to the
    # Python standard __builtin__ namespace, which must be imported.
    # This is so that certain operations in prompt evaluation can be
    # reliably executed with builtins. Note that we can NOT use
    # __builtins__ (note the 's'), because that can either be a dict or a
    # module, and can even mutate at runtime, depending on the context
    # (Python makes no guarantees on it). In contrast, __builtin__ is
    # always a module object, though it must be explicitly imported.

    # For more details:
    # http://mail.python.org/pipermail/python-dev/2001-April/014068.html
    ns = dict()

    # Put 'help' in the user namespace
    try:
        from site import _Helper
        ns['help'] = _Helper()
    except ImportError:
        warn('help() not available - check site.py')

    # make global variables for user access to the histories
    ns['_ih'] = self.history_manager.input_hist_parsed
    ns['_oh'] = self.history_manager.output_hist
    ns['_dh'] = self.history_manager.dir_hist

    ns['_sh'] = shadowns

    # user aliases to input and output histories.  These shouldn't show up
    # in %who, as they can have very large reprs.
    ns['In'] = self.history_manager.input_hist_parsed
    ns['Out'] = self.history_manager.output_hist

    # Store myself as the public api!!!
    ns['get_ipython'] = self.get_ipython

    ns['exit'] = self.exiter
    ns['quit'] = self.exiter

    # Sync what we've added so far to user_ns_hidden so these aren't seen
    # by %who
    self.user_ns_hidden.update(ns)

    # Anything put into ns now would show up in %who.  Think twice before
    # putting anything here, as we really want %who to show the user their
    # stuff, not our variables.

    # Finally, update the real user's namespace
    self.user_ns.update(ns)
python
{ "resource": "" }
q279401
InteractiveShell.all_ns_refs
test
def all_ns_refs(self): """Get a list of references to all the namespace dictionaries in which IPython might store a user-created object. Note that this does not include the displayhook, which also caches objects from the output.""" return [self.user_ns, self.user_global_ns, self._user_main_module.__dict__] + self._main_ns_cache.values()
python
{ "resource": "" }
q279402
InteractiveShell.reset
test
def reset(self, new_session=True): """Clear all internal namespaces, and attempt to release references to user objects. If new_session is True, a new history session will be opened. """ # Clear histories self.history_manager.reset(new_session) # Reset counter used to index all histories if new_session: self.execution_count = 1 # Flush cached output items if self.displayhook.do_full_cache: self.displayhook.flush() # The main execution namespaces must be cleared very carefully, # skipping the deletion of the builtin-related keys, because doing so # would cause errors in many object's __del__ methods. if self.user_ns is not self.user_global_ns: self.user_ns.clear() ns = self.user_global_ns drop_keys = set(ns.keys()) drop_keys.discard('__builtin__') drop_keys.discard('__builtins__') drop_keys.discard('__name__') for k in drop_keys: del ns[k] self.user_ns_hidden.clear() # Restore the user namespaces to minimal usability self.init_user_ns() # Restore the default and user aliases self.alias_manager.clear_aliases() self.alias_manager.init_aliases() # Flush the private list of module references kept for script # execution protection self.clear_main_mod_cache() # Clear out the namespace from the last %run self.new_main_mod()
python
{ "resource": "" }
q279403
InteractiveShell.del_var
test
def del_var(self, varname, by_name=False): """Delete a variable from the various namespaces, so that, as far as possible, we're not keeping any hidden references to it. Parameters ---------- varname : str The name of the variable to delete. by_name : bool If True, delete variables with the given name in each namespace. If False (default), find the variable in the user namespace, and delete references to it. """ if varname in ('__builtin__', '__builtins__'): raise ValueError("Refusing to delete %s" % varname) ns_refs = self.all_ns_refs if by_name: # Delete by name for ns in ns_refs: try: del ns[varname] except KeyError: pass else: # Delete by object try: obj = self.user_ns[varname] except KeyError: raise NameError("name '%s' is not defined" % varname) # Also check in output history ns_refs.append(self.history_manager.output_hist) for ns in ns_refs: to_delete = [n for n, o in ns.iteritems() if o is obj] for name in to_delete: del ns[name] # displayhook keeps extra references, but not in a dictionary for name in ('_', '__', '___'): if getattr(self.displayhook, name) is obj: setattr(self.displayhook, name, None)
python
{ "resource": "" }
q279404
InteractiveShell.reset_selective
test
def reset_selective(self, regex=None):
    """Clear selective variables from internal namespaces based on a
    specified regular expression.

    Parameters
    ----------
    regex : string or compiled pattern, optional
        A regular expression pattern that will be used in searching
        variable names in the user's namespaces.
    """
    if regex is not None:
        try:
            m = re.compile(regex)
        except TypeError:
            raise TypeError('regex must be a string or compiled pattern')
        # Search for keys in each namespace that match the given regex
        # If a match is found, delete the key/value pair.
        for ns in self.all_ns_refs:
            # iterate over a copy of the keys, since we delete from ns below
            for var in list(ns):
                if m.search(var):
                    del ns[var]
python
{ "resource": "" }
q279405
InteractiveShell.push
test
def push(self, variables, interactive=True):
    """Inject a group of variables into the IPython user namespace.

    Parameters
    ----------
    variables : dict, str or list/tuple of str
        The variables to inject into the user's namespace.  If a dict, a
        simple update is done.  If a str, the string is assumed to have
        variable names separated by spaces.  A list/tuple of str can also
        be used to give the variable names.  If just the variable names
        are given (list/tuple/str) then the variable values are looked up
        in the caller's frame.
    interactive : bool
        If True (default), the variables will be listed with the ``who``
        magic.
    """
    vdict = None

    # We need a dict of name/value pairs to do namespace updates.
    if isinstance(variables, dict):
        vdict = variables
    elif isinstance(variables, (basestring, list, tuple)):
        if isinstance(variables, basestring):
            vlist = variables.split()
        else:
            vlist = variables
        vdict = {}
        cf = sys._getframe(1)
        for name in vlist:
            try:
                vdict[name] = eval(name, cf.f_globals, cf.f_locals)
            except:
                print ('Could not get variable %s from %s' %
                       (name, cf.f_code.co_name))
    else:
        raise ValueError('variables must be a dict/str/list/tuple')

    # Propagate variables to user namespace
    self.user_ns.update(vdict)

    # And configure interactive visibility
    user_ns_hidden = self.user_ns_hidden
    if interactive:
        user_ns_hidden.difference_update(vdict)
    else:
        user_ns_hidden.update(vdict)
python
{ "resource": "" }
q279406
InteractiveShell._ofind
test
def _ofind(self, oname, namespaces=None): """Find an object in the available namespaces. self._ofind(oname) -> dict with keys: found,obj,ospace,ismagic Has special code to detect magic functions. """ oname = oname.strip() #print '1- oname: <%r>' % oname # dbg if not oname.startswith(ESC_MAGIC) and \ not oname.startswith(ESC_MAGIC2) and \ not py3compat.isidentifier(oname, dotted=True): return dict(found=False) alias_ns = None if namespaces is None: # Namespaces to search in: # Put them in a list. The order is important so that we # find things in the same order that Python finds them. namespaces = [ ('Interactive', self.user_ns), ('Interactive (global)', self.user_global_ns), ('Python builtin', builtin_mod.__dict__), ('Alias', self.alias_manager.alias_table), ] alias_ns = self.alias_manager.alias_table # initialize results to 'null' found = False; obj = None; ospace = None; ds = None; ismagic = False; isalias = False; parent = None # We need to special-case 'print', which as of python2.6 registers as a # function but should only be treated as one if print_function was # loaded with a future import. In this case, just bail. if (oname == 'print' and not py3compat.PY3 and not \ (self.compile.compiler_flags & __future__.CO_FUTURE_PRINT_FUNCTION)): return {'found':found, 'obj':obj, 'namespace':ospace, 'ismagic':ismagic, 'isalias':isalias, 'parent':parent} # Look for the given name by splitting it in parts. If the head is # found, then we look for all the remaining parts as members, and only # declare success if we can find them all. oname_parts = oname.split('.') oname_head, oname_rest = oname_parts[0],oname_parts[1:] for nsname,ns in namespaces: try: obj = ns[oname_head] except KeyError: continue else: #print 'oname_rest:', oname_rest # dbg for part in oname_rest: try: parent = obj obj = getattr(obj,part) except: # Blanket except b/c some badly implemented objects # allow __getattr__ to raise exceptions other than # AttributeError, which then crashes IPython. break else: # If we finish the for loop (no break), we got all members found = True ospace = nsname if ns == alias_ns: isalias = True break # namespace loop # Try to see if it's magic if not found: obj = None if oname.startswith(ESC_MAGIC2): oname = oname.lstrip(ESC_MAGIC2) obj = self.find_cell_magic(oname) elif oname.startswith(ESC_MAGIC): oname = oname.lstrip(ESC_MAGIC) obj = self.find_line_magic(oname) else: # search without prefix, so run? will find %run? obj = self.find_line_magic(oname) if obj is None: obj = self.find_cell_magic(oname) if obj is not None: found = True ospace = 'IPython internal' ismagic = True # Last try: special-case some literals like '', [], {}, etc: if not found and oname_head in ["''",'""','[]','{}','()']: obj = eval(oname_head) found = True ospace = 'Interactive' return {'found':found, 'obj':obj, 'namespace':ospace, 'ismagic':ismagic, 'isalias':isalias, 'parent':parent}
python
{ "resource": "" }
q279407
InteractiveShell._ofind_property
test
def _ofind_property(self, oname, info): """Second part of object finding, to look for property details.""" if info.found: # Get the docstring of the class property if it exists. path = oname.split('.') root = '.'.join(path[:-1]) if info.parent is not None: try: target = getattr(info.parent, '__class__') # The object belongs to a class instance. try: target = getattr(target, path[-1]) # The class defines the object. if isinstance(target, property): oname = root + '.__class__.' + path[-1] info = Struct(self._ofind(oname)) except AttributeError: pass except AttributeError: pass # We return either the new info or the unmodified input if the object # hadn't been found return info
python
{ "resource": "" }
q279408
InteractiveShell._object_find
test
def _object_find(self, oname, namespaces=None): """Find an object and return a struct with info about it.""" inf = Struct(self._ofind(oname, namespaces)) return Struct(self._ofind_property(oname, inf))
python
{ "resource": "" }
q279409
InteractiveShell._inspect
test
def _inspect(self, meth, oname, namespaces=None, **kw): """Generic interface to the inspector system. This function is meant to be called by pdef, pdoc & friends.""" info = self._object_find(oname, namespaces) if info.found: pmethod = getattr(self.inspector, meth) formatter = format_screen if info.ismagic else None if meth == 'pdoc': pmethod(info.obj, oname, formatter) elif meth == 'pinfo': pmethod(info.obj, oname, formatter, info, **kw) else: pmethod(info.obj, oname) else: print 'Object `%s` not found.' % oname return 'not found'
python
{ "resource": "" }
q279410
InteractiveShell.init_history
test
def init_history(self): """Sets up the command history, and starts regular autosaves.""" self.history_manager = HistoryManager(shell=self, config=self.config) self.configurables.append(self.history_manager)
python
{ "resource": "" }
q279411
InteractiveShell.excepthook
test
def excepthook(self, etype, value, tb):
    """One more defense for GUI apps that call sys.excepthook.

    GUI frameworks like wxPython trap exceptions and call sys.excepthook
    themselves.  I guess this is a feature that enables them to keep
    running after exceptions that would otherwise kill their mainloop.
    This is a bother for IPython which expects to catch all of the program
    exceptions with a try: except: statement.

    Normally, IPython sets sys.excepthook to a CrashHandler instance, so if
    any app directly invokes sys.excepthook, it will look to the user like
    IPython crashed.  In order to work around this, we can disable the
    CrashHandler and replace it with this excepthook instead, which prints
    a regular traceback using our InteractiveTB.  In this fashion, apps
    which call sys.excepthook will generate a regular-looking exception
    from IPython, and the CrashHandler will only be triggered by real
    IPython crashes.

    This hook should be used sparingly, only in places which are not likely
    to be true IPython errors.
    """
    self.showtraceback((etype, value, tb), tb_offset=0)
python
{ "resource": "" }
q279412
InteractiveShell.showtraceback
test
def showtraceback(self,exc_tuple = None,filename=None,tb_offset=None, exception_only=False): """Display the exception that just occurred. If nothing is known about the exception, this is the method which should be used throughout the code for presenting user tracebacks, rather than directly invoking the InteractiveTB object. A specific showsyntaxerror() also exists, but this method can take care of calling it if needed, so unless you are explicitly catching a SyntaxError exception, don't try to analyze the stack manually and simply call this method.""" try: try: etype, value, tb = self._get_exc_info(exc_tuple) except ValueError: self.write_err('No traceback available to show.\n') return if etype is SyntaxError: # Though this won't be called by syntax errors in the input # line, there may be SyntaxError cases with imported code. self.showsyntaxerror(filename) elif etype is UsageError: self.write_err("UsageError: %s" % value) else: if exception_only: stb = ['An exception has occurred, use %tb to see ' 'the full traceback.\n'] stb.extend(self.InteractiveTB.get_exception_only(etype, value)) else: try: # Exception classes can customise their traceback - we # use this in IPython.parallel for exceptions occurring # in the engines. This should return a list of strings. stb = value._render_traceback_() except Exception: stb = self.InteractiveTB.structured_traceback(etype, value, tb, tb_offset=tb_offset) self._showtraceback(etype, value, stb) if self.call_pdb: # drop into debugger self.debugger(force=True) return # Actually show the traceback self._showtraceback(etype, value, stb) except KeyboardInterrupt: self.write_err("\nKeyboardInterrupt\n")
python
{ "resource": "" }
q279413
InteractiveShell._showtraceback
test
def _showtraceback(self, etype, evalue, stb): """Actually show a traceback. Subclasses may override this method to put the traceback on a different place, like a side channel. """ print >> io.stdout, self.InteractiveTB.stb2text(stb)
python
{ "resource": "" }
q279414
InteractiveShell.showsyntaxerror
test
def showsyntaxerror(self, filename=None): """Display the syntax error that just occurred. This doesn't display a stack trace because there isn't one. If a filename is given, it is stuffed in the exception instead of what was there before (because Python's parser always uses "<string>" when reading from a string). """ etype, value, last_traceback = self._get_exc_info() if filename and etype is SyntaxError: try: value.filename = filename except: # Not the format we expect; leave it alone pass stb = self.SyntaxTB.structured_traceback(etype, value, []) self._showtraceback(etype, value, stb)
python
{ "resource": "" }
q279415
InteractiveShell.pre_readline
test
def pre_readline(self): """readline hook to be used at the start of each line. Currently it handles auto-indent only.""" if self.rl_do_indent: self.readline.insert_text(self._indent_current_str()) if self.rl_next_input is not None: self.readline.insert_text(self.rl_next_input) self.rl_next_input = None
python
{ "resource": "" }
q279416
InteractiveShell.complete
test
def complete(self, text, line=None, cursor_pos=None): """Return the completed text and a list of completions. Parameters ---------- text : string A string of text to be completed on. It can be given as empty and instead a line/position pair are given. In this case, the completer itself will split the line like readline does. line : string, optional The complete line that text is part of. cursor_pos : int, optional The position of the cursor on the input line. Returns ------- text : string The actual text that was completed. matches : list A sorted list with all possible completions. The optional arguments allow the completion to take more context into account, and are part of the low-level completion API. This is a wrapper around the completion mechanism, similar to what readline does at the command line when the TAB key is hit. By exposing it as a method, it can be used by other non-readline environments (such as GUIs) for text completion. Simple usage example: In [1]: x = 'hello' In [2]: _ip.complete('x.l') Out[2]: ('x.l', ['x.ljust', 'x.lower', 'x.lstrip']) """ # Inject names into __builtin__ so we can complete on the added names. with self.builtin_trap: return self.Completer.complete(text, line, cursor_pos)
python
{ "resource": "" }
q279417
InteractiveShell.set_custom_completer
test
def set_custom_completer(self, completer, pos=0): """Adds a new custom completer function. The position argument (defaults to 0) is the index in the completers list where you want the completer to be inserted.""" newcomp = types.MethodType(completer,self.Completer) self.Completer.matchers.insert(pos,newcomp)
python
{ "resource": "" }
q279418
InteractiveShell.set_completer_frame
test
def set_completer_frame(self, frame=None): """Set the frame of the completer.""" if frame: self.Completer.namespace = frame.f_locals self.Completer.global_namespace = frame.f_globals else: self.Completer.namespace = self.user_ns self.Completer.global_namespace = self.user_global_ns
python
{ "resource": "" }
q279419
InteractiveShell.run_line_magic
test
def run_line_magic(self, magic_name, line): """Execute the given line magic. Parameters ---------- magic_name : str Name of the desired magic function, without '%' prefix. line : str The rest of the input line as a single string. """ fn = self.find_line_magic(magic_name) if fn is None: cm = self.find_cell_magic(magic_name) etpl = "Line magic function `%%%s` not found%s." extra = '' if cm is None else (' (But cell magic `%%%%%s` exists, ' 'did you mean that instead?)' % magic_name ) error(etpl % (magic_name, extra)) else: # Note: this is the distance in the stack to the user's frame. # This will need to be updated if the internal calling logic gets # refactored, or else we'll be expanding the wrong variables. stack_depth = 2 magic_arg_s = self.var_expand(line, stack_depth) # Put magic args in a list so we can call with f(*a) syntax args = [magic_arg_s] # Grab local namespace if we need it: if getattr(fn, "needs_local_scope", False): args.append(sys._getframe(stack_depth).f_locals) with self.builtin_trap: result = fn(*args) return result
python
{ "resource": "" }
q279420
InteractiveShell.find_magic
test
def find_magic(self, magic_name, magic_kind='line'): """Find and return a magic of the given type by name. Returns None if the magic isn't found.""" return self.magics_manager.magics[magic_kind].get(magic_name)
python
{ "resource": "" }
q279421
InteractiveShell.define_macro
test
def define_macro(self, name, themacro): """Define a new macro Parameters ---------- name : str The name of the macro. themacro : str or Macro The action to do upon invoking the macro. If a string, a new Macro object is created by passing the string to it. """ from IPython.core import macro if isinstance(themacro, basestring): themacro = macro.Macro(themacro) if not isinstance(themacro, macro.Macro): raise ValueError('A macro must be a string or a Macro instance.') self.user_ns[name] = themacro
python
{ "resource": "" }
q279422
InteractiveShell.system_raw
test
def system_raw(self, cmd): """Call the given cmd in a subprocess using os.system Parameters ---------- cmd : str Command to execute. """ cmd = self.var_expand(cmd, depth=1) # protect os.system from UNC paths on Windows, which it can't handle: if sys.platform == 'win32': from IPython.utils._process_win32 import AvoidUNCPath with AvoidUNCPath() as path: if path is not None: cmd = '"pushd %s &&"%s' % (path, cmd) cmd = py3compat.unicode_to_str(cmd) ec = os.system(cmd) else: cmd = py3compat.unicode_to_str(cmd) ec = os.system(cmd) # We explicitly do NOT return the subprocess status code, because # a non-None value would trigger :func:`sys.displayhook` calls. # Instead, we store the exit_code in user_ns. self.user_ns['_exit_code'] = ec
python
{ "resource": "" }
q279423
InteractiveShell.auto_rewrite_input
test
def auto_rewrite_input(self, cmd): """Print to the screen the rewritten form of the user's command. This shows visual feedback by rewriting input lines that cause automatic calling to kick in, like:: /f x into:: ------> f(x) after the user's input prompt. This helps the user understand that the input line was transformed automatically by IPython. """ if not self.show_rewritten_input: return rw = self.prompt_manager.render('rewrite') + cmd try: # plain ascii works better w/ pyreadline, on some machines, so # we use it and only print uncolored rewrite if we have unicode rw = str(rw) print >> io.stdout, rw except UnicodeEncodeError: print "------> " + cmd
python
{ "resource": "" }
q279424
InteractiveShell.user_variables
test
def user_variables(self, names): """Get a list of variable names from the user's namespace. Parameters ---------- names : list of strings A list of names of variables to be read from the user namespace. Returns ------- A dict, keyed by the input names and with the repr() of each value. """ out = {} user_ns = self.user_ns for varname in names: try: value = repr(user_ns[varname]) except: value = self._simple_error() out[varname] = value return out
python
{ "resource": "" }
q279425
InteractiveShell.user_expressions
test
def user_expressions(self, expressions): """Evaluate a dict of expressions in the user's namespace. Parameters ---------- expressions : dict A dict with string keys and string values. The expression values should be valid Python expressions, each of which will be evaluated in the user namespace. Returns ------- A dict, keyed like the input expressions dict, with the repr() of each value. """ out = {} user_ns = self.user_ns global_ns = self.user_global_ns for key, expr in expressions.iteritems(): try: value = repr(eval(expr, global_ns, user_ns)) except: value = self._simple_error() out[key] = value return out
python
{ "resource": "" }
q279426
InteractiveShell.ev
test
def ev(self, expr): """Evaluate python expression expr in user namespace. Returns the result of evaluation """ with self.builtin_trap: return eval(expr, self.user_global_ns, self.user_ns)
python
{ "resource": "" }
q279427
InteractiveShell.safe_execfile_ipy
test
def safe_execfile_ipy(self, fname): """Like safe_execfile, but for .ipy files with IPython syntax. Parameters ---------- fname : str The name of the file to execute. The filename must have a .ipy extension. """ fname = os.path.abspath(os.path.expanduser(fname)) # Make sure we can open the file try: with open(fname) as thefile: pass except: warn('Could not open file <%s> for safe execution.' % fname) return # Find things also in current directory. This is needed to mimic the # behavior of running a script from the system command line, where # Python inserts the script's directory into sys.path dname = os.path.dirname(fname) with prepended_to_syspath(dname): try: with open(fname) as thefile: # self.run_cell currently captures all exceptions # raised in user code. It would be nice if there were # versions of runlines, execfile that did raise, so # we could catch the errors. self.run_cell(thefile.read(), store_history=False) except: self.showtraceback() warn('Unknown failure executing file: <%s>' % fname)
python
{ "resource": "" }
q279428
InteractiveShell._run_cached_cell_magic
test
def _run_cached_cell_magic(self, magic_name, line): """Special method to call a cell magic with the data stored in self. """ cell = self._current_cell_magic_body self._current_cell_magic_body = None return self.run_cell_magic(magic_name, line, cell)
python
{ "resource": "" }
q279429
InteractiveShell.run_cell
test
def run_cell(self, raw_cell, store_history=False, silent=False): """Run a complete IPython cell. Parameters ---------- raw_cell : str The code (including IPython code such as %magic functions) to run. store_history : bool If True, the raw and translated cell will be stored in IPython's history. For user code calling back into IPython's machinery, this should be set to False. silent : bool If True, avoid side-effets, such as implicit displayhooks, history, and logging. silent=True forces store_history=False. """ if (not raw_cell) or raw_cell.isspace(): return if silent: store_history = False self.input_splitter.push(raw_cell) # Check for cell magics, which leave state behind. This interface is # ugly, we need to do something cleaner later... Now the logic is # simply that the input_splitter remembers if there was a cell magic, # and in that case we grab the cell body. if self.input_splitter.cell_magic_parts: self._current_cell_magic_body = \ ''.join(self.input_splitter.cell_magic_parts) cell = self.input_splitter.source_reset() with self.builtin_trap: prefilter_failed = False if len(cell.splitlines()) == 1: try: # use prefilter_lines to handle trailing newlines # restore trailing newline for ast.parse cell = self.prefilter_manager.prefilter_lines(cell) + '\n' except AliasError as e: error(e) prefilter_failed = True except Exception: # don't allow prefilter errors to crash IPython self.showtraceback() prefilter_failed = True # Store raw and processed history if store_history: self.history_manager.store_inputs(self.execution_count, cell, raw_cell) if not silent: self.logger.log(cell, raw_cell) if not prefilter_failed: # don't run if prefilter failed cell_name = self.compile.cache(cell, self.execution_count) with self.display_trap: try: code_ast = self.compile.ast_parse(cell, filename=cell_name) except IndentationError: self.showindentationerror() if store_history: self.execution_count += 1 return None except (OverflowError, SyntaxError, ValueError, TypeError, MemoryError): self.showsyntaxerror() if store_history: self.execution_count += 1 return None interactivity = "none" if silent else self.ast_node_interactivity self.run_ast_nodes(code_ast.body, cell_name, interactivity=interactivity) # Execute any registered post-execution functions. # unless we are silent post_exec = [] if silent else self._post_execute.iteritems() for func, status in post_exec: if self.disable_failing_post_execute and not status: continue try: func() except KeyboardInterrupt: print >> io.stderr, "\nKeyboardInterrupt" except Exception: # register as failing: self._post_execute[func] = False self.showtraceback() print >> io.stderr, '\n'.join([ "post-execution function %r produced an error." % func, "If this problem persists, you can disable failing post-exec functions with:", "", " get_ipython().disable_failing_post_execute = True" ]) if store_history: # Write output to the database. Does nothing unless # history output logging is enabled. self.history_manager.store_output(self.execution_count) # Each cell is a *single* input, regardless of how many lines it has self.execution_count += 1
python
{ "resource": "" }
q279430
InteractiveShell.run_ast_nodes
test
def run_ast_nodes(self, nodelist, cell_name, interactivity='last_expr'): """Run a sequence of AST nodes. The execution mode depends on the interactivity parameter. Parameters ---------- nodelist : list A sequence of AST nodes to run. cell_name : str Will be passed to the compiler as the filename of the cell. Typically the value returned by ip.compile.cache(cell). interactivity : str 'all', 'last', 'last_expr' or 'none', specifying which nodes should be run interactively (displaying output from expressions). 'last_expr' will run the last node interactively only if it is an expression (i.e. expressions in loops or other blocks are not displayed. Other values for this parameter will raise a ValueError. """ if not nodelist: return if interactivity == 'last_expr': if isinstance(nodelist[-1], ast.Expr): interactivity = "last" else: interactivity = "none" if interactivity == 'none': to_run_exec, to_run_interactive = nodelist, [] elif interactivity == 'last': to_run_exec, to_run_interactive = nodelist[:-1], nodelist[-1:] elif interactivity == 'all': to_run_exec, to_run_interactive = [], nodelist else: raise ValueError("Interactivity was %r" % interactivity) exec_count = self.execution_count try: for i, node in enumerate(to_run_exec): mod = ast.Module([node]) code = self.compile(mod, cell_name, "exec") if self.run_code(code): return True for i, node in enumerate(to_run_interactive): mod = ast.Interactive([node]) code = self.compile(mod, cell_name, "single") if self.run_code(code): return True # Flush softspace if softspace(sys.stdout, 0): print except: # It's possible to have exceptions raised here, typically by # compilation of odd code (such as a naked 'return' outside a # function) that did parse but isn't valid. Typically the exception # is a SyntaxError, but it's safest just to catch anything and show # the user a traceback. # We do only one try/except outside the loop to minimize the impact # on runtime, and also because if any node in the node list is # broken, we should stop execution completely. self.showtraceback() return False
python
{ "resource": "" }
q279431
InteractiveShell.enable_pylab
test
def enable_pylab(self, gui=None, import_all=True): """Activate pylab support at runtime. This turns on support for matplotlib, preloads into the interactive namespace all of numpy and pylab, and configures IPython to correctly interact with the GUI event loop. The GUI backend to be used can be optionally selected with the optional :param:`gui` argument. Parameters ---------- gui : optional, string If given, dictates the choice of matplotlib GUI backend to use (should be one of IPython's supported backends, 'qt', 'osx', 'tk', 'gtk', 'wx' or 'inline'), otherwise we use the default chosen by matplotlib (as dictated by the matplotlib build-time options plus the user's matplotlibrc configuration file). Note that not all backends make sense in all contexts, for example a terminal ipython can't display figures inline. """ from IPython.core.pylabtools import mpl_runner # We want to prevent the loading of pylab to pollute the user's # namespace as shown by the %who* magics, so we execute the activation # code in an empty namespace, and we update *both* user_ns and # user_ns_hidden with this information. ns = {} try: gui = pylab_activate(ns, gui, import_all, self) except KeyError: error("Backend %r not supported" % gui) return self.user_ns.update(ns) self.user_ns_hidden.update(ns) # Now we must activate the gui pylab wants to use, and fix %run to take # plot updates into account self.enable_gui(gui) self.magics_manager.registry['ExecutionMagics'].default_runner = \ mpl_runner(self.safe_execfile)
python
{ "resource": "" }
q279432
InteractiveShell.var_expand
test
def var_expand(self, cmd, depth=0, formatter=DollarFormatter()): """Expand python variables in a string. The depth argument indicates how many frames above the caller should be walked to look for the local namespace where to expand variables. The global namespace for expansion is always the user's interactive namespace. """ ns = self.user_ns.copy() ns.update(sys._getframe(depth+1).f_locals) ns.pop('self', None) try: cmd = formatter.format(cmd, **ns) except Exception: # if formatter couldn't format, just let it go untransformed pass return cmd
python
{ "resource": "" }
q279433
InteractiveShell.mktempfile
test
def mktempfile(self, data=None, prefix='ipython_edit_'): """Make a new tempfile and return its filename. This makes a call to tempfile.mktemp, but it registers the created filename internally so ipython cleans it up at exit time. Optional inputs: - data(None): if data is given, it gets written out to the temp file immediately, and the file is closed again.""" filename = tempfile.mktemp('.py', prefix) self.tempfiles.append(filename) if data: tmp_file = open(filename,'w') tmp_file.write(data) tmp_file.close() return filename
python
{ "resource": "" }
q279434
InteractiveShell.extract_input_lines
test
def extract_input_lines(self, range_str, raw=False): """Return as a string a set of input history slices. Parameters ---------- range_str : string The set of slices is given as a string, like "~5/6-~4/2 4:8 9", since this function is for use by magic functions which get their arguments as strings. The number before the / is the session number: ~n goes n back from the current session. Optional Parameters: - raw(False): by default, the processed input is used. If this is true, the raw input history is used instead. Note that slices can be called with two notations: N:M -> standard python form, means including items N...(M-1). N-M -> include items N..M (closed endpoint).""" lines = self.history_manager.get_range_by_str(range_str, raw=raw) return "\n".join(x for _, _, x in lines)
python
{ "resource": "" }
q279435
InteractiveShell.find_user_code
test
def find_user_code(self, target, raw=True, py_only=False): """Get a code string from history, file, url, or a string or macro. This is mainly used by magic functions. Parameters ---------- target : str A string specifying code to retrieve. This will be tried respectively as: ranges of input history (see %history for syntax), url, correspnding .py file, filename, or an expression evaluating to a string or Macro in the user namespace. raw : bool If true (default), retrieve raw history. Has no effect on the other retrieval mechanisms. py_only : bool (default False) Only try to fetch python code, do not try alternative methods to decode file if unicode fails. Returns ------- A string of code. ValueError is raised if nothing is found, and TypeError if it evaluates to an object of another type. In each case, .args[0] is a printable message. """ code = self.extract_input_lines(target, raw=raw) # Grab history if code: return code utarget = unquote_filename(target) try: if utarget.startswith(('http://', 'https://')): return openpy.read_py_url(utarget, skip_encoding_cookie=True) except UnicodeDecodeError: if not py_only : response = urllib.urlopen(target) return response.read().decode('latin1') raise ValueError(("'%s' seem to be unreadable.") % utarget) potential_target = [target] try : potential_target.insert(0,get_py_filename(target)) except IOError: pass for tgt in potential_target : if os.path.isfile(tgt): # Read file try : return openpy.read_py_file(tgt, skip_encoding_cookie=True) except UnicodeDecodeError : if not py_only : with io_open(tgt,'r', encoding='latin1') as f : return f.read() raise ValueError(("'%s' seem to be unreadable.") % target) try: # User namespace codeobj = eval(target, self.user_ns) except Exception: raise ValueError(("'%s' was not found in history, as a file, url, " "nor in the user namespace.") % target) if isinstance(codeobj, basestring): return codeobj elif isinstance(codeobj, Macro): return codeobj.value raise TypeError("%s is neither a string nor a macro." % target, codeobj)
python
{ "resource": "" }
q279436
InteractiveShell.atexit_operations
test
def atexit_operations(self):
    """This will be executed at the time of exit.

    Cleanup operations and saving of persistent data that is done
    unconditionally by IPython should be performed here.

    For things that may depend on startup flags or platform specifics (such
    as having readline or not), register a separate atexit function in the
    code that has the appropriate information, rather than trying to
    clutter this method with special cases.
    """
    # Close the history session (this stores the end time and line count)
    # this must be *before* the tempfile cleanup, in case of temporary
    # history db
    self.history_manager.end_session()

    # Cleanup all tempfiles left around
    for tfile in self.tempfiles:
        try:
            os.unlink(tfile)
        except OSError:
            pass

    # Clear all user namespaces to release all references cleanly.
    self.reset(new_session=False)

    # Run user hooks
    self.hooks.shutdown_hook()
python
{ "resource": "" }
q279437
broadcast
test
def broadcast(client, sender, msg_name, dest_name=None, block=None):
    """broadcast a message from one engine to all others."""
    dest_name = msg_name if dest_name is None else dest_name
    # publish on the sending engine, then have every other engine consume it
    # (note: the ``block`` argument is accepted but not forwarded to execute here)
    client[sender].execute('com.publish(%s)' % msg_name, block=None)
    targets = client.ids
    targets.remove(sender)
    return client[targets].execute('%s=com.consume()' % dest_name, block=None)
python
{ "resource": "" }
q279438
send
test
def send(client, sender, targets, msg_name, dest_name=None, block=None): """send a message from one to one-or-more engines.""" dest_name = msg_name if dest_name is None else dest_name def _send(targets, m_name): msg = globals()[m_name] return com.send(targets, msg) client[sender].apply_async(_send, targets, msg_name) return client[targets].execute('%s=com.recv()'%dest_name, block=None)
python
{ "resource": "" }
q279439
skipif
test
def skipif(skip_condition, msg=None): """ Make function raise SkipTest exception if a given condition is true. If the condition is a callable, it is used at runtime to dynamically make the decision. This is useful for tests that may require costly imports, to delay the cost until the test suite is actually executed. Parameters ---------- skip_condition : bool or callable Flag to determine whether to skip the decorated test. msg : str, optional Message to give on raising a SkipTest exception. Default is None. Returns ------- decorator : function Decorator which, when applied to a function, causes SkipTest to be raised when `skip_condition` is True, and the function to be called normally otherwise. Notes ----- The decorator itself is decorated with the ``nose.tools.make_decorator`` function in order to transmit function name, and various other metadata. """ def skip_decorator(f): # Local import to avoid a hard nose dependency and only incur the # import time overhead at actual test-time. import nose # Allow for both boolean or callable skip conditions. if callable(skip_condition): skip_val = lambda : skip_condition() else: skip_val = lambda : skip_condition def get_msg(func,msg=None): """Skip message with information about function being skipped.""" if msg is None: out = 'Test skipped due to test condition' else: out = '\n'+msg return "Skipping test: %s%s" % (func.__name__,out) # We need to define *two* skippers because Python doesn't allow both # return with value and yield inside the same function. def skipper_func(*args, **kwargs): """Skipper for normal test functions.""" if skip_val(): raise nose.SkipTest(get_msg(f,msg)) else: return f(*args, **kwargs) def skipper_gen(*args, **kwargs): """Skipper for test generators.""" if skip_val(): raise nose.SkipTest(get_msg(f,msg)) else: for x in f(*args, **kwargs): yield x # Choose the right skipper to use when building the actual decorator. if nose.util.isgenerator(f): skipper = skipper_gen else: skipper = skipper_func return nose.tools.make_decorator(f)(skipper) return skip_decorator
python
{ "resource": "" }
q279440
knownfailureif
test
def knownfailureif(fail_condition, msg=None): """ Make function raise KnownFailureTest exception if given condition is true. If the condition is a callable, it is used at runtime to dynamically make the decision. This is useful for tests that may require costly imports, to delay the cost until the test suite is actually executed. Parameters ---------- fail_condition : bool or callable Flag to determine whether to mark the decorated test as a known failure (if True) or not (if False). msg : str, optional Message to give on raising a KnownFailureTest exception. Default is None. Returns ------- decorator : function Decorator, which, when applied to a function, causes SkipTest to be raised when `skip_condition` is True, and the function to be called normally otherwise. Notes ----- The decorator itself is decorated with the ``nose.tools.make_decorator`` function in order to transmit function name, and various other metadata. """ if msg is None: msg = 'Test skipped due to known failure' # Allow for both boolean or callable known failure conditions. if callable(fail_condition): fail_val = lambda : fail_condition() else: fail_val = lambda : fail_condition def knownfail_decorator(f): # Local import to avoid a hard nose dependency and only incur the # import time overhead at actual test-time. import nose def knownfailer(*args, **kwargs): if fail_val(): raise KnownFailureTest, msg else: return f(*args, **kwargs) return nose.tools.make_decorator(f)(knownfailer) return knownfail_decorator
python
{ "resource": "" }
q279441
deprecated
test
def deprecated(conditional=True): """ Filter deprecation warnings while running the test suite. This decorator can be used to filter DeprecationWarning's, to avoid printing them during the test suite run, while checking that the test actually raises a DeprecationWarning. Parameters ---------- conditional : bool or callable, optional Flag to determine whether to mark test as deprecated or not. If the condition is a callable, it is used at runtime to dynamically make the decision. Default is True. Returns ------- decorator : function The `deprecated` decorator itself. Notes ----- .. versionadded:: 1.4.0 """ def deprecate_decorator(f): # Local import to avoid a hard nose dependency and only incur the # import time overhead at actual test-time. import nose def _deprecated_imp(*args, **kwargs): # Poor man's replacement for the with statement ctx = WarningManager(record=True) l = ctx.__enter__() warnings.simplefilter('always') try: f(*args, **kwargs) if not len(l) > 0: raise AssertionError("No warning raised when calling %s" % f.__name__) if not l[0].category is DeprecationWarning: raise AssertionError("First warning for %s is not a " \ "DeprecationWarning( is %s)" % (f.__name__, l[0])) finally: ctx.__exit__() if callable(conditional): cond = conditional() else: cond = conditional if cond: return nose.tools.make_decorator(f)(_deprecated_imp) else: return f return deprecate_decorator
python
{ "resource": "" }
q279442
list_profiles_in
test
def list_profiles_in(path): """list profiles in a given root directory""" files = os.listdir(path) profiles = [] for f in files: full_path = os.path.join(path, f) if os.path.isdir(full_path) and f.startswith('profile_'): profiles.append(f.split('_',1)[-1]) return profiles
python
{ "resource": "" }
q279443
list_bundled_profiles
test
def list_bundled_profiles(): """list profiles that are bundled with IPython.""" path = os.path.join(get_ipython_package_dir(), u'config', u'profile') files = os.listdir(path) profiles = [] for profile in files: full_path = os.path.join(path, profile) if os.path.isdir(full_path) and profile != "__pycache__": profiles.append(profile) return profiles
python
{ "resource": "" }
q279444
WorkingSet.find
test
def find(self, req): """Find a distribution matching requirement `req` If there is an active distribution for the requested project, this returns it as long as it meets the version requirement specified by `req`. But, if there is an active distribution for the project and it does *not* meet the `req` requirement, ``VersionConflict`` is raised. If there is no active distribution for the requested project, ``None`` is returned. """ dist = self.by_key.get(req.key) if dist is not None and dist not in req: # XXX add more info raise VersionConflict(dist, req) else: return dist
python
{ "resource": "" }
q279445
run
test
def run (command, timeout=-1, withexitstatus=False, events=None, extra_args=None, logfile=None, cwd=None, env=None, encoding='utf-8'): """ This function runs the given command; waits for it to finish; then returns all output as a string. STDERR is included in output. If the full path to the command is not given then the path is searched. Note that lines are terminated by CR/LF (\\r\\n) combination even on UNIX-like systems because this is the standard for pseudo ttys. If you set 'withexitstatus' to true, then run will return a tuple of (command_output, exitstatus). If 'withexitstatus' is false then this returns just command_output. The run() function can often be used instead of creating a spawn instance. For example, the following code uses spawn:: from pexpect import * child = spawn('scp foo [email protected]:.') child.expect ('(?i)password') child.sendline (mypassword) The previous code can be replace with the following:: from pexpect import * run ('scp foo [email protected]:.', events={'(?i)password': mypassword}) Examples ======== Start the apache daemon on the local machine:: from pexpect import * run ("/usr/local/apache/bin/apachectl start") Check in a file using SVN:: from pexpect import * run ("svn ci -m 'automatic commit' my_file.py") Run a command and capture exit status:: from pexpect import * (command_output, exitstatus) = run ('ls -l /bin', withexitstatus=1) Tricky Examples =============== The following will run SSH and execute 'ls -l' on the remote machine. The password 'secret' will be sent if the '(?i)password' pattern is ever seen:: run ("ssh [email protected] 'ls -l'", events={'(?i)password':'secret\\n'}) This will start mencoder to rip a video from DVD. This will also display progress ticks every 5 seconds as it runs. For example:: from pexpect import * def print_ticks(d): print d['event_count'], run ("mencoder dvd://1 -o video.avi -oac copy -ovc copy", events={TIMEOUT:print_ticks}, timeout=5) The 'events' argument should be a dictionary of patterns and responses. Whenever one of the patterns is seen in the command out run() will send the associated response string. Note that you should put newlines in your string if Enter is necessary. The responses may also contain callback functions. Any callback is function that takes a dictionary as an argument. The dictionary contains all the locals from the run() function, so you can access the child spawn object or any other variable defined in run() (event_count, child, and extra_args are the most useful). A callback may return True to stop the current run process otherwise run() continues until the next event. A callback may also return a string which will be sent to the child. 'extra_args' is not used by directly run(). It provides a way to pass data to a callback function through run() through the locals dictionary passed to a callback.""" if timeout == -1: child = spawn(command, maxread=2000, logfile=logfile, cwd=cwd, env=env, encoding=encoding) else: child = spawn(command, timeout=timeout, maxread=2000, logfile=logfile, cwd=cwd, env=env, encoding=encoding) if events is not None: patterns = events.keys() responses = events.values() else: patterns=None # We assume that EOF or TIMEOUT will save us. responses=None child_result_list = [] event_count = 0 while 1: try: index = child.expect (patterns) if isinstance(child.after, basestring): child_result_list.append(child.before + child.after) else: # child.after may have been a TIMEOUT or EOF, so don't cat those. 
child_result_list.append(child.before) if isinstance(responses[index], basestring): child.send(responses[index]) elif type(responses[index]) is types.FunctionType: callback_result = responses[index](locals()) sys.stdout.flush() if isinstance(callback_result, basestring): child.send(callback_result) elif callback_result: break else: raise TypeError ('The callback must be a string or function type.') event_count = event_count + 1 except TIMEOUT, e: child_result_list.append(child.before) break except EOF, e: child_result_list.append(child.before) break child_result = child._empty_buffer.join(child_result_list) if withexitstatus: child.close() return (child_result, child.exitstatus) else: return child_result
python
{ "resource": "" }
q279446
which
test
def which (filename): """This takes a given filename; tries to find it in the environment path; then checks if it is executable. This returns the full path to the filename if found and executable. Otherwise this returns None.""" # Special case where filename already contains a path. if os.path.dirname(filename) != '': if os.access (filename, os.X_OK): return filename if not os.environ.has_key('PATH') or os.environ['PATH'] == '': p = os.defpath else: p = os.environ['PATH'] pathlist = p.split(os.pathsep) for path in pathlist: f = os.path.join(path, filename) if os.access(f, os.X_OK): return f return None
python
{ "resource": "" }
q279447
spawnb.next
test
def next (self): # File-like object. """This is to support iterators over a file-like object. """ result = self.readline() if result == self._empty_buffer: raise StopIteration return result
python
{ "resource": "" }
q279448
spawnb.send
test
def send(self, s): """This sends a string to the child process. This returns the number of bytes written. If a log file was set then the data is also written to the log. """ time.sleep(self.delaybeforesend) s2 = self._cast_buffer_type(s) if self.logfile is not None: self.logfile.write(s2) self.logfile.flush() if self.logfile_send is not None: self.logfile_send.write(s2) self.logfile_send.flush() c = os.write (self.child_fd, _cast_bytes(s, self.encoding)) return c
python
{ "resource": "" }
q279449
spawnb.sendintr
test
def sendintr(self): """This sends a SIGINT to the child. It does not require the SIGINT to be the first character on a line. """ if hasattr(termios, 'VINTR'): char = termios.tcgetattr(self.child_fd)[6][termios.VINTR] else: # platform does not define VINTR so assume CTRL-C char = chr(3) self.send (char)
python
{ "resource": "" }
q279450
spawnb._prepare_regex_pattern
test
def _prepare_regex_pattern(self, p): "Recompile unicode regexes as bytes regexes. Overridden in subclass." if isinstance(p.pattern, unicode): p = re.compile(p.pattern.encode('utf-8'), p.flags &~ re.UNICODE) return p
python
{ "resource": "" }
q279451
spawnb.expect
test
def expect(self, pattern, timeout = -1, searchwindowsize=-1): """This seeks through the stream until a pattern is matched. The pattern is overloaded and may take several types. The pattern can be a StringType, EOF, a compiled re, or a list of any of those types. Strings will be compiled to re types. This returns the index into the pattern list. If the pattern was not a list this returns index 0 on a successful match. This may raise exceptions for EOF or TIMEOUT. To avoid the EOF or TIMEOUT exceptions add EOF or TIMEOUT to the pattern list. That will cause expect to match an EOF or TIMEOUT condition instead of raising an exception. If you pass a list of patterns and more than one matches, the first match in the stream is chosen. If more than one pattern matches at that point, the leftmost in the pattern list is chosen. For example:: # the input is 'foobar' index = p.expect (['bar', 'foo', 'foobar']) # returns 1 ('foo') even though 'foobar' is a "better" match Please note, however, that buffering can affect this behavior, since input arrives in unpredictable chunks. For example:: # the input is 'foobar' index = p.expect (['foobar', 'foo']) # returns 0 ('foobar') if all input is available at once, # but returs 1 ('foo') if parts of the final 'bar' arrive late After a match is found the instance attributes 'before', 'after' and 'match' will be set. You can see all the data read before the match in 'before'. You can see the data that was matched in 'after'. The re.MatchObject used in the re match will be in 'match'. If an error occurred then 'before' will be set to all the data read so far and 'after' and 'match' will be None. If timeout is -1 then timeout will be set to the self.timeout value. A list entry may be EOF or TIMEOUT instead of a string. This will catch these exceptions and return the index of the list entry instead of raising the exception. The attribute 'after' will be set to the exception type. The attribute 'match' will be None. This allows you to write code like this:: index = p.expect (['good', 'bad', pexpect.EOF, pexpect.TIMEOUT]) if index == 0: do_something() elif index == 1: do_something_else() elif index == 2: do_some_other_thing() elif index == 3: do_something_completely_different() instead of code like this:: try: index = p.expect (['good', 'bad']) if index == 0: do_something() elif index == 1: do_something_else() except EOF: do_some_other_thing() except TIMEOUT: do_something_completely_different() These two forms are equivalent. It all depends on what you want. You can also just expect the EOF if you are waiting for all output of a child to finish. For example:: p = pexpect.spawn('/bin/ls') p.expect (pexpect.EOF) print p.before If you are trying to optimize for speed then see expect_list(). """ compiled_pattern_list = self.compile_pattern_list(pattern) return self.expect_list(compiled_pattern_list, timeout, searchwindowsize)
python
{ "resource": "" }
q279452
spawnb.expect_loop
test
def expect_loop(self, searcher, timeout = -1, searchwindowsize = -1): """This is the common loop used inside expect. The 'searcher' should be an instance of searcher_re or searcher_string, which describes how and what to search for in the input. See expect() for other arguments, return value and exceptions. """ self.searcher = searcher if timeout == -1: timeout = self.timeout if timeout is not None: end_time = time.time() + timeout if searchwindowsize == -1: searchwindowsize = self.searchwindowsize try: incoming = self.buffer freshlen = len(incoming) while True: # Keep reading until exception or return. index = searcher.search(incoming, freshlen, searchwindowsize) if index >= 0: self.buffer = incoming[searcher.end : ] self.before = incoming[ : searcher.start] self.after = incoming[searcher.start : searcher.end] self.match = searcher.match self.match_index = index return self.match_index # No match at this point if timeout is not None and timeout < 0: raise TIMEOUT ('Timeout exceeded in expect_any().') # Still have time left, so read more data c = self.read_nonblocking (self.maxread, timeout) freshlen = len(c) time.sleep (0.0001) incoming = incoming + c if timeout is not None: timeout = end_time - time.time() except EOF, e: self.buffer = self._empty_buffer self.before = incoming self.after = EOF index = searcher.eof_index if index >= 0: self.match = EOF self.match_index = index return self.match_index else: self.match = None self.match_index = None raise EOF (str(e) + '\n' + str(self)) except TIMEOUT, e: self.buffer = incoming self.before = incoming self.after = TIMEOUT index = searcher.timeout_index if index >= 0: self.match = TIMEOUT self.match_index = index return self.match_index else: self.match = None self.match_index = None raise TIMEOUT (str(e) + '\n' + str(self)) except: self.before = incoming self.after = None self.match = None self.match_index = None raise
python
{ "resource": "" }
q279453
spawn._prepare_regex_pattern
test
def _prepare_regex_pattern(self, p): "Recompile bytes regexes as unicode regexes." if isinstance(p.pattern, bytes): p = re.compile(p.pattern.decode(self.encoding), p.flags) return p
python
{ "resource": "" }
q279454
searcher_string.search
test
def search(self, buffer, freshlen, searchwindowsize=None): """This searches 'buffer' for the first occurence of one of the search strings. 'freshlen' must indicate the number of bytes at the end of 'buffer' which have not been searched before. It helps to avoid searching the same, possibly big, buffer over and over again. See class spawn for the 'searchwindowsize' argument. If there is a match this returns the index of that string, and sets 'start', 'end' and 'match'. Otherwise, this returns -1. """ absurd_match = len(buffer) first_match = absurd_match # 'freshlen' helps a lot here. Further optimizations could # possibly include: # # using something like the Boyer-Moore Fast String Searching # Algorithm; pre-compiling the search through a list of # strings into something that can scan the input once to # search for all N strings; realize that if we search for # ['bar', 'baz'] and the input is '...foo' we need not bother # rescanning until we've read three more bytes. # # Sadly, I don't know enough about this interesting topic. /grahn for index, s in self._strings: if searchwindowsize is None: # the match, if any, can only be in the fresh data, # or at the very end of the old data offset = -(freshlen+len(s)) else: # better obey searchwindowsize offset = -searchwindowsize n = buffer.find(s, offset) if n >= 0 and n < first_match: first_match = n best_index, best_match = index, s if first_match == absurd_match: return -1 self.match = best_match self.start = first_match self.end = self.start + len(self.match) return best_index
python
{ "resource": "" }
q279455
searcher_re.search
test
def search(self, buffer, freshlen, searchwindowsize=None):
    """This searches 'buffer' for the first occurrence of one of the regular
    expressions. 'freshlen' must indicate the number of bytes at the end of
    'buffer' which have not been searched before.

    See class spawn for the 'searchwindowsize' argument.

    If there is a match this returns the index of that string, and sets
    'start', 'end' and 'match'. Otherwise, returns -1."""

    absurd_match = len(buffer)
    first_match = absurd_match
    # 'freshlen' doesn't help here -- we cannot predict the
    # length of a match, and the re module provides no help.
    if searchwindowsize is None:
        searchstart = 0
    else:
        searchstart = max(0, len(buffer) - searchwindowsize)
    for index, s in self._searches:
        match = s.search(buffer, searchstart)
        if match is None:
            continue
        n = match.start()
        if n < first_match:
            first_match = n
            the_match = match
            best_index = index
    if first_match == absurd_match:
        return -1
    self.start = first_match
    self.match = the_match
    self.end = self.match.end()
    return best_index
python
{ "resource": "" }
q279456
log_listener
test
def log_listener(log: logging.Logger = None, level=logging.INFO):
    """Progress Monitor listener that logs all updates to the given logger"""
    if log is None:
        log = logging.getLogger("ProgressMonitor")

    def listen(monitor):
        name = "{}: ".format(monitor.name) if monitor.name is not None else ""
        perc = int(monitor.progress * 100)
        msg = "[{name}{perc:3d}%] {monitor.message}".format(**locals())
        log.log(level, msg)
    return listen
python
{ "resource": "" }
q279457
unpack_directory
test
def unpack_directory(filename, extract_dir, progress_filter=default_filter):
    """"Unpack" a directory, using the same interface as for archives

    Raises ``UnrecognizedFormat`` if `filename` is not a directory
    """
    if not os.path.isdir(filename):
        raise UnrecognizedFormat("%s is not a directory" % (filename,))

    paths = {filename: ('', extract_dir)}
    for base, dirs, files in os.walk(filename):
        src, dst = paths[base]
        for d in dirs:
            paths[os.path.join(base, d)] = src + d + '/', os.path.join(dst, d)
        for f in files:
            name = src + f
            target = os.path.join(dst, f)
            target = progress_filter(src + f, target)
            if not target:
                continue  # skip non-files
            ensure_directory(target)
            f = os.path.join(base, f)
            shutil.copyfile(f, target)
            shutil.copystat(f, target)
python
{ "resource": "" }
q279458
Context.emit
test
def emit(self, msg, level=1, debug=False):
    """
    Emit a message to the user.

    :param msg: The message to emit.  If ``debug`` is ``True``, the
                message will be emitted to ``stderr`` only if the
                ``debug`` attribute is ``True``.  If ``debug`` is
                ``False``, the message will be emitted to ``stdout``
                under the control of the ``verbose`` attribute.
    :param level: Ignored if ``debug`` is ``True``.  The message will
                  only be emitted if the ``verbose`` attribute is
                  greater than or equal to the value of this
                  parameter.  Defaults to 1.
    :param debug: If ``True``, marks the message as a debugging
                  message.  The message will only be emitted if the
                  ``debug`` attribute is ``True``.
    """

    # Is it a debug message?
    if debug:
        if not self.debug:
            # Debugging not enabled, don't emit the message
            return
        stream = sys.stderr
    else:
        # Not a debugging message; is verbose high enough?
        if self.verbose < level:
            return
        stream = sys.stdout

    # Emit the message
    print(msg, file=stream)
    stream.flush()
python
{ "resource": "" }
q279459
CommandSession.last_error
test
def last_error(self):
    """Get the output of the last command executed."""
    if not len(self.log):
        raise RuntimeError('Nothing executed')
    try:
        errs = [l for l in self.log if l[1] != 0]
        return errs[-1][2]
    except IndexError:
        # odd case where there were no errors
        # TODO
        return 'no last error'
python
{ "resource": "" }
q279460
CommandSession.check_output
test
def check_output(self, cmd):
    """Wrapper for subprocess.check_output."""
    ret, output = self._exec(cmd)
    if not ret == 0:
        raise CommandError(self)
    return output
python
{ "resource": "" }
q279461
Analysis.find_source
test
def find_source(self, filename):
    """Find the source for `filename`.

    Returns two values: the actual filename, and the source.

    The source returned depends on which of these cases holds:

    * The filename seems to be a non-source file: returns None

    * The filename is a source file, and actually exists: returns None.

    * The filename is a source file, and is in a zip file or egg:
      returns the source.

    * The filename is a source file, but couldn't be found: raises
      `NoSource`.

    """
    source = None

    base, ext = os.path.splitext(filename)
    TRY_EXTS = {
        '.py': ['.py', '.pyw'],
        '.pyw': ['.pyw'],
    }
    try_exts = TRY_EXTS.get(ext)
    if not try_exts:
        return filename, None

    for try_ext in try_exts:
        try_filename = base + try_ext
        if os.path.exists(try_filename):
            return try_filename, None
        source = self.coverage.file_locator.get_zip_data(try_filename)
        if source:
            return try_filename, source
    raise NoSource("No source for code: '%s'" % filename)
python
{ "resource": "" }
q279462
Analysis.arcs_executed
test
def arcs_executed(self):
    """Returns a sorted list of the arcs actually executed in the code."""
    executed = self.coverage.data.executed_arcs(self.filename)
    m2fl = self.parser.first_line
    executed = [(m2fl(l1), m2fl(l2)) for (l1, l2) in executed]
    return sorted(executed)
python
{ "resource": "" }
q279463
Analysis.arcs_missing
test
def arcs_missing(self):
    """Returns a sorted list of the arcs in the code not executed."""
    possible = self.arc_possibilities()
    executed = self.arcs_executed()
    missing = [
        p for p in possible
        if p not in executed and p[0] not in self.no_branch
    ]
    return sorted(missing)
python
{ "resource": "" }
q279464
Analysis.arcs_unpredicted
test
def arcs_unpredicted(self):
    """Returns a sorted list of the executed arcs missing from the code."""
    possible = self.arc_possibilities()
    executed = self.arcs_executed()
    # Exclude arcs here which connect a line to itself.  They can occur
    # in executed data in some cases.  This is where they can cause
    # trouble, and here is where it's the least burden to remove them.
    unpredicted = [
        e for e in executed
        if e not in possible and e[0] != e[1]
    ]
    return sorted(unpredicted)
python
{ "resource": "" }
q279465
Analysis.branch_lines
test
def branch_lines(self):
    """Returns a list of line numbers that have more than one exit."""
    exit_counts = self.parser.exit_counts()
    return [l1 for l1, count in iitems(exit_counts) if count > 1]
python
{ "resource": "" }
q279466
Analysis.total_branches
test
def total_branches(self):
    """How many total branches are there?"""
    exit_counts = self.parser.exit_counts()
    return sum([count for count in exit_counts.values() if count > 1])
python
{ "resource": "" }
q279467
Analysis.missing_branch_arcs
test
def missing_branch_arcs(self):
    """Return arcs that weren't executed from branch lines.

    Returns {l1:[l2a,l2b,...], ...}

    """
    missing = self.arcs_missing()
    branch_lines = set(self.branch_lines())
    mba = {}
    for l1, l2 in missing:
        if l1 in branch_lines:
            if l1 not in mba:
                mba[l1] = []
            mba[l1].append(l2)
    return mba
python
{ "resource": "" }
q279468
Analysis.branch_stats
test
def branch_stats(self):
    """Get stats about branches.

    Returns a dict mapping line numbers to a tuple:
    (total_exits, taken_exits).
    """
    exit_counts = self.parser.exit_counts()
    missing_arcs = self.missing_branch_arcs()
    stats = {}
    for lnum in self.branch_lines():
        exits = exit_counts[lnum]
        try:
            missing = len(missing_arcs[lnum])
        except KeyError:
            missing = 0
        stats[lnum] = (exits, exits - missing)
    return stats
python
{ "resource": "" }
q279469
Numbers.set_precision
test
def set_precision(cls, precision):
    """Set the number of decimal places used to report percentages."""
    assert 0 <= precision < 10
    cls._precision = precision
    cls._near0 = 1.0 / 10**precision
    cls._near100 = 100.0 - cls._near0
python
{ "resource": "" }
q279470
Numbers._get_pc_covered
test
def _get_pc_covered(self):
    """Returns a single percentage value for coverage."""
    if self.n_statements > 0:
        pc_cov = (100.0 * (self.n_executed + self.n_executed_branches) /
                  (self.n_statements + self.n_branches))
    else:
        pc_cov = 100.0
    return pc_cov
python
{ "resource": "" }
q279471
Numbers._get_pc_covered_str
test
def _get_pc_covered_str(self):
    """Returns the percent covered, as a string, without a percent sign.

    Note that "0" is only returned when the value is truly zero, and "100"
    is only returned when the value is truly 100.  Rounding can never
    result in either "0" or "100".

    """
    pc = self.pc_covered
    if 0 < pc < self._near0:
        pc = self._near0
    elif self._near100 < pc < 100:
        pc = self._near100
    else:
        pc = round(pc, self._precision)
    return "%.*f" % (self._precision, pc)
python
{ "resource": "" }
q279472
highlight_text
test
def highlight_text(needles, haystack, cls_name='highlighted', words=False, case=False):
    """
    Applies cls_name to all needles found in haystack.
    """
    if not needles:
        return haystack
    if not haystack:
        return ''

    if words:
        pattern = r"(%s)" % "|".join(['\\b{}\\b'.format(re.escape(n)) for n in needles])
    else:
        pattern = r"(%s)" % "|".join([re.escape(n) for n in needles])

    if case:
        regex = re.compile(pattern)
    else:
        regex = re.compile(pattern, re.I)

    i, out = 0, ""
    for m in regex.finditer(haystack):
        out += "".join([haystack[i:m.start()],
                        '<span class="%s">' % cls_name,
                        haystack[m.start():m.end()],
                        "</span>"])
        i = m.end()
    return mark_safe(out + haystack[i:])
python
{ "resource": "" }
q279473
highlight
test
def highlight(string, keywords, cls_name='highlighted'):
    """
    Given a list of words, this function highlights the matched text in the
    given string.
    """
    if not keywords:
        return string
    if not string:
        return ''
    include, exclude = get_text_tokenizer(keywords)
    highlighted = highlight_text(include, string, cls_name)
    return highlighted
python
{ "resource": "" }
q279474
highlight_words
test
def highlight_words(string, keywords, cls_name='highlighted'):
    """
    Given a list of words, this function highlights the matched words in the
    given string.
    """
    if not keywords:
        return string
    if not string:
        return ''
    include, exclude = get_text_tokenizer(keywords)
    highlighted = highlight_text(include, string, cls_name, words=True)
    return highlighted
python
{ "resource": "" }
q279475
AbstractSandbox.run
test
def run(self, func):
    """Run 'func' under os sandboxing"""
    try:
        self._copy(self)
        if _file:
            __builtin__.file = self._file
        __builtin__.open = self._open
        self._active = True
        return func()
    finally:
        self._active = False
        if _file:
            __builtin__.file = _file
        __builtin__.open = _open
        self._copy(_os)
python
{ "resource": "" }
q279476
unquote_ends
test
def unquote_ends(istr):
    """Remove a single pair of quotes from the endpoints of a string."""

    if not istr:
        return istr
    if (istr[0] == "'" and istr[-1] == "'") or \
       (istr[0] == '"' and istr[-1] == '"'):
        return istr[1:-1]
    else:
        return istr
python
{ "resource": "" }
q279477
indent
test
def indent(instr, nspaces=4, ntabs=0, flatten=False):
    """Indent a string a given number of spaces or tabstops.

    indent(str,nspaces=4,ntabs=0) -> indent str by ntabs+nspaces.

    Parameters
    ----------

    instr : basestring
        The string to be indented.
    nspaces : int (default: 4)
        The number of spaces to be indented.
    ntabs : int (default: 0)
        The number of tabs to be indented.
    flatten : bool (default: False)
        Whether to scrub existing indentation.  If True, all lines will be
        aligned to the same indentation.  If False, existing indentation will
        be strictly increased.

    Returns
    -------

    str|unicode : string indented by ntabs and nspaces.

    """
    if instr is None:
        return
    ind = '\t' * ntabs + ' ' * nspaces
    if flatten:
        pat = re.compile(r'^\s*', re.MULTILINE)
    else:
        pat = re.compile(r'^', re.MULTILINE)
    outstr = re.sub(pat, ind, instr)
    if outstr.endswith(os.linesep + ind):
        return outstr[:-len(ind)]
    else:
        return outstr
python
{ "resource": "" }
q279478
marquee
test
def marquee(txt='', width=78, mark='*'):
    """Return the input string centered in a 'marquee'.

    :Examples:

        In [16]: marquee('A test',40)
        Out[16]: '**************** A test ****************'

        In [17]: marquee('A test',40,'-')
        Out[17]: '---------------- A test ----------------'

        In [18]: marquee('A test',40,' ')
        Out[18]: '                 A test                 '

    """
    if not txt:
        return (mark * width)[:width]
    nmark = (width - len(txt) - 2) // len(mark) // 2
    if nmark < 0:
        nmark = 0
    marks = mark * nmark
    return '%s %s %s' % (marks, txt, marks)
python
{ "resource": "" }
q279479
format_screen
test
def format_screen(strng):
    """Format a string for screen printing.

    This removes some latex-type format codes."""
    # Paragraph continue
    par_re = re.compile(r'\\$', re.MULTILINE)
    strng = par_re.sub('', strng)
    return strng
python
{ "resource": "" }
q279480
dedent
test
def dedent(text):
    """Equivalent of textwrap.dedent that ignores unindented first line.

    This means it will still dedent strings like:
    '''foo
    is a bar
    '''

    For use in wrap_paragraphs.
    """

    if text.startswith('\n'):
        # text starts with blank line, don't ignore the first line
        return textwrap.dedent(text)

    # split first line
    splits = text.split('\n', 1)
    if len(splits) == 1:
        # only one line
        return textwrap.dedent(text)

    first, rest = splits
    # dedent everything but the first line
    rest = textwrap.dedent(rest)
    return '\n'.join([first, rest])
python
{ "resource": "" }
q279481
wrap_paragraphs
test
def wrap_paragraphs(text, ncols=80):
    """Wrap multiple paragraphs to fit a specified width.

    This is equivalent to textwrap.wrap, but with support for multiple
    paragraphs, as separated by empty lines.

    Returns
    -------

    list of complete paragraphs, wrapped to fill `ncols` columns.
    """
    paragraph_re = re.compile(r'\n(\s*\n)+', re.MULTILINE)
    text = dedent(text).strip()
    paragraphs = paragraph_re.split(text)[::2]  # every other entry is space
    out_ps = []
    indent_re = re.compile(r'\n\s+', re.MULTILINE)
    for p in paragraphs:
        # presume indentation that survives dedent is meaningful formatting,
        # so don't fill unless text is flush.
        if indent_re.search(p) is None:
            # wrap paragraph
            p = textwrap.fill(p, ncols)
        out_ps.append(p)
    return out_ps
python
{ "resource": "" }
q279482
_find_optimal
test
def _find_optimal(rlist, separator_size=2, displaywidth=80):
    """Calculate optimal info to columnize a list of string"""
    for nrow in range(1, len(rlist) + 1):
        chk = map(max, _chunks(rlist, nrow))
        sumlength = sum(chk)
        ncols = len(chk)
        if sumlength + separator_size * (ncols - 1) <= displaywidth:
            break
    return {'columns_numbers': ncols,
            'optimal_separator_width': (displaywidth - sumlength) / (ncols - 1) if (ncols - 1) else 0,
            'rows_numbers': nrow,
            'columns_width': chk
            }
python
{ "resource": "" }
q279483
_get_or_default
test
def _get_or_default(mylist, i, default=None):
    """Return list item i, or default if it doesn't exist."""
    if i >= len(mylist):
        return default
    else:
        return mylist[i]
python
{ "resource": "" }
q279484
compute_item_matrix
test
def compute_item_matrix(items, empty=None, *args, **kwargs):
    """Returns a nested list, and info to columnize items

    Parameters
    ----------

    items
        list of strings to columnize
    empty : (default None)
        default value to fill list if needed
    separator_size : int (default=2)
        How many characters will be used as a separation between each column.
    displaywidth : int (default=80)
        The width of the area onto which the columns should fit.

    Returns
    -------

    Returns a tuple of (strings_matrix, dict_info)

    strings_matrix
        nested list of strings, the outermost list contains as many lists
        as rows, the innermost lists have each as many elements as columns.
        If the total number of elements in `items` does not equal the
        product of rows*columns, the last element of some lists are filled
        with `None`.

    dict_info
        some info to make columnize easier:

        columns_numbers : number of columns
        rows_numbers : number of rows
        columns_width : list of widths of each column
        optimal_separator_width : best separator width between columns

    Example
    -------

    In [1]: l = ['aaa','b','cc','d','eeeee','f','g','h','i','j','k','l']
       ...: compute_item_matrix(l, displaywidth=12)
    Out[1]:
        ([['aaa', 'f', 'k'],
          ['b', 'g', 'l'],
          ['cc', 'h', None],
          ['d', 'i', None],
          ['eeeee', 'j', None]],
         {'columns_numbers': 3,
          'columns_width': [5, 1, 1],
          'optimal_separator_width': 2,
          'rows_numbers': 5})
    """
    info = _find_optimal(map(len, items), *args, **kwargs)
    nrow, ncol = info['rows_numbers'], info['columns_numbers']
    return ([[_get_or_default(items, c * nrow + i, default=empty) for c in range(ncol)] for i in range(nrow)], info)
python
{ "resource": "" }
q279485
SList.fields
test
def fields(self, *fields):
    """ Collect whitespace-separated fields from string list

    Allows quick awk-like usage of string lists.

    Example data (in var a, created by 'a = !ls -l')::

        -rwxrwxrwx  1 ville None      18 Dec 14  2006 ChangeLog
        drwxrwxrwx+ 6 ville None       0 Oct 24 18:05 IPython

    a.fields(0) is ['-rwxrwxrwx', 'drwxrwxrwx+']
    a.fields(1,0) is ['1 -rwxrwxrwx', '6 drwxrwxrwx+']
    (note the joining by space).
    a.fields(-1) is ['ChangeLog', 'IPython']

    IndexErrors are ignored.

    Without args, fields() just split()'s the strings.
    """
    if len(fields) == 0:
        return [el.split() for el in self]

    res = SList()
    for el in [f.split() for f in self]:
        lineparts = []

        for fd in fields:
            try:
                lineparts.append(el[fd])
            except IndexError:
                pass
        if lineparts:
            res.append(" ".join(lineparts))

    return res
python
{ "resource": "" }
q279486
IPythonConsoleApp.build_kernel_argv
test
def build_kernel_argv(self, argv=None):
    """build argv to be passed to kernel subprocess"""
    if argv is None:
        argv = sys.argv[1:]
    self.kernel_argv = swallow_argv(argv, self.frontend_aliases, self.frontend_flags)
    # kernel should inherit default config file from frontend
    self.kernel_argv.append("--KernelApp.parent_appname='%s'" % self.name)
python
{ "resource": "" }
q279487
IPythonConsoleApp.init_ssh
test
def init_ssh(self):
    """set up ssh tunnels, if needed."""
    if not self.sshserver and not self.sshkey:
        return

    if self.sshkey and not self.sshserver:
        # specifying just the key implies that we are connecting directly
        self.sshserver = self.ip
        self.ip = LOCALHOST

    # build connection dict for tunnels:
    info = dict(ip=self.ip,
                shell_port=self.shell_port,
                iopub_port=self.iopub_port,
                stdin_port=self.stdin_port,
                hb_port=self.hb_port
    )

    self.log.info("Forwarding connections to %s via %s" % (self.ip, self.sshserver))

    # tunnels return a new set of ports, which will be on localhost:
    self.ip = LOCALHOST
    try:
        newports = tunnel_to_kernel(info, self.sshserver, self.sshkey)
    except:
        # even catch KeyboardInterrupt
        self.log.error("Could not setup tunnels", exc_info=True)
        self.exit(1)

    self.shell_port, self.iopub_port, self.stdin_port, self.hb_port = newports

    cf = self.connection_file
    base, ext = os.path.splitext(cf)
    base = os.path.basename(base)
    self.connection_file = os.path.basename(base) + '-ssh' + ext

    self.log.critical("To connect another client via this tunnel, use:")
    self.log.critical("--existing %s" % self.connection_file)
python
{ "resource": "" }
q279488
pretty
test
def pretty(obj, verbose=False, max_width=79, newline='\n'):
    """
    Pretty print the object's representation.
    """
    stream = StringIO()
    printer = RepresentationPrinter(stream, verbose, max_width, newline)
    printer.pretty(obj)
    printer.flush()
    return stream.getvalue()
python
{ "resource": "" }
q279489
pprint
test
def pprint(obj, verbose=False, max_width=79, newline='\n'):
    """
    Like `pretty` but print to stdout.
    """
    printer = RepresentationPrinter(sys.stdout, verbose, max_width, newline)
    printer.pretty(obj)
    printer.flush()
    sys.stdout.write(newline)
    sys.stdout.flush()
python
{ "resource": "" }
q279490
_get_mro
test
def _get_mro(obj_class):
    """ Get a reasonable method resolution order of a class and its
    superclasses for both old-style and new-style classes.
    """
    if not hasattr(obj_class, '__mro__'):
        # Old-style class. Mix in object to make a fake new-style class.
        try:
            obj_class = type(obj_class.__name__, (obj_class, object), {})
        except TypeError:
            # Old-style extension type that does not descend from object.
            # FIXME: try to construct a more thorough MRO.
            mro = [obj_class]
        else:
            mro = obj_class.__mro__[1:-1]
    else:
        mro = obj_class.__mro__
    return mro
python
{ "resource": "" }
q279491
_default_pprint
test
def _default_pprint(obj, p, cycle):
    """
    The default print function.  Used if an object does not provide one and
    it's none of the builtin objects.
    """
    klass = getattr(obj, '__class__', None) or type(obj)
    if getattr(klass, '__repr__', None) not in _baseclass_reprs:
        # A user-provided repr.
        p.text(repr(obj))
        return
    p.begin_group(1, '<')
    p.pretty(klass)
    p.text(' at 0x%x' % id(obj))
    if cycle:
        p.text(' ...')
    elif p.verbose:
        first = True
        for key in dir(obj):
            if not key.startswith('_'):
                try:
                    value = getattr(obj, key)
                except AttributeError:
                    continue
                if isinstance(value, types.MethodType):
                    continue
                if not first:
                    p.text(',')
                p.breakable()
                p.text(key)
                p.text('=')
                step = len(key) + 1
                p.indentation += step
                p.pretty(value)
                p.indentation -= step
                first = False
    p.end_group(1, '>')
python
{ "resource": "" }
q279492
_seq_pprinter_factory
test
def _seq_pprinter_factory(start, end, basetype):
    """
    Factory that returns a pprint function useful for sequences.  Used by
    the default pprint for tuples, dicts, lists, sets and frozensets.
    """
    def inner(obj, p, cycle):
        typ = type(obj)
        if basetype is not None and typ is not basetype and typ.__repr__ != basetype.__repr__:
            # If the subclass provides its own repr, use it instead.
            return p.text(typ.__repr__(obj))

        if cycle:
            return p.text(start + '...' + end)
        step = len(start)
        p.begin_group(step, start)
        for idx, x in enumerate(obj):
            if idx:
                p.text(',')
                p.breakable()
            p.pretty(x)
        if len(obj) == 1 and type(obj) is tuple:
            # Special case for 1-item tuples.
            p.text(',')
        p.end_group(step, end)
    return inner
python
{ "resource": "" }
q279493
_dict_pprinter_factory
test
def _dict_pprinter_factory(start, end, basetype=None):
    """
    Factory that returns a pprint function used by the default pprint of
    dicts and dict proxies.
    """
    def inner(obj, p, cycle):
        typ = type(obj)
        if basetype is not None and typ is not basetype and typ.__repr__ != basetype.__repr__:
            # If the subclass provides its own repr, use it instead.
            return p.text(typ.__repr__(obj))

        if cycle:
            return p.text('{...}')
        p.begin_group(1, start)
        keys = obj.keys()
        try:
            keys.sort()
        except Exception, e:
            # Sometimes the keys don't sort.
            pass
        for idx, key in enumerate(keys):
            if idx:
                p.text(',')
                p.breakable()
            p.pretty(key)
            p.text(': ')
            p.pretty(obj[key])
        p.end_group(1, end)
    return inner
python
{ "resource": "" }
q279494
_super_pprint
test
def _super_pprint(obj, p, cycle):
    """The pprint for the super type."""
    p.begin_group(8, '<super: ')
    p.pretty(obj.__self_class__)
    p.text(',')
    p.breakable()
    p.pretty(obj.__self__)
    p.end_group(8, '>')
python
{ "resource": "" }
q279495
_re_pattern_pprint
test
def _re_pattern_pprint(obj, p, cycle):
    """The pprint function for regular expression patterns."""
    p.text('re.compile(')
    pattern = repr(obj.pattern)
    if pattern[:1] in 'uU':
        pattern = pattern[1:]
        prefix = 'ur'
    else:
        prefix = 'r'
    pattern = prefix + pattern.replace('\\\\', '\\')
    p.text(pattern)
    if obj.flags:
        p.text(',')
        p.breakable()
        done_one = False
        for flag in ('TEMPLATE', 'IGNORECASE', 'LOCALE', 'MULTILINE',
                     'DOTALL', 'UNICODE', 'VERBOSE', 'DEBUG'):
            if obj.flags & getattr(re, flag):
                if done_one:
                    p.text('|')
                p.text('re.' + flag)
                done_one = True
    p.text(')')
python
{ "resource": "" }
q279496
_type_pprint
test
def _type_pprint(obj, p, cycle):
    """The pprint for classes and types."""
    if obj.__module__ in ('__builtin__', 'exceptions'):
        name = obj.__name__
    else:
        name = obj.__module__ + '.' + obj.__name__
    p.text(name)
python
{ "resource": "" }
q279497
_function_pprint
test
def _function_pprint(obj, p, cycle):
    """Base pprint for all functions and builtin functions."""
    if obj.__module__ in ('__builtin__', 'exceptions') or not obj.__module__:
        name = obj.__name__
    else:
        name = obj.__module__ + '.' + obj.__name__
    p.text('<function %s>' % name)
python
{ "resource": "" }
q279498
_exception_pprint
test
def _exception_pprint(obj, p, cycle):
    """Base pprint for all exceptions."""
    if obj.__class__.__module__ in ('exceptions', 'builtins'):
        name = obj.__class__.__name__
    else:
        name = '%s.%s' % (
            obj.__class__.__module__,
            obj.__class__.__name__
        )
    step = len(name) + 1
    p.begin_group(step, name + '(')
    for idx, arg in enumerate(getattr(obj, 'args', ())):
        if idx:
            p.text(',')
            p.breakable()
        p.pretty(arg)
    p.end_group(step, ')')
python
{ "resource": "" }
q279499
for_type
test
def for_type(typ, func):
    """
    Add a pretty printer for a given type.
    """
    oldfunc = _type_pprinters.get(typ, None)
    if func is not None:
        # To support easy restoration of old pprinters, we need to ignore Nones.
        _type_pprinters[typ] = func
    return oldfunc
python
{ "resource": "" }