_id               stringlengths   2 - 7
title             stringlengths   1 - 88
partition         stringclasses   3 values
text              stringlengths   75 - 19.8k
language          stringclasses   1 value
meta_information  dict
q279800
abs_file
test
def abs_file(filename):
    """Return the absolute normalized form of `filename`."""
    path = os.path.expandvars(os.path.expanduser(filename))
    path = os.path.abspath(os.path.realpath(path))
    path = actual_path(path)
    return path
python
{ "resource": "" }
q279801
prep_patterns
test
def prep_patterns(patterns):
    """Prepare the file patterns for use in a `FnmatchMatcher`.

    If a pattern starts with a wildcard, it is used as a pattern as-is.
    If it does not start with a wildcard, then it is made absolute with
    the current directory.

    If `patterns` is None, an empty list is returned.

    """
    prepped = []
    for p in patterns or []:
        if p.startswith("*") or p.startswith("?"):
            prepped.append(p)
        else:
            prepped.append(abs_file(p))
    return prepped
python
{ "resource": "" }
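A minimal usage sketch for abs_file and prep_patterns above (the concrete paths are hypothetical, and the absolutized result depends on the current working directory):

# Wildcard patterns pass through unchanged; plain paths are made absolute.
prepped = prep_patterns(["*/tests/*", "src/app.py"])
# e.g. ["*/tests/*", "/home/user/project/src/app.py"]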
q279802
sep
test
def sep(s):
    """Find the path separator used in this string, or os.sep if none."""
    sep_match = re.search(r"[\\/]", s)
    if sep_match:
        the_sep = sep_match.group(0)
    else:
        the_sep = os.sep
    return the_sep
python
{ "resource": "" }
q279803
find_python_files
test
def find_python_files(dirname):
    """Yield all of the importable Python files in `dirname`, recursively.

    To be importable, the files have to be in a directory with a __init__.py,
    except for `dirname` itself, which isn't required to have one.  The
    assumption is that `dirname` was specified directly, so the user knows
    best, but subdirectories are checked for a __init__.py to be sure we only
    find the importable files.

    """
    for i, (dirpath, dirnames, filenames) in enumerate(os.walk(dirname)):
        if i > 0 and '__init__.py' not in filenames:
            # If a directory doesn't have __init__.py, then it isn't
            # importable and neither are its files
            del dirnames[:]
            continue
        for filename in filenames:
            # We're only interested in files that look like reasonable Python
            # files: Must end with .py or .pyw, and must not have certain funny
            # characters that probably mean they are editor junk.
            if re.match(r"^[^.#~!$@%^&*()+=,]+\.pyw?$", filename):
                yield os.path.join(dirpath, filename)
python
{ "resource": "" }
q279804
FileLocator.relative_filename
test
def relative_filename(self, filename):
    """Return the relative form of `filename`.

    The filename will be relative to the current directory when the
    `FileLocator` was constructed.

    """
    fnorm = os.path.normcase(filename)
    if fnorm.startswith(self.relative_dir):
        filename = filename[len(self.relative_dir):]
    return filename
python
{ "resource": "" }
q279805
FileLocator.canonical_filename
test
def canonical_filename(self, filename):
    """Return a canonical filename for `filename`.

    An absolute path with no redundant components and normalized case.

    """
    if filename not in self.canonical_filename_cache:
        if not os.path.isabs(filename):
            for path in [os.curdir] + sys.path:
                if path is None:
                    continue
                f = os.path.join(path, filename)
                if os.path.exists(f):
                    filename = f
                    break
        cf = abs_file(filename)
        self.canonical_filename_cache[filename] = cf
    return self.canonical_filename_cache[filename]
python
{ "resource": "" }
q279806
FileLocator.get_zip_data
test
def get_zip_data(self, filename):
    """Get data from `filename` if it is a zip file path.

    Returns the string data read from the zip file, or None if no zip file
    could be found or `filename` isn't in it.  The data returned will be an
    empty string if the file is empty.

    """
    import zipimport
    markers = ['.zip'+os.sep, '.egg'+os.sep]
    for marker in markers:
        if marker in filename:
            parts = filename.split(marker)
            try:
                zi = zipimport.zipimporter(parts[0]+marker[:-1])
            except zipimport.ZipImportError:
                continue
            try:
                data = zi.get_data(parts[1])
            except IOError:
                continue
            return to_string(data)
    return None
python
{ "resource": "" }
q279807
TreeMatcher.match
test
def match(self, fpath):
    """Does `fpath` indicate a file in one of our trees?"""
    for d in self.dirs:
        if fpath.startswith(d):
            if fpath == d:
                # This is the same file!
                return True
            if fpath[len(d)] == os.sep:
                # This is a file in the directory
                return True
    return False
python
{ "resource": "" }
q279808
FnmatchMatcher.match
test
def match(self, fpath):
    """Does `fpath` match one of our filename patterns?"""
    for pat in self.pats:
        if fnmatch.fnmatch(fpath, pat):
            return True
    return False
python
{ "resource": "" }
q279809
PathAliases.map
test
def map(self, path):
    """Map `path` through the aliases.

    `path` is checked against all of the patterns.  The first pattern to
    match is used to replace the root of the path with the result root.
    Only one pattern is ever used.  If no patterns match, `path` is
    returned unchanged.

    The separator style in the result is made to match that of the result
    in the alias.

    """
    for regex, result, pattern_sep, result_sep in self.aliases:
        m = regex.match(path)
        if m:
            new = path.replace(m.group(0), result)
            if pattern_sep != result_sep:
                new = new.replace(pattern_sep, result_sep)
            if self.locator:
                new = self.locator.canonical_filename(new)
            return new
    return path
python
{ "resource": "" }
q279810
loop_qt4
test
def loop_qt4(kernel):
    """Start a kernel with PyQt4 event loop integration."""
    from IPython.external.qt_for_kernel import QtCore
    from IPython.lib.guisupport import get_app_qt4, start_event_loop_qt4

    kernel.app = get_app_qt4([" "])
    kernel.app.setQuitOnLastWindowClosed(False)
    kernel.timer = QtCore.QTimer()
    kernel.timer.timeout.connect(kernel.do_one_iteration)
    # Units for the timer are in milliseconds
    kernel.timer.start(1000*kernel._poll_interval)
    start_event_loop_qt4(kernel.app)
python
{ "resource": "" }
q279811
loop_wx
test
def loop_wx(kernel): """Start a kernel with wx event loop support.""" import wx from IPython.lib.guisupport import start_event_loop_wx doi = kernel.do_one_iteration # Wx uses milliseconds poll_interval = int(1000*kernel._poll_interval) # We have to put the wx.Timer in a wx.Frame for it to fire properly. # We make the Frame hidden when we create it in the main app below. class TimerFrame(wx.Frame): def __init__(self, func): wx.Frame.__init__(self, None, -1) self.timer = wx.Timer(self) # Units for the timer are in milliseconds self.timer.Start(poll_interval) self.Bind(wx.EVT_TIMER, self.on_timer) self.func = func def on_timer(self, event): self.func() # We need a custom wx.App to create our Frame subclass that has the # wx.Timer to drive the ZMQ event loop. class IPWxApp(wx.App): def OnInit(self): self.frame = TimerFrame(doi) self.frame.Show(False) return True # The redirect=False here makes sure that wx doesn't replace # sys.stdout/stderr with its own classes. kernel.app = IPWxApp(redirect=False) # The import of wx on Linux sets the handler for signal.SIGINT # to 0. This is a bug in wx or gtk. We fix by just setting it # back to the Python default. import signal if not callable(signal.getsignal(signal.SIGINT)): signal.signal(signal.SIGINT, signal.default_int_handler) start_event_loop_wx(kernel.app)
python
{ "resource": "" }
q279812
loop_tk
test
def loop_tk(kernel):
    """Start a kernel with the Tk event loop."""
    import Tkinter
    doi = kernel.do_one_iteration
    # Tk uses milliseconds
    poll_interval = int(1000*kernel._poll_interval)

    # For Tkinter, we create a Tk object and call its withdraw method.
    class Timer(object):
        def __init__(self, func):
            self.app = Tkinter.Tk()
            self.app.withdraw()
            self.func = func

        def on_timer(self):
            self.func()
            self.app.after(poll_interval, self.on_timer)

        def start(self):
            self.on_timer()  # Call it once to get things going.
            self.app.mainloop()

    kernel.timer = Timer(doi)
    kernel.timer.start()
python
{ "resource": "" }
q279813
loop_gtk
test
def loop_gtk(kernel):
    """Start the kernel, coordinating with the GTK event loop"""
    from .gui.gtkembed import GTKEmbed

    gtk_kernel = GTKEmbed(kernel)
    gtk_kernel.start()
python
{ "resource": "" }
q279814
loop_cocoa
test
def loop_cocoa(kernel): """Start the kernel, coordinating with the Cocoa CFRunLoop event loop via the matplotlib MacOSX backend. """ import matplotlib if matplotlib.__version__ < '1.1.0': kernel.log.warn( "MacOSX backend in matplotlib %s doesn't have a Timer, " "falling back on Tk for CFRunLoop integration. Note that " "even this won't work if Tk is linked against X11 instead of " "Cocoa (e.g. EPD). To use the MacOSX backend in the kernel, " "you must use matplotlib >= 1.1.0, or a native libtk." ) return loop_tk(kernel) from matplotlib.backends.backend_macosx import TimerMac, show # scale interval for sec->ms poll_interval = int(1000*kernel._poll_interval) real_excepthook = sys.excepthook def handle_int(etype, value, tb): """don't let KeyboardInterrupts look like crashes""" if etype is KeyboardInterrupt: io.raw_print("KeyboardInterrupt caught in CFRunLoop") else: real_excepthook(etype, value, tb) # add doi() as a Timer to the CFRunLoop def doi(): # restore excepthook during IPython code sys.excepthook = real_excepthook kernel.do_one_iteration() # and back: sys.excepthook = handle_int t = TimerMac(poll_interval) t.add_callback(doi) t.start() # but still need a Poller for when there are no active windows, # during which time mainloop() returns immediately poller = zmq.Poller() if kernel.control_stream: poller.register(kernel.control_stream.socket, zmq.POLLIN) for stream in kernel.shell_streams: poller.register(stream.socket, zmq.POLLIN) while True: try: # double nested try/except, to properly catch KeyboardInterrupt # due to pyzmq Issue #130 try: # don't let interrupts during mainloop invoke crash_handler: sys.excepthook = handle_int show.mainloop() sys.excepthook = real_excepthook # use poller if mainloop returned (no windows) # scale by extra factor of 10, since it's a real poll poller.poll(10*poll_interval) kernel.do_one_iteration() except: raise except KeyboardInterrupt: # Ctrl-C shouldn't crash the kernel io.raw_print("KeyboardInterrupt caught in kernel") finally: # ensure excepthook is restored sys.excepthook = real_excepthook
python
{ "resource": "" }
q279815
enable_gui
test
def enable_gui(gui, kernel=None):
    """Enable integration with a given GUI"""
    if gui not in loop_map:
        raise ValueError("GUI %r not supported" % gui)
    if kernel is None:
        if Application.initialized():
            kernel = getattr(Application.instance(), 'kernel', None)
        if kernel is None:
            raise RuntimeError("You didn't specify a kernel,"
                " and no IPython Application with a kernel appears to be running."
            )
    loop = loop_map[gui]
    if kernel.eventloop is not None and kernel.eventloop is not loop:
        raise RuntimeError("Cannot activate multiple GUI eventloops")
    kernel.eventloop = loop
python
{ "resource": "" }
q279816
GOE
test
def GOE(N):
    """Creates an NxN element of the Gaussian Orthogonal Ensemble"""
    m = ra.standard_normal((N,N))
    m += m.T
    return m/2
python
{ "resource": "" }
q279817
center_eigenvalue_diff
test
def center_eigenvalue_diff(mat):
    """Compute the eigvals of mat and then find the center eigval difference."""
    N = len(mat)
    evals = np.sort(la.eigvals(mat))
    diff = np.abs(evals[N/2] - evals[N/2-1])
    return diff
python
{ "resource": "" }
q279818
ensemble_diffs
test
def ensemble_diffs(num, N):
    """Return num eigenvalue diffs for the NxN GOE ensemble."""
    diffs = np.empty(num)
    for i in xrange(num):
        mat = GOE(N)
        diffs[i] = center_eigenvalue_diff(mat)
    return diffs
python
{ "resource": "" }
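A small sketch tying the three GOE helpers above together (it assumes the module-level aliases implied by the code: np for numpy, ra for numpy.random, la for numpy.linalg, and a Python 2 runtime for xrange and integer division):

mat = GOE(10)                          # one 10x10 sample from the ensemble
spacing = center_eigenvalue_diff(mat)  # gap between the two central eigenvalues
diffs = ensemble_diffs(1000, 10)       # 1000 such gaps for the 10x10 ensemble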
q279819
StepItem.init
test
def init(self, ctxt, step_addr):
    """
    Initialize the item.  This calls the class constructor with the
    appropriate arguments and returns the initialized object.

    :param ctxt: The context object.
    :param step_addr: The address of the step in the test configuration.
    """

    return self.cls(ctxt, self.name, self.conf, step_addr)
python
{ "resource": "" }
q279820
Step.parse_file
test
def parse_file(cls, ctxt, fname, key=None, step_addr=None): """ Parse a YAML file containing test steps. :param ctxt: The context object. :param fname: The name of the file to parse. :param key: An optional dictionary key. If specified, the file must be a YAML dictionary, and the referenced value will be interpreted as a list of steps. If not provided, the file must be a YAML list, which will be interpreted as the list of steps. :param step_addr: The address of the step in the test configuration. This may be used in the case of includes, for instance. :returns: A list of ``Step`` objects. """ # Load the YAML file try: with open(fname) as f: step_data = yaml.load(f) except Exception as exc: raise ConfigError( 'Failed to read file "%s": %s' % (fname, exc), step_addr, ) # Do we have a key? if key is not None: if (not isinstance(step_data, collections.Mapping) or key not in step_data): raise ConfigError( 'Bad step configuration file "%s": expecting dictionary ' 'with key "%s"' % (fname, key), step_addr, ) # Extract just the step data step_data = step_data[key] # Validate that it's a sequence if not isinstance(step_data, collections.Sequence): addr = ('%s[%s]' % (fname, key)) if key is not None else fname raise ConfigError( 'Bad step configuration sequence at %s: expecting list, ' 'not "%s"' % (addr, step_data.__class__.__name__), step_addr, ) # OK, assemble the step list and return it steps = [] for idx, step_conf in enumerate(step_data): steps.extend(cls.parse_step( ctxt, StepAddress(fname, idx, key), step_conf)) return steps
python
{ "resource": "" }
q279821
Step.parse_step
test
def parse_step(cls, ctxt, step_addr, step_conf): """ Parse a step dictionary. :param ctxt: The context object. :param step_addr: The address of the step in the test configuration. :param step_conf: The description of the step. This may be a scalar string or a dictionary. :returns: A list of steps. """ # Make sure the step makes sense if isinstance(step_conf, six.string_types): # Convert string to a dict for uniformity of processing step_conf = {step_conf: None} elif not isinstance(step_conf, collections.Mapping): raise ConfigError( 'Unable to parse step configuration: expecting string or ' 'dictionary, not "%s"' % step_conf.__class__.__name__, step_addr, ) # Parse the configuration into the action and modifier classes # and the configuration to apply to each action_item = None mod_items = {} kwargs = {} # extra args for Step.__init__() for key, key_conf in step_conf.items(): # Handle special keys first if key in cls.schemas: # Validate the key utils.schema_validate(key_conf, cls.schemas[key], ConfigError, key, step_addr=step_addr) # Save the value kwargs[key] = key_conf # Is it an action? elif key in entry.points[NAMESPACE_ACTION]: if action_item is not None: raise ConfigError( 'Bad step configuration: action "%s" specified, ' 'but action "%s" already processed' % (key, action_item.name), step_addr, ) action_item = StepItem( entry.points[NAMESPACE_ACTION][key], key, key_conf) # OK, is it a modifier? elif key in entry.points[NAMESPACE_MODIFIER]: mod_class = entry.points[NAMESPACE_MODIFIER][key] # Store it in priority order mod_items.setdefault(mod_class.priority, []) mod_items[mod_class.priority].append(StepItem( mod_class, key, key_conf)) # Couldn't resolve it else: raise ConfigError( 'Bad step configuration: unable to resolve action ' '"%s"' % key, step_addr, ) # Make sure we have an action if action_item is None: raise ConfigError( 'Bad step configuration: no action specified', step_addr, ) # What is the action type? action_type = (Modifier.STEP if action_item.cls.step_action else Modifier.NORMAL) # OK, build our modifiers list and preprocess the action # configuration modifiers = [] for mod_item in utils.iter_prio_dict(mod_items): # Verify that the modifier is compatible with the # action if mod_item.cls.restriction & action_type == 0: raise ConfigError( 'Bad step configuration: modifier "%s" is ' 'incompatible with the action "%s"' % (mod_item.name, action_item.name), step_addr, ) # Initialize the modifier modifier = mod_item.init(ctxt, step_addr) # Add it to the list of modifiers modifiers.append(modifier) # Apply the modifier's configuration processing action_item.conf = modifier.action_conf( ctxt, action_item.cls, action_item.name, action_item.conf, step_addr) # Now we can initialize the action action = action_item.init(ctxt, step_addr) # Create the step step = cls(step_addr, action, modifiers, **kwargs) # If the final_action is a StepAction, invoke it now and # return the list of steps. We do this after creating the # Step object so that we can take advantage of its handling of # modifiers. if action_item.cls.step_action: return step(ctxt) # Not a step action, return the step as a list of one element return [step]
python
{ "resource": "" }
q279822
BaseIPythonApplication.init_crash_handler
test
def init_crash_handler(self):
    """Create a crash handler, typically setting sys.excepthook to it."""
    self.crash_handler = self.crash_handler_class(self)
    sys.excepthook = self.excepthook
    def unset_crashhandler():
        sys.excepthook = sys.__excepthook__
    atexit.register(unset_crashhandler)
python
{ "resource": "" }
q279823
BaseIPythonApplication.load_config_file
test
def load_config_file(self, suppress_errors=True): """Load the config file. By default, errors in loading config are handled, and a warning printed on screen. For testing, the suppress_errors option is set to False, so errors will make tests fail. """ self.log.debug("Searching path %s for config files", self.config_file_paths) base_config = 'ipython_config.py' self.log.debug("Attempting to load config file: %s" % base_config) try: Application.load_config_file( self, base_config, path=self.config_file_paths ) except ConfigFileNotFound: # ignore errors loading parent self.log.debug("Config file %s not found", base_config) pass if self.config_file_name == base_config: # don't load secondary config return self.log.debug("Attempting to load config file: %s" % self.config_file_name) try: Application.load_config_file( self, self.config_file_name, path=self.config_file_paths ) except ConfigFileNotFound: # Only warn if the default config file was NOT being used. if self.config_file_specified: msg = self.log.warn else: msg = self.log.debug msg("Config file not found, skipping: %s", self.config_file_name) except: # For testing purposes. if not suppress_errors: raise self.log.warn("Error loading config file: %s" % self.config_file_name, exc_info=True)
python
{ "resource": "" }
q279824
BaseIPythonApplication.init_profile_dir
test
def init_profile_dir(self): """initialize the profile dir""" try: # location explicitly specified: location = self.config.ProfileDir.location except AttributeError: # location not specified, find by profile name try: p = ProfileDir.find_profile_dir_by_name(self.ipython_dir, self.profile, self.config) except ProfileDirError: # not found, maybe create it (always create default profile) if self.auto_create or self.profile=='default': try: p = ProfileDir.create_profile_dir_by_name(self.ipython_dir, self.profile, self.config) except ProfileDirError: self.log.fatal("Could not create profile: %r"%self.profile) self.exit(1) else: self.log.info("Created profile dir: %r"%p.location) else: self.log.fatal("Profile %r not found."%self.profile) self.exit(1) else: self.log.info("Using existing profile dir: %r"%p.location) else: # location is fully specified try: p = ProfileDir.find_profile_dir(location, self.config) except ProfileDirError: # not found, maybe create it if self.auto_create: try: p = ProfileDir.create_profile_dir(location, self.config) except ProfileDirError: self.log.fatal("Could not create profile directory: %r"%location) self.exit(1) else: self.log.info("Creating new profile dir: %r"%location) else: self.log.fatal("Profile directory %r not found."%location) self.exit(1) else: self.log.info("Using existing profile dir: %r"%location) self.profile_dir = p self.config_file_paths.append(p.location)
python
{ "resource": "" }
q279825
BaseIPythonApplication.stage_default_config_file
test
def stage_default_config_file(self):
    """auto generate default config file, and stage it into the profile."""
    s = self.generate_config_file()
    fname = os.path.join(self.profile_dir.location, self.config_file_name)
    if self.overwrite or not os.path.exists(fname):
        self.log.warn("Generating default config file: %r"%(fname))
        with open(fname, 'w') as f:
            f.write(s)
python
{ "resource": "" }
q279826
CoverageData.write
test
def write(self, suffix=None):
    """Write the collected coverage data to a file.

    `suffix` is a suffix to append to the base file name.  This can be
    used for multiple or parallel execution, so that many coverage data
    files can exist simultaneously.  A dot will be used to join the base
    name and the suffix.

    """
    if self.use_file:
        filename = self.filename
        if suffix:
            filename += "." + suffix
        self.write_file(filename)
python
{ "resource": "" }
q279827
CoverageData.erase
test
def erase(self):
    """Erase the data, both in this object, and from its file storage."""
    if self.use_file:
        if self.filename:
            file_be_gone(self.filename)
    self.lines = {}
    self.arcs = {}
python
{ "resource": "" }
q279828
CoverageData.line_data
test
def line_data(self):
    """Return the map from filenames to lists of line numbers executed."""
    return dict(
        [(f, sorted(lmap.keys())) for f, lmap in iitems(self.lines)]
        )
python
{ "resource": "" }
q279829
CoverageData.arc_data
test
def arc_data(self):
    """Return the map from filenames to lists of line number pairs."""
    return dict(
        [(f, sorted(amap.keys())) for f, amap in iitems(self.arcs)]
        )
python
{ "resource": "" }
q279830
CoverageData.write_file
test
def write_file(self, filename):
    """Write the coverage data to `filename`."""

    # Create the file data.
    data = {}

    data['lines'] = self.line_data()
    arcs = self.arc_data()
    if arcs:
        data['arcs'] = arcs

    if self.collector:
        data['collector'] = self.collector

    if self.debug and self.debug.should('dataio'):
        self.debug.write("Writing data to %r" % (filename,))

    # Write the pickle to the file.
    fdata = open(filename, 'wb')
    try:
        pickle.dump(data, fdata, 2)
    finally:
        fdata.close()
python
{ "resource": "" }
q279831
CoverageData.read_file
test
def read_file(self, filename):
    """Read the coverage data from `filename`."""
    self.lines, self.arcs = self._read_file(filename)
python
{ "resource": "" }
q279832
CoverageData.raw_data
test
def raw_data(self, filename):
    """Return the raw pickled data from `filename`."""
    if self.debug and self.debug.should('dataio'):
        self.debug.write("Reading data from %r" % (filename,))
    fdata = open(filename, 'rb')
    try:
        data = pickle.load(fdata)
    finally:
        fdata.close()
    return data
python
{ "resource": "" }
q279833
CoverageData._read_file
test
def _read_file(self, filename):
    """Return the stored coverage data from the given file.

    Returns two values, suitable for assigning to `self.lines` and
    `self.arcs`.

    """
    lines = {}
    arcs = {}
    try:
        data = self.raw_data(filename)
        if isinstance(data, dict):
            # Unpack the 'lines' item.
            lines = dict([
                (f, dict.fromkeys(linenos, None))
                    for f, linenos in iitems(data.get('lines', {}))
                ])
            # Unpack the 'arcs' item.
            arcs = dict([
                (f, dict.fromkeys(arcpairs, None))
                    for f, arcpairs in iitems(data.get('arcs', {}))
                ])
    except Exception:
        pass
    return lines, arcs
python
{ "resource": "" }
q279834
CoverageData.combine_parallel_data
test
def combine_parallel_data(self, aliases=None):
    """Combine a number of data files together.

    Treat `self.filename` as a file prefix, and combine the data from all
    of the data files starting with that prefix plus a dot.

    If `aliases` is provided, it's a `PathAliases` object that is used to
    re-map paths to match the local machine's.

    """
    aliases = aliases or PathAliases()
    data_dir, local = os.path.split(self.filename)
    localdot = local + '.'
    for f in os.listdir(data_dir or '.'):
        if f.startswith(localdot):
            full_path = os.path.join(data_dir, f)
            new_lines, new_arcs = self._read_file(full_path)
            for filename, file_data in iitems(new_lines):
                filename = aliases.map(filename)
                self.lines.setdefault(filename, {}).update(file_data)
            for filename, file_data in iitems(new_arcs):
                filename = aliases.map(filename)
                self.arcs.setdefault(filename, {}).update(file_data)
            if f != local:
                os.remove(full_path)
python
{ "resource": "" }
q279835
CoverageData.add_line_data
test
def add_line_data(self, line_data):
    """Add executed line data.

    `line_data` is { filename: { lineno: None, ... }, ...}

    """
    for filename, linenos in iitems(line_data):
        self.lines.setdefault(filename, {}).update(linenos)
python
{ "resource": "" }
q279836
CoverageData.add_arc_data
test
def add_arc_data(self, arc_data):
    """Add measured arc data.

    `arc_data` is { filename: { (l1,l2): None, ... }, ...}

    """
    for filename, arcs in iitems(arc_data):
        self.arcs.setdefault(filename, {}).update(arcs)
python
{ "resource": "" }
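A sketch of the data shapes these two methods expect, matching their docstrings (the filename is hypothetical, and `data` is assumed to be an existing CoverageData instance):

line_data = {"pkg/mod.py": {1: None, 2: None, 7: None}}
arc_data  = {"pkg/mod.py": {(1, 2): None, (2, 7): None}}
data.add_line_data(line_data)
data.add_arc_data(arc_data)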
q279837
CoverageData.add_to_hash
test
def add_to_hash(self, filename, hasher):
    """Contribute `filename`'s data to the Md5Hash `hasher`."""
    hasher.update(self.executed_lines(filename))
    hasher.update(self.executed_arcs(filename))
python
{ "resource": "" }
q279838
CoverageData.summary
test
def summary(self, fullpath=False):
    """Return a dict summarizing the coverage data.

    Keys are based on the filenames, and values are the number of executed
    lines.  If `fullpath` is true, then the keys are the full pathnames of
    the files, otherwise they are the basenames of the files.

    """
    summ = {}
    if fullpath:
        filename_fn = lambda f: f
    else:
        filename_fn = os.path.basename
    for filename, lines in iitems(self.lines):
        summ[filename_fn(filename)] = len(lines)
    return summ
python
{ "resource": "" }
q279839
get_pasted_lines
test
def get_pasted_lines(sentinel, l_input=py3compat.input):
    """ Yield pasted lines until the user enters the given sentinel value.
    """
    print "Pasting code; enter '%s' alone on the line to stop or use Ctrl-D." \
        % sentinel
    while True:
        try:
            l = l_input(':')
            if l == sentinel:
                return
            else:
                yield l
        except EOFError:
            print '<EOF>'
            return
python
{ "resource": "" }
q279840
TerminalInteractiveShell.mainloop
test
def mainloop(self, display_banner=None): """Start the mainloop. If an optional banner argument is given, it will override the internally created default banner. """ with nested(self.builtin_trap, self.display_trap): while 1: try: self.interact(display_banner=display_banner) #self.interact_with_readline() # XXX for testing of a readline-decoupled repl loop, call # interact_with_readline above break except KeyboardInterrupt: # this should not be necessary, but KeyboardInterrupt # handling seems rather unpredictable... self.write("\nKeyboardInterrupt in interact()\n")
python
{ "resource": "" }
q279841
TerminalInteractiveShell._replace_rlhist_multiline
test
def _replace_rlhist_multiline(self, source_raw, hlen_before_cell): """Store multiple lines as a single entry in history""" # do nothing without readline or disabled multiline if not self.has_readline or not self.multiline_history: return hlen_before_cell # windows rl has no remove_history_item if not hasattr(self.readline, "remove_history_item"): return hlen_before_cell # skip empty cells if not source_raw.rstrip(): return hlen_before_cell # nothing changed do nothing, e.g. when rl removes consecutive dups hlen = self.readline.get_current_history_length() if hlen == hlen_before_cell: return hlen_before_cell for i in range(hlen - hlen_before_cell): self.readline.remove_history_item(hlen - i - 1) stdin_encoding = get_stream_enc(sys.stdin, 'utf-8') self.readline.add_history(py3compat.unicode_to_str(source_raw.rstrip(), stdin_encoding)) return self.readline.get_current_history_length()
python
{ "resource": "" }
q279842
TerminalInteractiveShell.raw_input
test
def raw_input(self, prompt=''): """Write a prompt and read a line. The returned line does not include the trailing newline. When the user enters the EOF key sequence, EOFError is raised. Optional inputs: - prompt(''): a string to be printed to prompt the user. - continue_prompt(False): whether this line is the first one or a continuation in a sequence of inputs. """ # Code run by the user may have modified the readline completer state. # We must ensure that our completer is back in place. if self.has_readline: self.set_readline_completer() # raw_input expects str, but we pass it unicode sometimes prompt = py3compat.cast_bytes_py2(prompt) try: line = py3compat.str_to_unicode(self.raw_input_original(prompt)) except ValueError: warn("\n********\nYou or a %run:ed script called sys.stdin.close()" " or sys.stdout.close()!\nExiting IPython!\n") self.ask_exit() return "" # Try to be reasonably smart about not re-indenting pasted input more # than necessary. We do this by trimming out the auto-indent initial # spaces, if the user's actual input started itself with whitespace. if self.autoindent: if num_ini_spaces(line) > self.indent_current_nsp: line = line[self.indent_current_nsp:] self.indent_current_nsp = 0 return line
python
{ "resource": "" }
q279843
TerminalInteractiveShell.edit_syntax_error
test
def edit_syntax_error(self): """The bottom half of the syntax error handler called in the main loop. Loop until syntax error is fixed or user cancels. """ while self.SyntaxTB.last_syntax_error: # copy and clear last_syntax_error err = self.SyntaxTB.clear_err_state() if not self._should_recompile(err): return try: # may set last_syntax_error again if a SyntaxError is raised self.safe_execfile(err.filename,self.user_ns) except: self.showtraceback() else: try: f = open(err.filename) try: # This should be inside a display_trap block and I # think it is. sys.displayhook(f.read()) finally: f.close() except: self.showtraceback()
python
{ "resource": "" }
q279844
TerminalInteractiveShell._should_recompile
test
def _should_recompile(self,e): """Utility routine for edit_syntax_error""" if e.filename in ('<ipython console>','<input>','<string>', '<console>','<BackgroundJob compilation>', None): return False try: if (self.autoedit_syntax and not self.ask_yes_no('Return to editor to correct syntax error? ' '[Y/n] ','y')): return False except EOFError: return False def int0(x): try: return int(x) except TypeError: return 0 # always pass integer line and offset values to editor hook try: self.hooks.fix_error_editor(e.filename, int0(e.lineno),int0(e.offset),e.msg) except TryNext: warn('Could not open editor') return False return True
python
{ "resource": "" }
q279845
TerminalInteractiveShell.exit
test
def exit(self):
    """Handle interactive exit.

    This method calls the ask_exit callback."""
    if self.confirm_exit:
        if self.ask_yes_no('Do you really want to exit ([y]/n)?','y'):
            self.ask_exit()
    else:
        self.ask_exit()
python
{ "resource": "" }
q279846
VersionControl.get_url_rev
test
def get_url_rev(self):
    """
    Returns the correct repository URL and revision by parsing the given
    repository URL
    """
    error_message = (
        "Sorry, '%s' is a malformed VCS url. "
        "The format is <vcs>+<protocol>://<url>, "
        "e.g. svn+http://myrepo/svn/MyApp#egg=MyApp")
    assert '+' in self.url, error_message % self.url
    url = self.url.split('+', 1)[1]
    scheme, netloc, path, query, frag = urlparse.urlsplit(url)
    rev = None
    if '@' in path:
        path, rev = path.rsplit('@', 1)
    url = urlparse.urlunsplit((scheme, netloc, path, query, ''))
    return url, rev
python
{ "resource": "" }
q279847
IPythonQtConsoleApp.new_frontend_master
test
def new_frontend_master(self):
    """ Create and return new frontend attached to new kernel, launched on localhost.
    """
    ip = self.ip if self.ip in LOCAL_IPS else LOCALHOST
    kernel_manager = self.kernel_manager_class(
        ip=ip,
        connection_file=self._new_connection_file(),
        config=self.config,
    )
    # start the kernel
    kwargs = dict()
    kwargs['extra_arguments'] = self.kernel_argv
    kernel_manager.start_kernel(**kwargs)
    kernel_manager.start_channels()
    widget = self.widget_factory(config=self.config,
                                 local_kernel=True)
    self.init_colors(widget)
    widget.kernel_manager = kernel_manager
    widget._existing = False
    widget._may_close = True
    widget._confirm_exit = self.confirm_exit
    return widget
python
{ "resource": "" }
q279848
IPythonQtConsoleApp.init_colors
test
def init_colors(self, widget): """Configure the coloring of the widget""" # Note: This will be dramatically simplified when colors # are removed from the backend. # parse the colors arg down to current known labels try: colors = self.config.ZMQInteractiveShell.colors except AttributeError: colors = None try: style = self.config.IPythonWidget.syntax_style except AttributeError: style = None try: sheet = self.config.IPythonWidget.style_sheet except AttributeError: sheet = None # find the value for colors: if colors: colors=colors.lower() if colors in ('lightbg', 'light'): colors='lightbg' elif colors in ('dark', 'linux'): colors='linux' else: colors='nocolor' elif style: if style=='bw': colors='nocolor' elif styles.dark_style(style): colors='linux' else: colors='lightbg' else: colors=None # Configure the style if style: widget.style_sheet = styles.sheet_from_template(style, colors) widget.syntax_style = style widget._syntax_style_changed() widget._style_sheet_changed() elif colors: # use a default dark/light/bw style widget.set_default_style(colors=colors) if self.stylesheet: # we got an explicit stylesheet if os.path.isfile(self.stylesheet): with open(self.stylesheet) as f: sheet = f.read() else: raise IOError("Stylesheet %r not found." % self.stylesheet) if sheet: widget.style_sheet = sheet widget._style_sheet_changed()
python
{ "resource": "" }
q279849
EngineCommunicator.info
test
def info(self):
    """return the connection info for this object's sockets."""
    return (self.identity, self.url, self.pub_url, self.location)
python
{ "resource": "" }
q279850
Rconverter
test
def Rconverter(Robj, dataframe=False): """ Convert an object in R's namespace to one suitable for ipython's namespace. For a data.frame, it tries to return a structured array. It first checks for colnames, then names. If all are NULL, it returns np.asarray(Robj), else it tries to construct a recarray Parameters ---------- Robj: an R object returned from rpy2 """ is_data_frame = ro.r('is.data.frame') colnames = ro.r('colnames') rownames = ro.r('rownames') # with pandas, these could be used for the index names = ro.r('names') if dataframe: as_data_frame = ro.r('as.data.frame') cols = colnames(Robj) _names = names(Robj) if cols != ri.NULL: Robj = as_data_frame(Robj) names = tuple(np.array(cols)) elif _names != ri.NULL: names = tuple(np.array(_names)) else: # failed to find names return np.asarray(Robj) Robj = np.rec.fromarrays(Robj, names = names) return np.asarray(Robj)
python
{ "resource": "" }
q279851
findsource
test
def findsource(object): """Return the entire source file and starting line number for an object. The argument may be a module, class, method, function, traceback, frame, or code object. The source code is returned as a list of all the lines in the file and the line number indexes a line in that list. An IOError is raised if the source code cannot be retrieved. FIXED version with which we monkeypatch the stdlib to work around a bug.""" file = getsourcefile(object) or getfile(object) # If the object is a frame, then trying to get the globals dict from its # module won't work. Instead, the frame object itself has the globals # dictionary. globals_dict = None if inspect.isframe(object): # XXX: can this ever be false? globals_dict = object.f_globals else: module = getmodule(object, file) if module: globals_dict = module.__dict__ lines = linecache.getlines(file, globals_dict) if not lines: raise IOError('could not get source code') if ismodule(object): return lines, 0 if isclass(object): name = object.__name__ pat = re.compile(r'^(\s*)class\s*' + name + r'\b') # make some effort to find the best matching class definition: # use the one with the least indentation, which is the one # that's most probably not inside a function definition. candidates = [] for i in range(len(lines)): match = pat.match(lines[i]) if match: # if it's at toplevel, it's already the best one if lines[i][0] == 'c': return lines, i # else add whitespace to candidate list candidates.append((match.group(1), i)) if candidates: # this will sort by whitespace, and by line number, # less whitespace first candidates.sort() return lines, candidates[0][1] else: raise IOError('could not find class definition') if ismethod(object): object = object.im_func if isfunction(object): object = object.func_code if istraceback(object): object = object.tb_frame if isframe(object): object = object.f_code if iscode(object): if not hasattr(object, 'co_firstlineno'): raise IOError('could not find function definition') pat = re.compile(r'^(\s*def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)') pmatch = pat.match # fperez - fix: sometimes, co_firstlineno can give a number larger than # the length of lines, which causes an error. Safeguard against that. lnum = min(object.co_firstlineno,len(lines))-1 while lnum > 0: if pmatch(lines[lnum]): break lnum -= 1 return lines, lnum raise IOError('could not find code object')
python
{ "resource": "" }
q279852
TBTools.set_colors
test
def set_colors(self,*args,**kw):
    """Shorthand access to the color table scheme selector method."""

    # Set own color table
    self.color_scheme_table.set_active_scheme(*args,**kw)
    # for convenience, set Colors to the active scheme
    self.Colors = self.color_scheme_table.active_colors
    # Also set colors of debugger
    if hasattr(self,'pdb') and self.pdb is not None:
        self.pdb.set_colors(*args,**kw)
python
{ "resource": "" }
q279853
TBTools.color_toggle
test
def color_toggle(self):
    """Toggle between the currently active color scheme and NoColor."""

    if self.color_scheme_table.active_scheme_name == 'NoColor':
        self.color_scheme_table.set_active_scheme(self.old_scheme)
        self.Colors = self.color_scheme_table.active_colors
    else:
        self.old_scheme = self.color_scheme_table.active_scheme_name
        self.color_scheme_table.set_active_scheme('NoColor')
        self.Colors = self.color_scheme_table.active_colors
python
{ "resource": "" }
q279854
TBTools.text
test
def text(self, etype, value, tb, tb_offset=None, context=5):
    """Return formatted traceback.

    Subclasses may override this if they add extra arguments.
    """
    tb_list = self.structured_traceback(etype, value, tb,
                                        tb_offset, context)
    return self.stb2text(tb_list)
python
{ "resource": "" }
q279855
ListTB.structured_traceback
test
def structured_traceback(self, etype, value, elist, tb_offset=None, context=5): """Return a color formatted string with the traceback info. Parameters ---------- etype : exception type Type of the exception raised. value : object Data stored in the exception elist : list List of frames, see class docstring for details. tb_offset : int, optional Number of frames in the traceback to skip. If not given, the instance value is used (set in constructor). context : int, optional Number of lines of context information to print. Returns ------- String with formatted exception. """ tb_offset = self.tb_offset if tb_offset is None else tb_offset Colors = self.Colors out_list = [] if elist: if tb_offset and len(elist) > tb_offset: elist = elist[tb_offset:] out_list.append('Traceback %s(most recent call last)%s:' % (Colors.normalEm, Colors.Normal) + '\n') out_list.extend(self._format_list(elist)) # The exception info should be a single entry in the list. lines = ''.join(self._format_exception_only(etype, value)) out_list.append(lines) # Note: this code originally read: ## for line in lines[:-1]: ## out_list.append(" "+line) ## out_list.append(lines[-1]) # This means it was indenting everything but the last line by a little # bit. I've disabled this for now, but if we see ugliness somewhre we # can restore it. return out_list
python
{ "resource": "" }
q279856
ListTB._format_list
test
def _format_list(self, extracted_list): """Format a list of traceback entry tuples for printing. Given a list of tuples as returned by extract_tb() or extract_stack(), return a list of strings ready for printing. Each string in the resulting list corresponds to the item with the same index in the argument list. Each string ends in a newline; the strings may contain internal newlines as well, for those items whose source text line is not None. Lifted almost verbatim from traceback.py """ Colors = self.Colors list = [] for filename, lineno, name, line in extracted_list[:-1]: item = ' File %s"%s"%s, line %s%d%s, in %s%s%s\n' % \ (Colors.filename, filename, Colors.Normal, Colors.lineno, lineno, Colors.Normal, Colors.name, name, Colors.Normal) if line: item += ' %s\n' % line.strip() list.append(item) # Emphasize the last entry filename, lineno, name, line = extracted_list[-1] item = '%s File %s"%s"%s, line %s%d%s, in %s%s%s%s\n' % \ (Colors.normalEm, Colors.filenameEm, filename, Colors.normalEm, Colors.linenoEm, lineno, Colors.normalEm, Colors.nameEm, name, Colors.normalEm, Colors.Normal) if line: item += '%s %s%s\n' % (Colors.line, line.strip(), Colors.Normal) list.append(item) #from pprint import pformat; print 'LISTTB', pformat(list) # dbg return list
python
{ "resource": "" }
q279857
ListTB._format_exception_only
test
def _format_exception_only(self, etype, value): """Format the exception part of a traceback. The arguments are the exception type and value such as given by sys.exc_info()[:2]. The return value is a list of strings, each ending in a newline. Normally, the list contains a single string; however, for SyntaxError exceptions, it contains several lines that (when printed) display detailed information about where the syntax error occurred. The message indicating which exception occurred is the always last string in the list. Also lifted nearly verbatim from traceback.py """ have_filedata = False Colors = self.Colors list = [] stype = Colors.excName + etype.__name__ + Colors.Normal if value is None: # Not sure if this can still happen in Python 2.6 and above list.append( str(stype) + '\n') else: if etype is SyntaxError: have_filedata = True #print 'filename is',filename # dbg if not value.filename: value.filename = "<string>" list.append('%s File %s"%s"%s, line %s%d%s\n' % \ (Colors.normalEm, Colors.filenameEm, value.filename, Colors.normalEm, Colors.linenoEm, value.lineno, Colors.Normal )) if value.text is not None: i = 0 while i < len(value.text) and value.text[i].isspace(): i += 1 list.append('%s %s%s\n' % (Colors.line, value.text.strip(), Colors.Normal)) if value.offset is not None: s = ' ' for c in value.text[i:value.offset-1]: if c.isspace(): s += c else: s += ' ' list.append('%s%s^%s\n' % (Colors.caret, s, Colors.Normal) ) try: s = value.msg except Exception: s = self._some_str(value) if s: list.append('%s%s:%s %s\n' % (str(stype), Colors.excName, Colors.Normal, s)) else: list.append('%s\n' % str(stype)) # sync with user hooks if have_filedata: ipinst = ipapi.get() if ipinst is not None: ipinst.hooks.synchronize_with_editor(value.filename, value.lineno, 0) return list
python
{ "resource": "" }
q279858
ListTB.show_exception_only
test
def show_exception_only(self, etype, evalue):
    """Only print the exception type and message, without a traceback.

    Parameters
    ----------
    etype : exception type
    value : exception value
    """
    # This method needs to use __call__ from *this* class, not the one from
    # a subclass whose signature or behavior may be different
    ostream = self.ostream
    ostream.flush()
    ostream.write('\n'.join(self.get_exception_only(etype, evalue)))
    ostream.flush()
python
{ "resource": "" }
q279859
VerboseTB.debugger
test
def debugger(self,force=False): """Call up the pdb debugger if desired, always clean up the tb reference. Keywords: - force(False): by default, this routine checks the instance call_pdb flag and does not actually invoke the debugger if the flag is false. The 'force' option forces the debugger to activate even if the flag is false. If the call_pdb flag is set, the pdb interactive debugger is invoked. In all cases, the self.tb reference to the current traceback is deleted to prevent lingering references which hamper memory management. Note that each call to pdb() does an 'import readline', so if your app requires a special setup for the readline completers, you'll have to fix that by hand after invoking the exception handler.""" if force or self.call_pdb: if self.pdb is None: self.pdb = debugger.Pdb( self.color_scheme_table.active_scheme_name) # the system displayhook may have changed, restore the original # for pdb display_trap = DisplayTrap(hook=sys.__displayhook__) with display_trap: self.pdb.reset() # Find the right frame so we don't pop up inside ipython itself if hasattr(self,'tb') and self.tb is not None: etb = self.tb else: etb = self.tb = sys.last_traceback while self.tb is not None and self.tb.tb_next is not None: self.tb = self.tb.tb_next if etb and etb.tb_next: etb = etb.tb_next self.pdb.botframe = etb.tb_frame self.pdb.interaction(self.tb.tb_frame, self.tb) if hasattr(self,'tb'): del self.tb
python
{ "resource": "" }
q279860
FormattedTB.set_mode
test
def set_mode(self,mode=None):
    """Switch to the desired mode.

    If mode is not specified, cycles through the available modes."""

    if not mode:
        new_idx = ( self.valid_modes.index(self.mode) + 1 ) % \
                  len(self.valid_modes)
        self.mode = self.valid_modes[new_idx]
    elif mode not in self.valid_modes:
        raise ValueError, 'Unrecognized mode in FormattedTB: <'+mode+'>\n'\
              'Valid modes: '+str(self.valid_modes)
    else:
        self.mode = mode
    # include variable details only in 'Verbose' mode
    self.include_vars = (self.mode == self.valid_modes[2])
    # Set the join character for generating text tracebacks
    self.tb_join_char = self._join_chars[self.mode]
python
{ "resource": "" }
q279861
group_required
test
def group_required(group, login_url=None,
                   redirect_field_name=REDIRECT_FIELD_NAME,
                   skip_superuser=True):
    """ View decorator for requiring a user group. """

    def decorator(view_func):
        @login_required(redirect_field_name=redirect_field_name,
                        login_url=login_url)
        def _wrapped_view(request, *args, **kwargs):
            if not (request.user.is_superuser and skip_superuser):
                if request.user.groups.filter(name=group).count() == 0:
                    raise PermissionDenied
            return view_func(request, *args, **kwargs)
        return _wrapped_view
    return decorator
python
{ "resource": "" }
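A hedged usage sketch for the decorator above (the view name, group name, and login URL are hypothetical):

@group_required("editors", login_url="/accounts/login/")
def edit_article(request, article_id):
    ...  # only members of the "editors" group (or superusers) reach this body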
q279862
ensure_fromlist
test
def ensure_fromlist(mod, fromlist, buf, recursive):
    """Handle 'from module import a, b, c' imports."""
    if not hasattr(mod, '__path__'):
        return
    for item in fromlist:
        if not hasattr(item, 'rindex'):
            raise TypeError("Item in ``from list'' not a string")
        if item == '*':
            if recursive:
                continue # avoid endless recursion
            try:
                all = mod.__all__
            except AttributeError:
                pass
            else:
                ret = ensure_fromlist(mod, all, buf, 1)
                if not ret:
                    return 0
        elif not hasattr(mod, item):
            import_submodule(mod, item, buf + '.' + item)
python
{ "resource": "" }
q279863
CodeBuilder.add_line
test
def add_line(self, line):
    """Add a line of source to the code.

    Don't include indentations or newlines.

    """
    self.code.append(" " * self.indent_amount)
    self.code.append(line)
    self.code.append("\n")
python
{ "resource": "" }
q279864
CodeBuilder.add_section
test
def add_section(self):
    """Add a section, a sub-CodeBuilder."""
    sect = CodeBuilder(self.indent_amount)
    self.code.append(sect)
    return sect
python
{ "resource": "" }
q279865
CodeBuilder.get_function
test
def get_function(self, fn_name):
    """Compile the code, and return the function `fn_name`."""
    assert self.indent_amount == 0
    g = {}
    code_text = str(self)
    exec(code_text, g)
    return g[fn_name]
python
{ "resource": "" }
q279866
Templite.expr_code
test
def expr_code(self, expr):
    """Generate a Python expression for `expr`."""
    if "|" in expr:
        pipes = expr.split("|")
        code = self.expr_code(pipes[0])
        for func in pipes[1:]:
            self.all_vars.add(func)
            code = "c_%s(%s)" % (func, code)
    elif "." in expr:
        dots = expr.split(".")
        code = self.expr_code(dots[0])
        args = [repr(d) for d in dots[1:]]
        code = "dot(%s, %s)" % (code, ", ".join(args))
    else:
        self.all_vars.add(expr)
        code = "c_%s" % expr
    return code
python
{ "resource": "" }
q279867
Templite.render
test
def render(self, context=None):
    """Render this template by applying it to `context`.

    `context` is a dictionary of values to use in this rendering.

    """
    # Make the complete context we'll use.
    ctx = dict(self.context)
    if context:
        ctx.update(context)
    return self.render_function(ctx, self.do_dots)
python
{ "resource": "" }
q279868
Templite.do_dots
test
def do_dots(self, value, *dots):
    """Evaluate dotted expressions at runtime."""
    for dot in dots:
        try:
            value = getattr(value, dot)
        except AttributeError:
            value = value[dot]
        if hasattr(value, '__call__'):
            value = value()
    return value
python
{ "resource": "" }
q279869
render_template
test
def render_template(tpl, context):
    '''
    A shortcut function to render a partial template with context and return
    the output.
    '''
    templates = [tpl] if type(tpl) != list else tpl
    tpl_instance = None
    for tpl in templates:
        try:
            tpl_instance = template.loader.get_template(tpl)
            break
        except template.TemplateDoesNotExist:
            pass

    if not tpl_instance:
        raise Exception('Template does not exist: ' + templates[-1])

    return tpl_instance.render(template.Context(context))
python
{ "resource": "" }
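A hedged usage sketch for render_template above (the template names and context are hypothetical, and Django's template loaders must already be configured):

html = render_template(["partials/card.html", "partials/card_default.html"],
                       {"title": "Example"})  # the first template that exists wins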
q279870
DisplayFormatter._formatters_default
test
def _formatters_default(self):
    """Activate the default formatters."""
    formatter_classes = [
        PlainTextFormatter,
        HTMLFormatter,
        SVGFormatter,
        PNGFormatter,
        JPEGFormatter,
        LatexFormatter,
        JSONFormatter,
        JavascriptFormatter
    ]
    d = {}
    for cls in formatter_classes:
        f = cls(config=self.config)
        d[f.format_type] = f
    return d
python
{ "resource": "" }
q279871
BaseFormatter.for_type
test
def for_type(self, typ, func): """Add a format function for a given type. Parameters ----------- typ : class The class of the object that will be formatted using `func`. func : callable The callable that will be called to compute the format data. The call signature of this function is simple, it must take the object to be formatted and return the raw data for the given format. Subclasses may use a different call signature for the `func` argument. """ oldfunc = self.type_printers.get(typ, None) if func is not None: # To support easy restoration of old printers, we need to ignore # Nones. self.type_printers[typ] = func return oldfunc
python
{ "resource": "" }
q279872
BaseFormatter.for_type_by_name
test
def for_type_by_name(self, type_module, type_name, func): """Add a format function for a type specified by the full dotted module and name of the type, rather than the type of the object. Parameters ---------- type_module : str The full dotted name of the module the type is defined in, like ``numpy``. type_name : str The name of the type (the class name), like ``dtype`` func : callable The callable that will be called to compute the format data. The call signature of this function is simple, it must take the object to be formatted and return the raw data for the given format. Subclasses may use a different call signature for the `func` argument. """ key = (type_module, type_name) oldfunc = self.deferred_printers.get(key, None) if func is not None: # To support easy restoration of old printers, we need to ignore # Nones. self.deferred_printers[key] = func return oldfunc
python
{ "resource": "" }
q279873
PlainTextFormatter._float_precision_changed
test
def _float_precision_changed(self, name, old, new): """float_precision changed, set float_format accordingly. float_precision can be set by int or str. This will set float_format, after interpreting input. If numpy has been imported, numpy print precision will also be set. integer `n` sets format to '%.nf', otherwise, format set directly. An empty string returns to defaults (repr for float, 8 for numpy). This parameter can be set via the '%precision' magic. """ if '%' in new: # got explicit format string fmt = new try: fmt%3.14159 except Exception: raise ValueError("Precision must be int or format string, not %r"%new) elif new: # otherwise, should be an int try: i = int(new) assert i >= 0 except ValueError: raise ValueError("Precision must be int or format string, not %r"%new) except AssertionError: raise ValueError("int precision must be non-negative, not %r"%i) fmt = '%%.%if'%i if 'numpy' in sys.modules: # set numpy precision if it has been imported import numpy numpy.set_printoptions(precision=i) else: # default back to repr fmt = '%r' if 'numpy' in sys.modules: import numpy # numpy default is 8 numpy.set_printoptions(precision=8) self.float_format = fmt
python
{ "resource": "" }
q279874
user_config_files
test
def user_config_files():
    """Return path to any existing user config files
    """
    return filter(os.path.exists,
                  map(os.path.expanduser, config_files))
python
{ "resource": "" }
q279875
Config.configure
test
def configure(self, argv=None, doc=None): """Configure the nose running environment. Execute configure before collecting tests with nose.TestCollector to enable output capture and other features. """ env = self.env if argv is None: argv = sys.argv cfg_files = getattr(self, 'files', []) options, args = self._parseArgs(argv, cfg_files) # If -c --config has been specified on command line, # load those config files and reparse if getattr(options, 'files', []): options, args = self._parseArgs(argv, options.files) self.options = options if args: self.testNames = args if options.testNames is not None: self.testNames.extend(tolist(options.testNames)) if options.py3where is not None: if sys.version_info >= (3,): options.where = options.py3where # `where` is an append action, so it can't have a default value # in the parser, or that default will always be in the list if not options.where: options.where = env.get('NOSE_WHERE', None) # include and exclude also if not options.ignoreFiles: options.ignoreFiles = env.get('NOSE_IGNORE_FILES', []) if not options.include: options.include = env.get('NOSE_INCLUDE', []) if not options.exclude: options.exclude = env.get('NOSE_EXCLUDE', []) self.addPaths = options.addPaths self.stopOnError = options.stopOnError self.verbosity = options.verbosity self.includeExe = options.includeExe self.traverseNamespace = options.traverseNamespace self.debug = options.debug self.debugLog = options.debugLog self.loggingConfig = options.loggingConfig self.firstPackageWins = options.firstPackageWins self.configureLogging() if options.where is not None: self.configureWhere(options.where) if options.testMatch: self.testMatch = re.compile(options.testMatch) if options.ignoreFiles: self.ignoreFiles = map(re.compile, tolist(options.ignoreFiles)) log.info("Ignoring files matching %s", options.ignoreFiles) else: log.info("Ignoring files matching %s", self.ignoreFilesDefaultStrings) if options.include: self.include = map(re.compile, tolist(options.include)) log.info("Including tests matching %s", options.include) if options.exclude: self.exclude = map(re.compile, tolist(options.exclude)) log.info("Excluding tests matching %s", options.exclude) # When listing plugins we don't want to run them if not options.showPlugins: self.plugins.configure(options, self) self.plugins.begin()
python
{ "resource": "" }
q279876
Config.configureLogging
test
def configureLogging(self): """Configure logging for nose, or optionally other packages. Any logger name may be set with the debug option, and that logger will be set to debug level and be assigned the same handler as the nose loggers, unless it already has a handler. """ if self.loggingConfig: from logging.config import fileConfig fileConfig(self.loggingConfig) return format = logging.Formatter('%(name)s: %(levelname)s: %(message)s') if self.debugLog: handler = logging.FileHandler(self.debugLog) else: handler = logging.StreamHandler(self.logStream) handler.setFormatter(format) logger = logging.getLogger('nose') logger.propagate = 0 # only add our default handler if there isn't already one there # this avoids annoying duplicate log messages. if handler not in logger.handlers: logger.addHandler(handler) # default level lvl = logging.WARNING if self.verbosity >= 5: lvl = 0 elif self.verbosity >= 4: lvl = logging.DEBUG elif self.verbosity >= 3: lvl = logging.INFO logger.setLevel(lvl) # individual overrides if self.debug: # no blanks debug_loggers = [ name for name in self.debug.split(',') if name ] for logger_name in debug_loggers: l = logging.getLogger(logger_name) l.setLevel(logging.DEBUG) if not l.handlers and not logger_name.startswith('nose'): l.addHandler(handler)
python
{ "resource": "" }
q279877
Config.configureWhere
test
def configureWhere(self, where): """Configure the working directory or directories for the test run. """ from nose.importer import add_path self.workingDir = None where = tolist(where) warned = False for path in where: if not self.workingDir: abs_path = absdir(path) if abs_path is None: raise ValueError("Working directory %s not found, or " "not a directory" % path) log.info("Set working dir to %s", abs_path) self.workingDir = abs_path if self.addPaths and \ os.path.exists(os.path.join(abs_path, '__init__.py')): log.info("Working directory %s is a package; " "adding to sys.path" % abs_path) add_path(abs_path) continue if not warned: warn("Use of multiple -w arguments is deprecated and " "support may be removed in a future release. You can " "get the same behavior by passing directories without " "the -w argument on the command line, or by using the " "--tests argument in a configuration file.", DeprecationWarning) self.testNames.append(path)
python
{ "resource": "" }
q279878
page_dumb
test
def page_dumb(strng, start=0, screen_lines=25):
    """Very dumb 'pager' in Python, for when nothing else works.

    Only moves forward, same interface as page(), except for pager_cmd and
    mode."""

    out_ln = strng.splitlines()[start:]
    screens = chop(out_ln,screen_lines-1)
    if len(screens) == 1:
        print >>io.stdout, os.linesep.join(screens[0])
    else:
        last_escape = ""
        for scr in screens[0:-1]:
            hunk = os.linesep.join(scr)
            print >>io.stdout, last_escape + hunk
            if not page_more():
                return
            esc_list = esc_re.findall(hunk)
            if len(esc_list) > 0:
                last_escape = esc_list[-1]
        print >>io.stdout, last_escape + os.linesep.join(screens[-1])
python
{ "resource": "" }
q279879
page
test
def page(strng, start=0, screen_lines=0, pager_cmd=None):
    """Print a string, piping through a pager after a certain length.

    The screen_lines parameter specifies the number of *usable* lines of your
    terminal screen (total lines minus lines you need to reserve to show other
    information).

    If you set screen_lines to a number <=0, page() will try to
    auto-determine your screen size and will only use up to
    (screen_size+screen_lines) for printing, paging after that. That is, if
    you want auto-detection but need to reserve the bottom 3 lines of the
    screen, use screen_lines = -3, and for auto-detection without any lines
    reserved simply use screen_lines = 0.

    If a string won't fit in the allowed lines, it is sent through the
    specified pager command. If none given, look for PAGER in the environment,
    and ultimately default to less.

    If no system pager works, the string is sent through a 'dumb pager'
    written in python, very simplistic.
    """
    # Some routines may auto-compute start offsets incorrectly and pass a
    # negative value.  Offset to 0 for robustness.
    start = max(0, start)

    # first, try the hook
    ip = ipapi.get()
    if ip:
        try:
            ip.hooks.show_in_pager(strng)
            return
        except TryNext:
            pass

    # Ugly kludge, but calling curses.initscr() flat out crashes in emacs
    TERM = os.environ.get('TERM', 'dumb')
    if TERM in ['dumb', 'emacs'] and os.name != 'nt':
        print strng
        return
    # chop off the topmost part of the string we don't want to see
    str_lines = strng.splitlines()[start:]
    str_toprint = os.linesep.join(str_lines)
    num_newlines = len(str_lines)
    len_str = len(str_toprint)

    # Dumb heuristics to guesstimate number of on-screen lines the string
    # takes.  Very basic, but good enough for docstrings in reasonable
    # terminals. If someone later feels like refining it, it's not hard.
    numlines = max(num_newlines, int(len_str / 80) + 1)

    screen_lines_def = get_terminal_size()[1]

    # auto-determine screen size
    if screen_lines <= 0:
        try:
            screen_lines += _detect_screen_size(use_curses, screen_lines_def)
        except (TypeError, UnsupportedOperation):
            print >>io.stdout, str_toprint
            return

    #print 'numlines',numlines,'screenlines',screen_lines  # dbg
    if numlines <= screen_lines:
        #print '*** normal print'  # dbg
        print >>io.stdout, str_toprint
    else:
        # Try to open pager and default to internal one if that fails.
        # All failure modes are tagged as 'retval=1', to match the return
        # value of a failed system command.  If any intermediate attempt
        # sets retval to 1, at the end we resort to our own page_dumb() pager.
        pager_cmd = get_pager_cmd(pager_cmd)
        pager_cmd += ' ' + get_pager_start(pager_cmd, start)
        if os.name == 'nt':
            if pager_cmd.startswith('type'):
                # The default WinXP 'type' command is failing on complex
                # strings.
                retval = 1
            else:
                tmpname = tempfile.mktemp('.txt')
                tmpfile = open(tmpname, 'wt')
                tmpfile.write(strng)
                tmpfile.close()
                cmd = "%s < %s" % (pager_cmd, tmpname)
                if os.system(cmd):
                    retval = 1
                else:
                    retval = None
                os.remove(tmpname)
        else:
            try:
                retval = None
                # if I use popen4, things hang. No idea why.
                #pager,shell_out = os.popen4(pager_cmd)
                pager = os.popen(pager_cmd, 'w')
                pager.write(strng)
                pager.close()
                retval = pager.close()  # success returns None
            except IOError, msg:  # broken pipe when user quits
                if msg.args == (32, 'Broken pipe'):
                    retval = None
                else:
                    retval = 1
            except OSError:
                # Other strange problems, sometimes seen in Win2k/cygwin
                retval = 1
        if retval is not None:
            page_dumb(strng, screen_lines=screen_lines)
python
{ "resource": "" }
q279880
page_file
test
def page_file(fname, start=0, pager_cmd=None):
    """Page a file, using an optional pager command and starting line.
    """
    pager_cmd = get_pager_cmd(pager_cmd)
    pager_cmd += ' ' + get_pager_start(pager_cmd, start)

    try:
        if os.environ['TERM'] in ['emacs', 'dumb']:
            raise EnvironmentError
        system(pager_cmd + ' ' + fname)
    except:
        try:
            if start > 0:
                start -= 1
            page(open(fname).read(), start)
        except:
            print 'Unable to show file', `fname`
python
{ "resource": "" }
q279881
get_pager_cmd
test
def get_pager_cmd(pager_cmd=None):
    """Return a pager command.

    Makes some attempts at finding an OS-correct one.
    """
    if os.name == 'posix':
        default_pager_cmd = 'less -r'  # -r for color control sequences
    elif os.name in ['nt', 'dos']:
        default_pager_cmd = 'type'

    if pager_cmd is None:
        try:
            pager_cmd = os.environ['PAGER']
        except:
            pager_cmd = default_pager_cmd
    return pager_cmd
python
{ "resource": "" }
q279882
get_pager_start
test
def get_pager_start(pager, start):
    """Return the string for paging files with an offset.

    This is the '+N' argument which less and more (under Unix) accept.
    """
    if pager in ['less', 'more']:
        if start:
            start_string = '+' + str(start)
        else:
            start_string = ''
    else:
        start_string = ''
    return start_string
python
{ "resource": "" }
q279883
snip_print
test
def snip_print(str, width=75, print_full=0, header=''):
    """Print a string snipping the midsection to fit in width.

    print_full: mode control:
      - 0: only snip long strings
      - 1: send to page() directly.
      - 2: snip long strings and ask for full length viewing with page()

    Return 1 if snipping was necessary, 0 otherwise."""
    if print_full == 1:
        page(header + str)
        return 0

    print header,
    if len(str) < width:
        print str
        snip = 0
    else:
        whalf = int((width - 5) / 2)
        print str[:whalf] + ' <...> ' + str[-whalf:]
        snip = 1
    if snip and print_full == 2:
        if raw_input(header + ' Snipped. View (y/n)? [N]').lower() == 'y':
            page(str)
    return snip
python
{ "resource": "" }
q279884
print_basic_unicode
test
def print_basic_unicode(o, p, cycle):
    """A function to pretty print sympy Basic objects."""
    if cycle:
        return p.text('Basic(...)')
    out = pretty(o, use_unicode=True)
    if '\n' in out:
        p.text(u'\n')
    p.text(out)
python
{ "resource": "" }
q279885
print_png
test
def print_png(o):
    """
    A function to display sympy expression using inline style LaTeX in PNG.
    """
    s = latex(o, mode='inline')
    # mathtext does not understand certain latex flags, so we try to replace
    # them with suitable subs.
    s = s.replace('\\operatorname', '')
    s = s.replace('\\overline', '\\bar')
    png = latex_to_png(s)
    return png
python
{ "resource": "" }
q279886
print_display_png
test
def print_display_png(o):
    """
    A function to display sympy expression using display style LaTeX in PNG.
    """
    s = latex(o, mode='plain')
    s = s.strip('$')
    # As matplotlib does not support display style, dvipng backend is
    # used here.
    png = latex_to_png('$$%s$$' % s, backend='dvipng')
    return png
python
{ "resource": "" }
q279887
can_print_latex
test
def can_print_latex(o):
    """
    Return True if type o can be printed with LaTeX.

    If o is a container type, this is True if and only if every element of o
    can be printed with LaTeX.
    """
    import sympy
    if isinstance(o, (list, tuple, set, frozenset)):
        return all(can_print_latex(i) for i in o)
    elif isinstance(o, dict):
        return all((isinstance(i, basestring) or can_print_latex(i)) and
                   can_print_latex(o[i]) for i in o)
    elif isinstance(o, (sympy.Basic, sympy.matrices.Matrix, int, long, float)):
        return True
    return False
python
{ "resource": "" }
q279888
print_latex
test
def print_latex(o):
    """A function to generate the latex representation of sympy expressions."""
    if can_print_latex(o):
        s = latex(o, mode='plain')
        s = s.replace('\\dag', '\\dagger')
        s = s.strip('$')
        return '$$%s$$' % s
    # Fallback to the string printer
    return None
python
{ "resource": "" }
q279889
Plugin.add_options
test
def add_options(self, parser, env=None):
    """Non-camel-case version of func name for backwards compatibility.

    .. warning ::

       DEPRECATED: Do not use this method,
       use :meth:`options <nose.plugins.base.IPluginInterface.options>`
       instead.
    """
    # FIXME raise deprecation warning if wasn't called by wrapper
    if env is None:
        env = os.environ
    try:
        self.options(parser, env)
        self.can_configure = True
    except OptionConflictError, e:
        warn("Plugin %s has conflicting option string: %s and will "
             "be disabled" % (self, e), RuntimeWarning)
        self.enabled = False
        self.can_configure = False
python
{ "resource": "" }
q279890
validate_string_list
test
def validate_string_list(lst):
    """Validate that the input is a list of strings.

    Raises ValueError if not."""
    if not isinstance(lst, list):
        raise ValueError('input %r must be a list' % lst)
    for x in lst:
        if not isinstance(x, basestring):
            raise ValueError('element %r in list must be a string' % x)
python
{ "resource": "" }
q279891
validate_string_dict
test
def validate_string_dict(dct):
    """Validate that the input is a dict with string keys and values.

    Raises ValueError if not."""
    for k, v in dct.iteritems():
        if not isinstance(k, basestring):
            raise ValueError('key %r in dict must be a string' % k)
        if not isinstance(v, basestring):
            raise ValueError('value %r in dict must be a string' % v)
python
{ "resource": "" }
q279892
ZMQSocketChannel._run_loop
test
def _run_loop(self):
    """Run my loop, ignoring EINTR events in the poller"""
    while True:
        try:
            self.ioloop.start()
        except ZMQError as e:
            if e.errno == errno.EINTR:
                continue
            else:
                raise
        except Exception:
            if self._exiting:
                break
            else:
                raise
        else:
            break
python
{ "resource": "" }
q279893
ZMQSocketChannel._handle_recv
test
def _handle_recv(self, msg):
    """callback for stream.on_recv

    unpacks message, and calls handlers with it.
    """
    ident, smsg = self.session.feed_identities(msg)
    self.call_handlers(self.session.unserialize(smsg))
python
{ "resource": "" }
q279894
ShellSocketChannel.execute
test
def execute(self, code, silent=False, user_variables=None,
            user_expressions=None, allow_stdin=None):
    """Execute code in the kernel.

    Parameters
    ----------
    code : str
        A string of Python code.

    silent : bool, optional (default False)
        If set, the kernel will execute the code as quietly as possible.

    user_variables : list, optional
        A list of variable names to pull from the user's namespace. They
        will come back as a dict with these names as keys and their
        :func:`repr` as values.

    user_expressions : dict, optional
        A dict mapping names to expressions to be evaluated in the user's
        namespace. They will come back as a dict with these names as keys
        and the :func:`repr` of the evaluated expressions as values.

    allow_stdin : bool, optional
        Flag for whether the kernel may request input from the frontend
        (for example via raw_input) while this code runs. Defaults to
        self.allow_stdin.

    Returns
    -------
    The msg_id of the message sent.
    """
    if user_variables is None:
        user_variables = []
    if user_expressions is None:
        user_expressions = {}
    if allow_stdin is None:
        allow_stdin = self.allow_stdin

    # Don't waste network traffic if inputs are invalid
    if not isinstance(code, basestring):
        raise ValueError('code %r must be a string' % code)
    validate_string_list(user_variables)
    validate_string_dict(user_expressions)

    # Create class for content/msg creation. Related to, but possibly
    # not in Session.
    content = dict(code=code, silent=silent,
                   user_variables=user_variables,
                   user_expressions=user_expressions,
                   allow_stdin=allow_stdin,
                   )
    msg = self.session.msg('execute_request', content)
    self._queue_send(msg)
    return msg['header']['msg_id']
python
{ "resource": "" }
q279895
ShellSocketChannel.complete
test
def complete(self, text, line, cursor_pos, block=None):
    """Tab complete text in the kernel's namespace.

    Parameters
    ----------
    text : str
        The text to complete.
    line : str
        The full line of text that is the surrounding context for the
        text to complete.
    cursor_pos : int
        The position of the cursor in the line where the completion was
        requested.
    block : str, optional
        The full block of code in which the completion is being requested.

    Returns
    -------
    The msg_id of the message sent.
    """
    content = dict(text=text, line=line, block=block,
                   cursor_pos=cursor_pos)
    msg = self.session.msg('complete_request', content)
    self._queue_send(msg)
    return msg['header']['msg_id']
python
{ "resource": "" }
q279896
ShellSocketChannel.object_info
test
def object_info(self, oname, detail_level=0):
    """Get metadata information about an object.

    Parameters
    ----------
    oname : str
        A string specifying the object name.
    detail_level : int, optional
        The level of detail for the introspection (0-2).

    Returns
    -------
    The msg_id of the message sent.
    """
    content = dict(oname=oname, detail_level=detail_level)
    msg = self.session.msg('object_info_request', content)
    self._queue_send(msg)
    return msg['header']['msg_id']
python
{ "resource": "" }
q279897
ShellSocketChannel.history
test
def history(self, raw=True, output=False, hist_access_type='range', **kwargs):
    """Get entries from the history list.

    Parameters
    ----------
    raw : bool
        If True, return the raw input.
    output : bool
        If True, then return the output as well.
    hist_access_type : str
        'range' (fill in session, start and stop params), 'tail' (fill in n)
        or 'search' (fill in pattern param).
    session : int
        For a range request, the session from which to get lines. Session
        numbers are positive integers; negative ones count back from the
        current session.
    start : int
        The first line number of a history range.
    stop : int
        The final (excluded) line number of a history range.
    n : int
        The number of lines of history to get for a tail request.
    pattern : str
        The glob-syntax pattern for a search request.

    Returns
    -------
    The msg_id of the message sent.
    """
    content = dict(raw=raw, output=output,
                   hist_access_type=hist_access_type, **kwargs)
    msg = self.session.msg('history_request', content)
    self._queue_send(msg)
    return msg['header']['msg_id']
python
{ "resource": "" }
q279898
ShellSocketChannel.shutdown
test
def shutdown(self, restart=False):
    """Request an immediate kernel shutdown.

    Upon receipt of the (empty) reply, client code can safely assume that
    the kernel has shut down and it's safe to forcefully terminate it if
    it's still alive.

    The kernel will send the reply via a function registered with Python's
    atexit module, ensuring it's truly done as the kernel is done with all
    normal operation.
    """
    # Send quit message to kernel. Once we implement kernel-side setattr,
    # this should probably be done that way, but for now this will do.
    msg = self.session.msg('shutdown_request', {'restart': restart})
    self._queue_send(msg)
    return msg['header']['msg_id']
python
{ "resource": "" }
q279899
SubSocketChannel.flush
test
def flush(self, timeout=1.0):
    """Immediately processes all pending messages on the SUB channel.

    Callers should use this method to ensure that :meth:`call_handlers`
    has been called for all messages that have been received on the
    0MQ SUB socket of this channel.

    This method is thread safe.

    Parameters
    ----------
    timeout : float, optional
        The maximum amount of time to spend flushing, in seconds. The
        default is one second.
    """
    # We do the IOLoop callback process twice to ensure that the IOLoop
    # gets to perform at least one full poll.
    stop_time = time.time() + timeout
    for i in xrange(2):
        self._flushed = False
        self.ioloop.add_callback(self._flush)
        while not self._flushed and time.time() < stop_time:
            time.sleep(0.01)
python
{ "resource": "" }