_id (string, lengths 2-7) | title (string, lengths 1-88) | partition (string, 3 distinct values) | text (string, lengths 31-13.1k) | language (string, 1 distinct value) | meta_information (dict)
---|---|---|---|---|---|
q279800
|
abs_file
|
test
|
def abs_file(filename):
"""Return the absolute normalized form of `filename`."""
path = os.path.expandvars(os.path.expanduser(filename))
|
python
|
{
"resource": ""
}
|
q279801
|
prep_patterns
|
test
|
def prep_patterns(patterns):
"""Prepare the file patterns for use in a `FnmatchMatcher`.
If a pattern starts with a wildcard, it is used as a pattern
as-is. If it does not start with a wildcard, then it is made
absolute with the current directory.
If `patterns` is None, an empty list is returned.
"""
|
python
|
{
"resource": ""
}
|
q279802
|
sep
|
test
|
def sep(s):
"""Find the path separator used in this string, or os.sep if none."""
sep_match = re.search(r"[\\/]", s)
if sep_match:
the_sep
|
python
|
{
"resource": ""
}
|
q279803
|
find_python_files
|
test
|
def find_python_files(dirname):
"""Yield all of the importable Python files in `dirname`, recursively.
To be importable, the files have to be in a directory with a __init__.py,
except for `dirname` itself, which isn't required to have one. The
assumption is that `dirname` was specified directly, so the user knows
best, but subdirectories are checked for a __init__.py to be sure we only
find the importable files.
"""
for i, (dirpath, dirnames, filenames) in enumerate(os.walk(dirname)):
if i > 0 and '__init__.py' not in filenames:
# If a directory doesn't have __init__.py, then it isn't
# importable and neither are its files
|
python
|
{
"resource": ""
}
|
q279804
|
FileLocator.relative_filename
|
test
|
def relative_filename(self, filename):
"""Return the relative form of `filename`.
The filename will be relative to the current directory when the
`FileLocator` was constructed.
"""
fnorm = os.path.normcase(filename)
|
python
|
{
"resource": ""
}
|
q279805
|
FileLocator.canonical_filename
|
test
|
def canonical_filename(self, filename):
"""Return a canonical filename for `filename`.
An absolute path with no redundant components and normalized case.
"""
if filename not in self.canonical_filename_cache:
if not os.path.isabs(filename):
for path in [os.curdir] + sys.path:
if path is None:
continue
|
python
|
{
"resource": ""
}
|
q279806
|
FileLocator.get_zip_data
|
test
|
def get_zip_data(self, filename):
"""Get data from `filename` if it is a zip file path.
Returns the string data read from the zip file, or None if no zip file
could be found or `filename` isn't in it. The data returned will be
an empty string if the file is empty.
"""
import zipimport
markers = ['.zip'+os.sep, '.egg'+os.sep]
for marker in markers:
if marker in filename:
parts = filename.split(marker)
try:
|
python
|
{
"resource": ""
}
|
q279807
|
TreeMatcher.match
|
test
|
def match(self, fpath):
"""Does `fpath` indicate a file in one of our trees?"""
for d in self.dirs:
if fpath.startswith(d):
if
|
python
|
{
"resource": ""
}
|
q279808
|
FnmatchMatcher.match
|
test
|
def match(self, fpath):
"""Does `fpath` match one of our filename patterns?"""
for pat in self.pats:
|
python
|
{
"resource": ""
}
|
q279809
|
PathAliases.map
|
test
|
def map(self, path):
"""Map `path` through the aliases.
`path` is checked against all of the patterns. The first pattern to
match is used to replace the root of the path with the result root.
Only one pattern is ever used. If no patterns match, `path` is
returned unchanged.
The separator style in the result is made to match that of the result
in the alias.
"""
for regex, result, pattern_sep, result_sep in self.aliases:
m = regex.match(path)
|
python
|
{
"resource": ""
}
|
q279810
|
loop_qt4
|
test
|
def loop_qt4(kernel):
"""Start a kernel with PyQt4 event loop integration."""
from IPython.external.qt_for_kernel import QtCore
from IPython.lib.guisupport import get_app_qt4, start_event_loop_qt4
kernel.app = get_app_qt4([" "])
kernel.app.setQuitOnLastWindowClosed(False)
|
python
|
{
"resource": ""
}
|
q279811
|
loop_wx
|
test
|
def loop_wx(kernel):
"""Start a kernel with wx event loop support."""
import wx
from IPython.lib.guisupport import start_event_loop_wx
doi = kernel.do_one_iteration
# Wx uses milliseconds
poll_interval = int(1000*kernel._poll_interval)
# We have to put the wx.Timer in a wx.Frame for it to fire properly.
# We make the Frame hidden when we create it in the main app below.
class TimerFrame(wx.Frame):
def __init__(self, func):
wx.Frame.__init__(self, None, -1)
self.timer = wx.Timer(self)
# Units for the timer are in milliseconds
self.timer.Start(poll_interval)
self.Bind(wx.EVT_TIMER, self.on_timer)
self.func = func
def on_timer(self, event):
self.func()
# We need a custom wx.App to create our Frame subclass that has the
# wx.Timer to drive the ZMQ event loop.
class IPWxApp(wx.App):
def OnInit(self):
self.frame = TimerFrame(doi)
self.frame.Show(False)
return True
|
python
|
{
"resource": ""
}
|
q279812
|
loop_tk
|
test
|
def loop_tk(kernel):
"""Start a kernel with the Tk event loop."""
import Tkinter
doi = kernel.do_one_iteration
# Tk uses milliseconds
poll_interval = int(1000*kernel._poll_interval)
# For Tkinter, we create a Tk object and call its withdraw method.
class Timer(object):
def __init__(self, func):
self.app = Tkinter.Tk()
self.app.withdraw()
self.func = func
def on_timer(self):
|
python
|
{
"resource": ""
}
|
q279813
|
loop_gtk
|
test
|
def loop_gtk(kernel):
"""Start the kernel, coordinating with the GTK event loop"""
from .gui.gtkembed import GTKEmbed
|
python
|
{
"resource": ""
}
|
q279814
|
loop_cocoa
|
test
|
def loop_cocoa(kernel):
"""Start the kernel, coordinating with the Cocoa CFRunLoop event loop
via the matplotlib MacOSX backend.
"""
import matplotlib
if matplotlib.__version__ < '1.1.0':
kernel.log.warn(
"MacOSX backend in matplotlib %s doesn't have a Timer, "
"falling back on Tk for CFRunLoop integration. Note that "
"even this won't work if Tk is linked against X11 instead of "
"Cocoa (e.g. EPD). To use the MacOSX backend in the kernel, "
"you must use matplotlib >= 1.1.0, or a native libtk."
)
return loop_tk(kernel)
from matplotlib.backends.backend_macosx import TimerMac, show
# scale interval for sec->ms
poll_interval = int(1000*kernel._poll_interval)
real_excepthook = sys.excepthook
def handle_int(etype, value, tb):
"""don't let KeyboardInterrupts look like crashes"""
if etype is KeyboardInterrupt:
io.raw_print("KeyboardInterrupt caught in CFRunLoop")
else:
real_excepthook(etype, value, tb)
# add doi() as a Timer to the CFRunLoop
def doi():
# restore excepthook during IPython code
sys.excepthook = real_excepthook
kernel.do_one_iteration()
# and back:
sys.excepthook = handle_int
t = TimerMac(poll_interval)
t.add_callback(doi)
t.start()
# but still need a Poller for when there are no active windows,
# during which time mainloop() returns immediately
poller = zmq.Poller()
if kernel.control_stream:
|
python
|
{
"resource": ""
}
|
q279815
|
enable_gui
|
test
|
def enable_gui(gui, kernel=None):
"""Enable integration with a given GUI"""
if gui not in loop_map:
raise ValueError("GUI %r not supported" % gui)
if kernel is None:
if Application.initialized():
kernel = getattr(Application.instance(), 'kernel', None)
|
python
|
{
"resource": ""
}
|
q279816
|
GOE
|
test
|
def GOE(N):
"""Creates an NxN element of the Gaussian
|
python
|
{
"resource": ""
}
|
q279817
|
center_eigenvalue_diff
|
test
|
def center_eigenvalue_diff(mat):
"""Compute the eigvals of mat and then find the center eigval
|
python
|
{
"resource": ""
}
|
q279818
|
ensemble_diffs
|
test
|
def ensemble_diffs(num, N):
"""Return num eigenvalue diffs for the NxN GOE ensemble."""
diffs = np.empty(num)
for i in xrange(num):
|
python
|
{
"resource": ""
}
|
q279819
|
StepItem.init
|
test
|
def init(self, ctxt, step_addr):
"""
Initialize the item. This calls the class constructor with the
appropriate arguments and returns the initialized object.
|
python
|
{
"resource": ""
}
|
q279820
|
Step.parse_file
|
test
|
def parse_file(cls, ctxt, fname, key=None, step_addr=None):
"""
Parse a YAML file containing test steps.
:param ctxt: The context object.
:param fname: The name of the file to parse.
:param key: An optional dictionary key. If specified, the
file must be a YAML dictionary, and the referenced
value will be interpreted as a list of steps. If
not provided, the file must be a YAML list, which
will be interpreted as the list of steps.
:param step_addr: The address of the step in the test
configuration. This may be used in the case
of includes, for instance.
:returns: A list of ``Step`` objects.
"""
# Load the YAML file
try:
with open(fname) as f:
step_data = yaml.load(f)
except Exception as exc:
raise ConfigError(
'Failed to read file "%s": %s' % (fname, exc),
step_addr,
)
# Do we have a key?
if key is not None:
if (not isinstance(step_data, collections.Mapping) or
key not in step_data):
raise ConfigError(
'Bad step configuration file "%s": expecting dictionary '
'with key "%s"' % (fname, key),
step_addr,
)
|
python
|
{
"resource": ""
}
|
q279821
|
Step.parse_step
|
test
|
def parse_step(cls, ctxt, step_addr, step_conf):
"""
Parse a step dictionary.
:param ctxt: The context object.
:param step_addr: The address of the step in the test
configuration.
:param step_conf: The description of the step. This may be a
scalar string or a dictionary.
:returns: A list of steps.
"""
# Make sure the step makes sense
if isinstance(step_conf, six.string_types):
# Convert string to a dict for uniformity of processing
step_conf = {step_conf: None}
elif not isinstance(step_conf, collections.Mapping):
raise ConfigError(
'Unable to parse step configuration: expecting string or '
'dictionary, not "%s"' % step_conf.__class__.__name__,
step_addr,
)
# Parse the configuration into the action and modifier classes
# and the configuration to apply to each
action_item = None
mod_items = {}
kwargs = {} # extra args for Step.__init__()
for key, key_conf in step_conf.items():
# Handle special keys first
if key in cls.schemas:
# Validate the key
utils.schema_validate(key_conf, cls.schemas[key], ConfigError,
key, step_addr=step_addr)
# Save the value
kwargs[key] = key_conf
# Is it an action?
elif key in entry.points[NAMESPACE_ACTION]:
if action_item is not None:
raise ConfigError(
'Bad step configuration: action "%s" specified, '
'but action "%s" already processed' %
(key, action_item.name),
|
python
|
{
"resource": ""
}
|
q279822
|
BaseIPythonApplication.init_crash_handler
|
test
|
def init_crash_handler(self):
"""Create a crash handler, typically setting sys.excepthook to it."""
self.crash_handler = self.crash_handler_class(self)
sys.excepthook = self.excepthook
|
python
|
{
"resource": ""
}
|
q279823
|
BaseIPythonApplication.load_config_file
|
test
|
def load_config_file(self, suppress_errors=True):
"""Load the config file.
By default, errors in loading config are handled, and a warning
printed on screen. For testing, the suppress_errors option is set
to False, so errors will make tests fail.
"""
self.log.debug("Searching path %s for config files", self.config_file_paths)
base_config = 'ipython_config.py'
self.log.debug("Attempting to load config file: %s" %
base_config)
try:
Application.load_config_file(
self,
base_config,
path=self.config_file_paths
)
except ConfigFileNotFound:
# ignore errors loading parent
self.log.debug("Config file %s not found", base_config)
pass
if self.config_file_name == base_config:
# don't load secondary config
return
self.log.debug("Attempting to load config file: %s" %
self.config_file_name)
try:
|
python
|
{
"resource": ""
}
|
q279824
|
BaseIPythonApplication.init_profile_dir
|
test
|
def init_profile_dir(self):
"""initialize the profile dir"""
try:
# location explicitly specified:
location = self.config.ProfileDir.location
except AttributeError:
# location not specified, find by profile name
try:
p = ProfileDir.find_profile_dir_by_name(self.ipython_dir, self.profile, self.config)
except ProfileDirError:
# not found, maybe create it (always create default profile)
if self.auto_create or self.profile=='default':
try:
p = ProfileDir.create_profile_dir_by_name(self.ipython_dir, self.profile, self.config)
except ProfileDirError:
self.log.fatal("Could not create profile: %r"%self.profile)
self.exit(1)
else:
self.log.info("Created profile dir: %r"%p.location)
else:
self.log.fatal("Profile %r not found."%self.profile)
self.exit(1)
else:
self.log.info("Using existing profile dir: %r"%p.location)
else:
# location is fully specified
try:
p = ProfileDir.find_profile_dir(location, self.config)
except ProfileDirError:
# not found, maybe create it
if self.auto_create:
try:
|
python
|
{
"resource": ""
}
|
q279825
|
BaseIPythonApplication.stage_default_config_file
|
test
|
def stage_default_config_file(self):
"""auto generate default config file, and stage it into the profile."""
s = self.generate_config_file()
fname = os.path.join(self.profile_dir.location, self.config_file_name)
if self.overwrite or not os.path.exists(fname):
|
python
|
{
"resource": ""
}
|
q279826
|
CoverageData.write
|
test
|
def write(self, suffix=None):
"""Write the collected coverage data to a file.
`suffix` is a suffix to append to the base file name. This can be used
for multiple or parallel execution, so that many coverage data files
can exist simultaneously. A dot will be used to join the base name and
|
python
|
{
"resource": ""
}
|
q279827
|
CoverageData.erase
|
test
|
def erase(self):
"""Erase the data, both in this object, and from its file storage."""
if self.use_file:
if self.filename:
|
python
|
{
"resource": ""
}
|
q279828
|
CoverageData.line_data
|
test
|
def line_data(self):
"""Return the map from filenames to lists of line numbers executed."""
return dict(
|
python
|
{
"resource": ""
}
|
q279829
|
CoverageData.arc_data
|
test
|
def arc_data(self):
"""Return the map from filenames to lists of line number pairs."""
return dict(
|
python
|
{
"resource": ""
}
|
q279830
|
CoverageData.write_file
|
test
|
def write_file(self, filename):
"""Write the coverage data to `filename`."""
# Create the file data.
data = {}
data['lines'] = self.line_data()
arcs = self.arc_data()
if arcs:
data['arcs'] = arcs
if self.collector:
data['collector'] = self.collector
|
python
|
{
"resource": ""
}
|
q279831
|
CoverageData.read_file
|
test
|
def read_file(self, filename):
"""Read the coverage data from `filename`."""
|
python
|
{
"resource": ""
}
|
q279832
|
CoverageData.raw_data
|
test
|
def raw_data(self, filename):
"""Return the raw pickled data from `filename`."""
if self.debug and self.debug.should('dataio'):
|
python
|
{
"resource": ""
}
|
q279833
|
CoverageData._read_file
|
test
|
def _read_file(self, filename):
"""Return the stored coverage data from the given file.
Returns two values, suitable for assigning to `self.lines` and
`self.arcs`.
"""
lines = {}
arcs = {}
try:
data = self.raw_data(filename)
if isinstance(data, dict):
|
python
|
{
"resource": ""
}
|
q279834
|
CoverageData.combine_parallel_data
|
test
|
def combine_parallel_data(self, aliases=None):
"""Combine a number of data files together.
Treat `self.filename` as a file prefix, and combine the data from all
of the data files starting with that prefix plus a dot.
If `aliases` is provided, it's a `PathAliases` object that is used to
re-map paths to match the local machine's.
"""
aliases = aliases or PathAliases()
data_dir, local = os.path.split(self.filename)
localdot = local + '.'
for f in os.listdir(data_dir or '.'):
if f.startswith(localdot):
full_path = os.path.join(data_dir, f)
|
python
|
{
"resource": ""
}
|
q279835
|
CoverageData.add_line_data
|
test
|
def add_line_data(self, line_data):
"""Add executed line data.
`line_data` is { filename: { lineno: None, ... }, ...}
"""
|
python
|
{
"resource": ""
}
|
q279836
|
CoverageData.add_arc_data
|
test
|
def add_arc_data(self, arc_data):
"""Add measured arc data.
`arc_data` is { filename: { (l1,l2): None, ... }, ...}
"""
|
python
|
{
"resource": ""
}
|
q279837
|
CoverageData.add_to_hash
|
test
|
def add_to_hash(self, filename, hasher):
"""Contribute `filename`'s data to the Md5Hash `hasher`."""
|
python
|
{
"resource": ""
}
|
q279838
|
CoverageData.summary
|
test
|
def summary(self, fullpath=False):
"""Return a dict summarizing the coverage data.
Keys are based on the filenames, and values are the number of executed
lines. If `fullpath` is true, then the keys are the full pathnames of
the files, otherwise they are the basenames of the files.
"""
summ = {}
if fullpath:
filename_fn = lambda f: f
|
python
|
{
"resource": ""
}
|
q279839
|
get_pasted_lines
|
test
|
def get_pasted_lines(sentinel, l_input=py3compat.input):
""" Yield pasted lines until the user enters the given sentinel value.
"""
print "Pasting code; enter '%s' alone on the line to stop or
|
python
|
{
"resource": ""
}
|
q279840
|
TerminalInteractiveShell.mainloop
|
test
|
def mainloop(self, display_banner=None):
"""Start the mainloop.
If an optional banner argument is given, it will override the
internally created default banner.
"""
with nested(self.builtin_trap, self.display_trap):
while 1:
try:
self.interact(display_banner=display_banner)
#self.interact_with_readline()
# XXX for testing of a readline-decoupled repl loop, call
# interact_with_readline
|
python
|
{
"resource": ""
}
|
q279841
|
TerminalInteractiveShell._replace_rlhist_multiline
|
test
|
def _replace_rlhist_multiline(self, source_raw, hlen_before_cell):
"""Store multiple lines as a single entry in history"""
# do nothing without readline or disabled multiline
if not self.has_readline or not self.multiline_history:
return hlen_before_cell
# windows rl has no remove_history_item
if not hasattr(self.readline, "remove_history_item"):
return hlen_before_cell
# skip empty cells
if not source_raw.rstrip():
return hlen_before_cell
# nothing changed do nothing, e.g. when rl removes consecutive dups
hlen = self.readline.get_current_history_length()
|
python
|
{
"resource": ""
}
|
q279842
|
TerminalInteractiveShell.raw_input
|
test
|
def raw_input(self, prompt=''):
"""Write a prompt and read a line.
The returned line does not include the trailing newline.
When the user enters the EOF key sequence, EOFError is raised.
Optional inputs:
- prompt(''): a string to be printed to prompt the user.
- continue_prompt(False): whether this line is the first one or a
continuation in a sequence of inputs.
"""
# Code run by the user may have modified the readline completer state.
# We must ensure that our completer is back in place.
if self.has_readline:
|
python
|
{
"resource": ""
}
|
q279843
|
TerminalInteractiveShell.edit_syntax_error
|
test
|
def edit_syntax_error(self):
"""The bottom half of the syntax error handler called in the main loop.
Loop until syntax error is fixed or user cancels.
"""
while self.SyntaxTB.last_syntax_error:
# copy and clear last_syntax_error
err = self.SyntaxTB.clear_err_state()
if not self._should_recompile(err):
return
try:
# may set last_syntax_error again if a SyntaxError is raised
self.safe_execfile(err.filename,self.user_ns)
except:
self.showtraceback()
else:
try:
f = open(err.filename)
|
python
|
{
"resource": ""
}
|
q279844
|
TerminalInteractiveShell._should_recompile
|
test
|
def _should_recompile(self,e):
"""Utility routine for edit_syntax_error"""
if e.filename in ('<ipython console>','<input>','<string>',
'<console>','<BackgroundJob compilation>',
None):
return False
try:
if (self.autoedit_syntax and
not self.ask_yes_no('Return to editor to correct syntax error? '
'[Y/n] ','y')):
return False
except EOFError:
return False
def int0(x):
try:
return int(x)
except TypeError:
|
python
|
{
"resource": ""
}
|
q279845
|
TerminalInteractiveShell.exit
|
test
|
def exit(self):
"""Handle interactive exit.
This method calls the ask_exit callback."""
if self.confirm_exit:
if self.ask_yes_no('Do you really want
|
python
|
{
"resource": ""
}
|
q279846
|
VersionControl.get_url_rev
|
test
|
def get_url_rev(self):
"""
Returns the correct repository URL and revision by parsing the given
repository URL
"""
error_message= (
"Sorry, '%s' is a malformed VCS url. "
"Ihe format is <vcs>+<protocol>://<url>, "
"e.g. svn+http://myrepo/svn/MyApp#egg=MyApp")
assert '+' in self.url, error_message % self.url
|
python
|
{
"resource": ""
}
|
q279847
|
IPythonQtConsoleApp.new_frontend_master
|
test
|
def new_frontend_master(self):
""" Create and return new frontend attached to new kernel, launched on localhost.
"""
ip = self.ip if self.ip in LOCAL_IPS else LOCALHOST
kernel_manager = self.kernel_manager_class(
ip=ip,
connection_file=self._new_connection_file(),
config=self.config,
)
# start the kernel
kwargs = dict()
kwargs['extra_arguments'] = self.kernel_argv
kernel_manager.start_kernel(**kwargs)
|
python
|
{
"resource": ""
}
|
q279848
|
IPythonQtConsoleApp.init_colors
|
test
|
def init_colors(self, widget):
"""Configure the coloring of the widget"""
# Note: This will be dramatically simplified when colors
# are removed from the backend.
# parse the colors arg down to current known labels
try:
colors = self.config.ZMQInteractiveShell.colors
except AttributeError:
colors = None
try:
style = self.config.IPythonWidget.syntax_style
except AttributeError:
style = None
try:
sheet = self.config.IPythonWidget.style_sheet
except AttributeError:
sheet = None
# find the value for colors:
if colors:
colors=colors.lower()
if colors in ('lightbg', 'light'):
colors='lightbg'
|
python
|
{
"resource": ""
}
|
q279849
|
EngineCommunicator.info
|
test
|
def info(self):
"""return the connection info for this object's sockets."""
|
python
|
{
"resource": ""
}
|
q279850
|
Rconverter
|
test
|
def Rconverter(Robj, dataframe=False):
"""
Convert an object in R's namespace to one suitable
for ipython's namespace.
For a data.frame, it tries to return a structured array.
It first checks for colnames, then names.
If all are NULL, it returns np.asarray(Robj), else
it tries to construct a recarray
Parameters
----------
Robj: an R object returned from rpy2
"""
is_data_frame = ro.r('is.data.frame')
colnames = ro.r('colnames')
rownames = ro.r('rownames') # with pandas, these could be used for the index
names = ro.r('names')
if dataframe:
as_data_frame = ro.r('as.data.frame')
cols = colnames(Robj)
_names = names(Robj)
|
python
|
{
"resource": ""
}
|
q279851
|
findsource
|
test
|
def findsource(object):
"""Return the entire source file and starting line number for an object.
The argument may be a module, class, method, function, traceback, frame,
or code object. The source code is returned as a list of all the lines
in the file and the line number indexes a line in that list. An IOError
is raised if the source code cannot be retrieved.
FIXED version with which we monkeypatch the stdlib to work around a bug."""
file = getsourcefile(object) or getfile(object)
# If the object is a frame, then trying to get the globals dict from its
# module won't work. Instead, the frame object itself has the globals
# dictionary.
globals_dict = None
if inspect.isframe(object):
# XXX: can this ever be false?
globals_dict = object.f_globals
else:
module = getmodule(object, file)
if module:
globals_dict = module.__dict__
lines = linecache.getlines(file, globals_dict)
if not lines:
raise IOError('could not get source code')
if ismodule(object):
return lines, 0
if isclass(object):
name = object.__name__
pat = re.compile(r'^(\s*)class\s*' + name + r'\b')
# make some effort to find the best matching class definition:
# use the one with the least indentation, which is the one
# that's most probably not inside a function definition.
candidates = []
for i in range(len(lines)):
match = pat.match(lines[i])
if match:
# if it's at toplevel, it's already the best one
if lines[i][0] == 'c':
return lines, i
# else add whitespace to candidate list
candidates.append((match.group(1), i))
if candidates:
# this will sort by whitespace, and by line number,
# less whitespace first
candidates.sort()
|
python
|
{
"resource": ""
}
|
q279852
|
TBTools.set_colors
|
test
|
def set_colors(self,*args,**kw):
"""Shorthand access to the color table scheme selector method."""
# Set own color table
self.color_scheme_table.set_active_scheme(*args,**kw)
# for convenience, set Colors to the active scheme
|
python
|
{
"resource": ""
}
|
q279853
|
TBTools.color_toggle
|
test
|
def color_toggle(self):
"""Toggle between the currently active color scheme and NoColor."""
if self.color_scheme_table.active_scheme_name == 'NoColor':
self.color_scheme_table.set_active_scheme(self.old_scheme)
self.Colors = self.color_scheme_table.active_colors
else:
|
python
|
{
"resource": ""
}
|
q279854
|
TBTools.text
|
test
|
def text(self, etype, value, tb, tb_offset=None, context=5):
"""Return formatted traceback.
Subclasses may override this if they add extra arguments.
"""
|
python
|
{
"resource": ""
}
|
q279855
|
ListTB.structured_traceback
|
test
|
def structured_traceback(self, etype, value, elist, tb_offset=None,
context=5):
"""Return a color formatted string with the traceback info.
Parameters
----------
etype : exception type
Type of the exception raised.
value : object
Data stored in the exception
elist : list
List of frames, see class docstring for details.
tb_offset : int, optional
Number of frames in the traceback to skip. If not given, the
instance value is used (set in constructor).
context : int, optional
Number of lines of context information to print.
Returns
-------
String with formatted exception.
"""
tb_offset = self.tb_offset if tb_offset is None else tb_offset
Colors = self.Colors
out_list = []
if elist:
if tb_offset and len(elist) > tb_offset:
elist = elist[tb_offset:]
out_list.append('Traceback %s(most recent call last)%s:' %
|
python
|
{
"resource": ""
}
|
q279856
|
ListTB._format_list
|
test
|
def _format_list(self, extracted_list):
"""Format a list of traceback entry tuples for printing.
Given a list of tuples as returned by extract_tb() or
extract_stack(), return a list of strings ready for printing.
Each string in the resulting list corresponds to the item with the
same index in the argument list. Each string ends in a newline;
the strings may contain internal newlines as well, for those items
whose source text line is not None.
Lifted almost verbatim from traceback.py
"""
Colors = self.Colors
list = []
for filename, lineno, name, line in extracted_list[:-1]:
item = ' File %s"%s"%s, line %s%d%s, in %s%s%s\n' % \
(Colors.filename, filename, Colors.Normal,
Colors.lineno, lineno, Colors.Normal,
Colors.name, name, Colors.Normal)
if line:
item += ' %s\n' % line.strip()
|
python
|
{
"resource": ""
}
|
q279857
|
ListTB._format_exception_only
|
test
|
def _format_exception_only(self, etype, value):
"""Format the exception part of a traceback.
The arguments are the exception type and value such as given by
sys.exc_info()[:2]. The return value is a list of strings, each ending
in a newline. Normally, the list contains a single string; however,
for SyntaxError exceptions, it contains several lines that (when
printed) display detailed information about where the syntax error
        occurred. The message indicating which exception occurred is always
        the last string in the list.
Also lifted nearly verbatim from traceback.py
"""
have_filedata = False
Colors = self.Colors
list = []
stype = Colors.excName + etype.__name__ + Colors.Normal
if value is None:
# Not sure if this can still happen in Python 2.6 and above
list.append( str(stype) + '\n')
else:
if etype is SyntaxError:
have_filedata = True
#print 'filename is',filename # dbg
if not value.filename: value.filename = "<string>"
list.append('%s File %s"%s"%s, line %s%d%s\n' % \
(Colors.normalEm,
Colors.filenameEm, value.filename, Colors.normalEm,
Colors.linenoEm, value.lineno, Colors.Normal ))
if value.text is not None:
i = 0
while i < len(value.text) and value.text[i].isspace():
i += 1
list.append('%s %s%s\n' % (Colors.line,
value.text.strip(),
|
python
|
{
"resource": ""
}
|
q279858
|
ListTB.show_exception_only
|
test
|
def show_exception_only(self, etype, evalue):
"""Only print the exception type and message, without a traceback.
Parameters
----------
etype : exception type
value : exception value
"""
# This method needs to use __call__ from *this* class, not the one from
|
python
|
{
"resource": ""
}
|
q279859
|
VerboseTB.debugger
|
test
|
def debugger(self,force=False):
"""Call up the pdb debugger if desired, always clean up the tb
reference.
Keywords:
- force(False): by default, this routine checks the instance call_pdb
flag and does not actually invoke the debugger if the flag is false.
The 'force' option forces the debugger to activate even if the flag
is false.
If the call_pdb flag is set, the pdb interactive debugger is
invoked. In all cases, the self.tb reference to the current traceback
is deleted to prevent lingering references which hamper memory
management.
Note that each call to pdb() does an 'import readline', so if your app
requires a special setup for the readline completers, you'll have to
fix that by hand after invoking the exception handler."""
if force or self.call_pdb:
if self.pdb is None:
self.pdb = debugger.Pdb(
self.color_scheme_table.active_scheme_name)
# the system displayhook may have changed, restore the original
# for pdb
display_trap = DisplayTrap(hook=sys.__displayhook__)
|
python
|
{
"resource": ""
}
|
q279860
|
FormattedTB.set_mode
|
test
|
def set_mode(self,mode=None):
"""Switch to the desired mode.
If mode is not specified, cycles through the available modes."""
if not mode:
new_idx = ( self.valid_modes.index(self.mode) + 1 ) % \
len(self.valid_modes)
self.mode = self.valid_modes[new_idx]
elif mode not in self.valid_modes:
raise ValueError, 'Unrecognized mode in FormattedTB: <'+mode+'>\n'\
'Valid modes: '+str(self.valid_modes)
else:
|
python
|
{
"resource": ""
}
|
q279861
|
group_required
|
test
|
def group_required(group,
login_url=None,
redirect_field_name=REDIRECT_FIELD_NAME,
skip_superuser=True):
"""
View decorator for requiring a user group.
"""
def decorator(view_func):
@login_required(redirect_field_name=redirect_field_name,
|
python
|
{
"resource": ""
}
|
q279862
|
ensure_fromlist
|
test
|
def ensure_fromlist(mod, fromlist, buf, recursive):
"""Handle 'from module import a, b, c' imports."""
if not hasattr(mod, '__path__'):
return
for item in fromlist:
if not hasattr(item, 'rindex'):
raise TypeError("Item in ``from list'' not a string")
if item == '*':
if recursive:
continue # avoid endless recursion
try:
|
python
|
{
"resource": ""
}
|
q279863
|
CodeBuilder.add_line
|
test
|
def add_line(self, line):
"""Add a line of source to the code.
Don't include indentations or newlines.
"""
|
python
|
{
"resource": ""
}
|
q279864
|
CodeBuilder.add_section
|
test
|
def add_section(self):
"""Add a section, a sub-CodeBuilder."""
sect
|
python
|
{
"resource": ""
}
|
q279865
|
CodeBuilder.get_function
|
test
|
def get_function(self, fn_name):
"""Compile the code, and return the function `fn_name`."""
|
python
|
{
"resource": ""
}
|
q279866
|
Templite.expr_code
|
test
|
def expr_code(self, expr):
"""Generate a Python expression for `expr`."""
if "|" in expr:
pipes = expr.split("|")
code = self.expr_code(pipes[0])
for func in pipes[1:]:
self.all_vars.add(func)
code = "c_%s(%s)" % (func, code)
elif "." in expr:
dots = expr.split(".")
code = self.expr_code(dots[0])
|
python
|
{
"resource": ""
}
|
q279867
|
Templite.render
|
test
|
def render(self, context=None):
"""Render this template by applying it to `context`.
`context` is a dictionary of values to use in this rendering.
"""
# Make the complete context we'll use.
ctx
|
python
|
{
"resource": ""
}
|
q279868
|
Templite.do_dots
|
test
|
def do_dots(self, value, *dots):
"""Evaluate dotted expressions at runtime."""
for dot in dots:
try:
value = getattr(value, dot)
except AttributeError:
|
python
|
{
"resource": ""
}
|
q279869
|
render_template
|
test
|
def render_template(tpl, context):
'''
A shortcut function to render a partial template with context and return
the output.
'''
templates = [tpl] if type(tpl) != list else tpl
tpl_instance = None
for tpl in templates:
try:
tpl_instance = template.loader.get_template(tpl)
|
python
|
{
"resource": ""
}
|
q279870
|
DisplayFormatter._formatters_default
|
test
|
def _formatters_default(self):
"""Activate the default formatters."""
formatter_classes = [
PlainTextFormatter,
HTMLFormatter,
SVGFormatter,
|
python
|
{
"resource": ""
}
|
q279871
|
BaseFormatter.for_type
|
test
|
def for_type(self, typ, func):
"""Add a format function for a given type.
Parameters
-----------
typ : class
The class of the object that will be formatted using `func`.
func : callable
The callable that will be called to compute the format data. The
call signature of this function is simple, it must take the
object to be formatted and return the raw data for the given
format. Subclasses may use a different call signature for the
`func` argument.
|
python
|
{
"resource": ""
}
|
q279872
|
BaseFormatter.for_type_by_name
|
test
|
def for_type_by_name(self, type_module, type_name, func):
"""Add a format function for a type specified by the full dotted
module and name of the type, rather than the type of the object.
Parameters
----------
type_module : str
The full dotted name of the module the type is defined in, like
``numpy``.
type_name : str
The name of the type (the class name), like ``dtype``
func : callable
The callable that will be called to compute the format data. The
call signature of this function is simple, it must take the
object to
|
python
|
{
"resource": ""
}
|
q279873
|
PlainTextFormatter._float_precision_changed
|
test
|
def _float_precision_changed(self, name, old, new):
"""float_precision changed, set float_format accordingly.
float_precision can be set by int or str.
This will set float_format, after interpreting input.
If numpy has been imported, numpy print precision will also be set.
integer `n` sets format to '%.nf', otherwise, format set directly.
An empty string returns to defaults (repr for float, 8 for numpy).
This parameter can be set via the '%precision' magic.
"""
if '%' in new:
# got explicit format string
fmt = new
try:
fmt%3.14159
except Exception:
raise ValueError("Precision must be int or format string, not %r"%new)
elif new:
# otherwise, should be an int
try:
i = int(new)
assert i >= 0
except ValueError:
|
python
|
{
"resource": ""
}
|
q279874
|
user_config_files
|
test
|
def user_config_files():
"""Return path to any existing user config files
|
python
|
{
"resource": ""
}
|
q279875
|
Config.configure
|
test
|
def configure(self, argv=None, doc=None):
"""Configure the nose running environment. Execute configure before
collecting tests with nose.TestCollector to enable output capture and
other features.
"""
env = self.env
if argv is None:
argv = sys.argv
cfg_files = getattr(self, 'files', [])
options, args = self._parseArgs(argv, cfg_files)
# If -c --config has been specified on command line,
# load those config files and reparse
if getattr(options, 'files', []):
options, args = self._parseArgs(argv, options.files)
self.options = options
if args:
self.testNames = args
if options.testNames is not None:
self.testNames.extend(tolist(options.testNames))
if options.py3where is not None:
if sys.version_info >= (3,):
options.where = options.py3where
# `where` is an append action, so it can't have a default value
# in the parser, or that default will always be in the list
if not options.where:
options.where = env.get('NOSE_WHERE', None)
# include and exclude also
if not options.ignoreFiles:
options.ignoreFiles = env.get('NOSE_IGNORE_FILES', [])
if not options.include:
options.include = env.get('NOSE_INCLUDE', [])
if not options.exclude:
options.exclude = env.get('NOSE_EXCLUDE', [])
self.addPaths = options.addPaths
self.stopOnError = options.stopOnError
self.verbosity = options.verbosity
self.includeExe = options.includeExe
|
python
|
{
"resource": ""
}
|
q279876
|
Config.configureLogging
|
test
|
def configureLogging(self):
"""Configure logging for nose, or optionally other packages. Any logger
name may be set with the debug option, and that logger will be set to
debug level and be assigned the same handler as the nose loggers, unless
it already has a handler.
"""
if self.loggingConfig:
from logging.config import fileConfig
fileConfig(self.loggingConfig)
return
format = logging.Formatter('%(name)s: %(levelname)s: %(message)s')
if self.debugLog:
handler = logging.FileHandler(self.debugLog)
else:
handler = logging.StreamHandler(self.logStream)
handler.setFormatter(format)
logger = logging.getLogger('nose')
logger.propagate = 0
# only add our default handler if there isn't already one there
# this avoids annoying duplicate log messages.
if handler not in logger.handlers:
logger.addHandler(handler)
# default level
lvl = logging.WARNING
if self.verbosity >= 5:
lvl = 0
|
python
|
{
"resource": ""
}
|
q279877
|
Config.configureWhere
|
test
|
def configureWhere(self, where):
"""Configure the working directory or directories for the test run.
"""
from nose.importer import add_path
self.workingDir = None
where = tolist(where)
warned = False
for path in where:
if not self.workingDir:
abs_path = absdir(path)
if abs_path is None:
raise ValueError("Working directory %s not found, or "
"not a directory" % path)
log.info("Set working dir to %s", abs_path)
self.workingDir = abs_path
if self.addPaths and \
os.path.exists(os.path.join(abs_path, '__init__.py')):
log.info("Working directory %s is a package; "
"adding to sys.path" % abs_path)
|
python
|
{
"resource": ""
}
|
q279878
|
page_dumb
|
test
|
def page_dumb(strng, start=0, screen_lines=25):
"""Very dumb 'pager' in Python, for when nothing else works.
Only moves forward, same interface as page(), except for pager_cmd and
mode."""
out_ln = strng.splitlines()[start:]
screens = chop(out_ln,screen_lines-1)
if len(screens) == 1:
print >>io.stdout, os.linesep.join(screens[0])
else:
last_escape = ""
for scr in screens[0:-1]:
hunk = os.linesep.join(scr)
print >>io.stdout, last_escape + hunk
|
python
|
{
"resource": ""
}
|
q279879
|
page
|
test
|
def page(strng, start=0, screen_lines=0, pager_cmd=None):
"""Print a string, piping through a pager after a certain length.
The screen_lines parameter specifies the number of *usable* lines of your
terminal screen (total lines minus lines you need to reserve to show other
information).
If you set screen_lines to a number <=0, page() will try to auto-determine
your screen size and will only use up to (screen_size+screen_lines) for
printing, paging after that. That is, if you want auto-detection but need
to reserve the bottom 3 lines of the screen, use screen_lines = -3, and for
auto-detection without any lines reserved simply use screen_lines = 0.
If a string won't fit in the allowed lines, it is sent through the
specified pager command. If none given, look for PAGER in the environment,
and ultimately default to less.
If no system pager works, the string is sent through a 'dumb pager'
written in python, very simplistic.
"""
# Some routines may auto-compute start offsets incorrectly and pass a
# negative value. Offset to 0 for robustness.
start = max(0, start)
# first, try the hook
ip = ipapi.get()
if ip:
try:
ip.hooks.show_in_pager(strng)
return
except TryNext:
pass
# Ugly kludge, but calling curses.initscr() flat out crashes in emacs
TERM = os.environ.get('TERM','dumb')
if TERM in ['dumb','emacs'] and os.name != 'nt':
print strng
return
# chop off the topmost part of the string we don't want to see
str_lines = strng.splitlines()[start:]
str_toprint = os.linesep.join(str_lines)
num_newlines = len(str_lines)
len_str = len(str_toprint)
# Dumb heuristics to guesstimate number of on-screen lines the string
# takes. Very basic, but good enough for docstrings in reasonable
# terminals. If someone later feels like refining it, it's not hard.
numlines = max(num_newlines,int(len_str/80)+1)
screen_lines_def = get_terminal_size()[1]
# auto-determine screen size
if screen_lines <= 0:
try:
screen_lines += _detect_screen_size(use_curses, screen_lines_def)
except (TypeError, UnsupportedOperation):
print >>io.stdout, str_toprint
return
#print 'numlines',numlines,'screenlines',screen_lines # dbg
if numlines <= screen_lines :
#print '*** normal print' # dbg
print >>io.stdout, str_toprint
else:
# Try to open pager and default to internal one if that fails.
# All failure modes are tagged as 'retval=1', to match the return
# value of a failed system command. If any intermediate attempt
# sets retval to 1, at the end we resort to our own page_dumb() pager.
|
python
|
{
"resource": ""
}
|
q279880
|
page_file
|
test
|
def page_file(fname, start=0, pager_cmd=None):
"""Page a file, using an optional pager command and starting line.
"""
pager_cmd = get_pager_cmd(pager_cmd)
pager_cmd += ' ' + get_pager_start(pager_cmd,start)
try:
if os.environ['TERM'] in ['emacs','dumb']:
raise EnvironmentError
|
python
|
{
"resource": ""
}
|
q279881
|
get_pager_cmd
|
test
|
def get_pager_cmd(pager_cmd=None):
"""Return a pager command.
Makes some attempts at finding an OS-correct one.
"""
if os.name == 'posix':
default_pager_cmd = 'less -r' # -r for color control sequences
elif os.name in ['nt','dos']:
default_pager_cmd = 'type'
if pager_cmd is
|
python
|
{
"resource": ""
}
|
q279882
|
get_pager_start
|
test
|
def get_pager_start(pager, start):
"""Return the string for paging files with an offset.
This is the '+N' argument which less and more (under Unix) accept.
"""
if pager in ['less','more']:
if start:
|
python
|
{
"resource": ""
}
|
q279883
|
snip_print
|
test
|
def snip_print(str,width = 75,print_full = 0,header = ''):
"""Print a string snipping the midsection to fit in width.
print_full: mode control:
- 0: only snip long strings
- 1: send to page() directly.
- 2: snip long strings and ask for full length viewing with page()
Return 1 if snipping was necessary, 0 otherwise."""
if print_full == 1:
page(header+str)
return 0
print header,
if len(str) < width:
print str
snip = 0
|
python
|
{
"resource": ""
}
|
q279884
|
print_basic_unicode
|
test
|
def print_basic_unicode(o, p, cycle):
"""A function to pretty print sympy Basic objects."""
if cycle:
return p.text('Basic(...)')
|
python
|
{
"resource": ""
}
|
q279885
|
print_png
|
test
|
def print_png(o):
"""
A function to display sympy expression using inline style LaTeX in PNG.
"""
s = latex(o, mode='inline')
# mathtext does not understand certain latex flags, so we try to replace
# them with
|
python
|
{
"resource": ""
}
|
q279886
|
print_display_png
|
test
|
def print_display_png(o):
"""
A function to display sympy expression using display style LaTeX in PNG.
"""
s = latex(o, mode='plain')
s = s.strip('$')
# As matplotlib does not support
|
python
|
{
"resource": ""
}
|
q279887
|
can_print_latex
|
test
|
def can_print_latex(o):
"""
Return True if type o can be printed with LaTeX.
If o is a container type, this is True if and only if every element of o
can be printed with LaTeX.
"""
import sympy
if isinstance(o, (list, tuple, set, frozenset)):
return all(can_print_latex(i) for i in o)
elif isinstance(o, dict):
return all((isinstance(i, basestring)
|
python
|
{
"resource": ""
}
|
q279888
|
print_latex
|
test
|
def print_latex(o):
"""A function to generate the latex representation of sympy
expressions."""
if can_print_latex(o):
|
python
|
{
"resource": ""
}
|
q279889
|
Plugin.add_options
|
test
|
def add_options(self, parser, env=None):
"""Non-camel-case version of func name for backwards compatibility.
.. warning ::
DEPRECATED: Do not use this method,
use :meth:`options <nose.plugins.base.IPluginInterface.options>`
instead.
"""
# FIXME raise deprecation warning if wasn't called by wrapper
if env is None:
env = os.environ
try:
self.options(parser, env)
|
python
|
{
"resource": ""
}
|
q279890
|
validate_string_list
|
test
|
def validate_string_list(lst):
"""Validate that the input is a list of strings.
Raises ValueError if not."""
if not isinstance(lst, list):
|
python
|
{
"resource": ""
}
|
q279891
|
validate_string_dict
|
test
|
def validate_string_dict(dct):
"""Validate that the input is a dict with string keys and values.
Raises ValueError if not."""
for k,v in dct.iteritems():
|
python
|
{
"resource": ""
}
|
q279892
|
ZMQSocketChannel._run_loop
|
test
|
def _run_loop(self):
"""Run my loop, ignoring EINTR events in the poller"""
while True:
try:
self.ioloop.start()
except ZMQError as e:
if e.errno == errno.EINTR:
continue
else:
|
python
|
{
"resource": ""
}
|
q279893
|
ZMQSocketChannel._handle_recv
|
test
|
def _handle_recv(self, msg):
"""callback for stream.on_recv
unpacks message, and calls handlers with it.
"""
|
python
|
{
"resource": ""
}
|
q279894
|
ShellSocketChannel.execute
|
test
|
def execute(self, code, silent=False,
user_variables=None, user_expressions=None, allow_stdin=None):
"""Execute code in the kernel.
Parameters
----------
code : str
A string of Python code.
silent : bool, optional (default False)
            If set, the kernel will execute the code as quietly as possible.
user_variables : list, optional
A list of variable names to pull from the user's namespace. They
will come back as a dict with these names as keys and their
:func:`repr` as values.
        user_expressions : dict, optional
            A dict with string keys and expressions to pull from the user's
            namespace. They will come back as a dict with these names as keys
            and their :func:`repr` as values.
        allow_stdin : bool, optional
            Flag for whether the kernel may send stdin requests to the
            frontend while the code is being executed.
Returns
-------
The msg_id of the message sent.
"""
if user_variables is None:
|
python
|
{
"resource": ""
}
|
q279895
|
ShellSocketChannel.complete
|
test
|
def complete(self, text, line, cursor_pos, block=None):
"""Tab complete text in the kernel's namespace.
Parameters
----------
text : str
The text to complete.
line : str
The full line of text that is the surrounding context for the
text to complete.
cursor_pos : int
The position of the cursor in the line where the completion was
requested.
block : str, optional
The full block of code in which the completion is
|
python
|
{
"resource": ""
}
|
q279896
|
ShellSocketChannel.object_info
|
test
|
def object_info(self, oname, detail_level=0):
"""Get metadata information about an object.
Parameters
----------
oname : str
A string specifying the object name.
detail_level : int, optional
The level of detail for the introspection (0-2)
Returns
-------
The msg_id of the message sent.
"""
content =
|
python
|
{
"resource": ""
}
|
q279897
|
ShellSocketChannel.history
|
test
|
def history(self, raw=True, output=False, hist_access_type='range', **kwargs):
"""Get entries from the history list.
Parameters
----------
raw : bool
If True, return the raw input.
output : bool
If True, then return the output as well.
hist_access_type : str
'range' (fill in session, start and stop params), 'tail' (fill in n)
or 'search' (fill in pattern param).
session : int
For a range request, the session from which to get lines. Session
numbers are positive integers; negative ones count back from the
current session.
start : int
The first line number of a history range.
stop : int
The final (excluded) line number of a history range.
n : int
The number of lines of history to get for a tail request.
|
python
|
{
"resource": ""
}
|
q279898
|
ShellSocketChannel.shutdown
|
test
|
def shutdown(self, restart=False):
"""Request an immediate kernel shutdown.
Upon receipt of the (empty) reply, client code can safely assume that
the kernel has shut down and it's safe to forcefully terminate it if
it's still alive.
The kernel will send the reply via a function registered
|
python
|
{
"resource": ""
}
|
q279899
|
SubSocketChannel.flush
|
test
|
def flush(self, timeout=1.0):
"""Immediately processes all pending messages on the SUB channel.
Callers should use this method to ensure that :method:`call_handlers`
has been called for all messages that have been received on the
0MQ SUB socket of this channel.
This method is thread safe.
Parameters
----------
timeout : float, optional
The maximum amount of time to spend flushing, in seconds. The
default is one second.
"""
# We do the IOLoop
|
python
|
{
"resource": ""
}
|
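
Every row above follows the same six-field schema described in the header. As a minimal sketch of how rows with this schema could be consumed programmatically (the dataset identifier and split name below are placeholders, not taken from this page), the Hugging Face `datasets` library can iterate such records:

```python
# Minimal sketch: iterating rows with the six-field schema shown above.
# "org/snippet-corpus" and split="train" are placeholder values, not the
# actual identifiers for this dataset.
from datasets import load_dataset

ds = load_dataset("org/snippet-corpus", split="train")

for row in ds:
    # Each record exposes the columns listed in the table header.
    expected = {"_id", "title", "partition", "text",
                "language", "meta_information"}
    assert expected <= set(row)
    # Example filter: Python snippets from the 'test' partition.
    if row["language"] == "python" and row["partition"] == "test":
        print(row["_id"], row["title"], len(row["text"]))
```

Filtering on the `partition` column mirrors the `test` value visible in the preview rows above.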