_id (stringlengths 2–7) | title (stringlengths 1–88) | partition (stringclasses 3 values) | text (stringlengths 75–19.8k) | language (stringclasses 1 value) | meta_information (dict)
---|---|---|---|---|---
q279700
|
RichIPythonWidget._copy_image
|
test
|
def _copy_image(self, name):
""" Copies the ImageResource with 'name' to the clipboard.
"""
image = self._get_image(name)
QtGui.QApplication.clipboard().setImage(image)
|
python
|
{
"resource": ""
}
|
q279701
|
RichIPythonWidget._get_image
|
test
|
def _get_image(self, name):
""" Returns the QImage stored as the ImageResource with 'name'.
"""
document = self._control.document()
image = document.resource(QtGui.QTextDocument.ImageResource,
QtCore.QUrl(name))
return image
|
python
|
{
"resource": ""
}
|
q279702
|
RichIPythonWidget._insert_img
|
test
|
def _insert_img(self, cursor, img, fmt):
""" insert a raw image, jpg or png """
try:
image = QtGui.QImage()
image.loadFromData(img, fmt.upper())
except ValueError:
self._insert_plain_text(cursor, 'Received invalid %s data.'%fmt)
else:
format = self._add_image(image)
cursor.insertBlock()
cursor.insertImage(format)
cursor.insertBlock()
|
python
|
{
"resource": ""
}
|
q279703
|
RichIPythonWidget._insert_svg
|
test
|
def _insert_svg(self, cursor, svg):
""" Insert raw SVG data into the widet.
"""
try:
image = svg_to_image(svg)
except ValueError:
self._insert_plain_text(cursor, 'Received invalid SVG data.')
else:
format = self._add_image(image)
self._name_to_svg_map[format.name()] = svg
cursor.insertBlock()
cursor.insertImage(format)
cursor.insertBlock()
|
python
|
{
"resource": ""
}
|
q279704
|
RichIPythonWidget._save_image
|
test
|
def _save_image(self, name, format='PNG'):
""" Shows a save dialog for the ImageResource with 'name'.
"""
dialog = QtGui.QFileDialog(self._control, 'Save Image')
dialog.setAcceptMode(QtGui.QFileDialog.AcceptSave)
dialog.setDefaultSuffix(format.lower())
dialog.setNameFilter('%s file (*.%s)' % (format, format.lower()))
if dialog.exec_():
filename = dialog.selectedFiles()[0]
image = self._get_image(name)
image.save(filename, format)
|
python
|
{
"resource": ""
}
|
q279705
|
ZMQInteractiveShell._exit_now_changed
|
test
|
def _exit_now_changed(self, name, old, new):
"""stop eventloop when exit_now fires"""
if new:
loop = ioloop.IOLoop.instance()
loop.add_timeout(time.time()+0.1, loop.stop)
|
python
|
{
"resource": ""
}
|
q279706
|
ZMQInteractiveShell.init_environment
|
test
|
def init_environment(self):
"""Configure the user's environment.
"""
env = os.environ
# These two ensure 'ls' produces nice coloring on BSD-derived systems
env['TERM'] = 'xterm-color'
env['CLICOLOR'] = '1'
# Since normal pagers don't work at all (over pexpect we don't have
# single-key control of the subprocess), try to disable paging in
# subprocesses as much as possible.
env['PAGER'] = 'cat'
env['GIT_PAGER'] = 'cat'
# And install the payload version of page.
install_payload_page()
|
python
|
{
"resource": ""
}
|
q279707
|
ZMQInteractiveShell.auto_rewrite_input
|
test
|
def auto_rewrite_input(self, cmd):
"""Called to show the auto-rewritten input for autocall and friends.
FIXME: this payload is currently not correctly processed by the
frontend.
"""
new = self.prompt_manager.render('rewrite') + cmd
payload = dict(
source='IPython.zmq.zmqshell.ZMQInteractiveShell.auto_rewrite_input',
transformed_input=new,
)
self.payload_manager.write_payload(payload)
|
python
|
{
"resource": ""
}
|
q279708
|
ZMQInteractiveShell.ask_exit
|
test
|
def ask_exit(self):
"""Engage the exit actions."""
self.exit_now = True
payload = dict(
source='IPython.zmq.zmqshell.ZMQInteractiveShell.ask_exit',
exit=True,
keepkernel=self.keepkernel_on_exit,
)
self.payload_manager.write_payload(payload)
|
python
|
{
"resource": ""
}
|
q279709
|
ZMQInteractiveShell.set_next_input
|
test
|
def set_next_input(self, text):
"""Send the specified text to the frontend to be presented at the next
input cell."""
payload = dict(
source='IPython.zmq.zmqshell.ZMQInteractiveShell.set_next_input',
text=text
)
self.payload_manager.write_payload(payload)
|
python
|
{
"resource": ""
}
|
q279710
|
HandyConfigParser.read
|
test
|
def read(self, filename):
"""Read a filename as UTF-8 configuration data."""
kwargs = {}
if sys.version_info >= (3, 2):
kwargs['encoding'] = "utf-8"
return configparser.RawConfigParser.read(self, filename, **kwargs)
|
python
|
{
"resource": ""
}
|
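A minimal usage sketch of the version-gated UTF-8 read above (assuming HandyConfigParser as defined; the '.coveragerc' path is purely illustrative):
cp = HandyConfigParser()
files_read = cp.read('.coveragerc')  # encoding='utf-8' is added on Python >= 3.2
print(files_read)  # list of the filenames that were successfully parsed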
q279711
|
HandyConfigParser.getlist
|
test
|
def getlist(self, section, option):
"""Read a list of strings.
The value of `section` and `option` is treated as a comma- and newline-
separated list of strings. Each value is stripped of whitespace.
Returns the list of strings.
"""
value_list = self.get(section, option)
values = []
for value_line in value_list.split('\n'):
for value in value_line.split(','):
value = value.strip()
if value:
values.append(value)
return values
|
python
|
{
"resource": ""
}
|
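A self-contained sketch of the comma- and newline-splitting rule documented above; the sample value is made up:
value_list = "\n    src/*.py,\n    lib/*.py"
values = []
for value_line in value_list.split('\n'):
    for value in value_line.split(','):
        value = value.strip()
        if value:
            values.append(value)
print(values)  # ['src/*.py', 'lib/*.py']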
q279712
|
HandyConfigParser.getlinelist
|
test
|
def getlinelist(self, section, option):
"""Read a list of full-line strings.
The value of `section` and `option` is treated as a newline-separated
list of strings. Each value is stripped of whitespace.
Returns the list of strings.
"""
value_list = self.get(section, option)
return list(filter(None, value_list.split('\n')))
|
python
|
{
"resource": ""
}
|
q279713
|
CoverageConfig.from_environment
|
test
|
def from_environment(self, env_var):
"""Read configuration from the `env_var` environment variable."""
# Timidity: for nose users, read an environment variable. This is a
# cheap hack, since the rest of the command line arguments aren't
# recognized, but it solves some users' problems.
env = os.environ.get(env_var, '')
if env:
self.timid = ('--timid' in env)
|
python
|
{
"resource": ""
}
|
q279714
|
CoverageConfig.from_args
|
test
|
def from_args(self, **kwargs):
"""Read config values from `kwargs`."""
for k, v in iitems(kwargs):
if v is not None:
if k in self.MUST_BE_LIST and isinstance(v, string_class):
v = [v]
setattr(self, k, v)
|
python
|
{
"resource": ""
}
|
q279715
|
CoverageConfig.from_file
|
test
|
def from_file(self, filename):
"""Read configuration from a .rc file.
`filename` is a file name to read.
"""
self.attempted_config_files.append(filename)
cp = HandyConfigParser()
files_read = cp.read(filename)
if files_read is not None: # return value changed in 2.4
self.config_files.extend(files_read)
for option_spec in self.CONFIG_FILE_OPTIONS:
self.set_attr_from_config_option(cp, *option_spec)
# [paths] is special
if cp.has_section('paths'):
for option in cp.options('paths'):
self.paths[option] = cp.getlist('paths', option)
|
python
|
{
"resource": ""
}
|
q279716
|
CoverageConfig.set_attr_from_config_option
|
test
|
def set_attr_from_config_option(self, cp, attr, where, type_=''):
"""Set an attribute on self if it exists in the ConfigParser."""
section, option = where.split(":")
if cp.has_option(section, option):
method = getattr(cp, 'get'+type_)
setattr(self, attr, method(section, option))
|
python
|
{
"resource": ""
}
|
q279717
|
expand_user
|
test
|
def expand_user(path):
"""Expand '~'-style usernames in strings.
This is similar to :func:`os.path.expanduser`, but it computes and returns
extra information that will be useful if the input was being used in
computing completions, and you wish to return the completions with the
original '~' instead of its expanded value.
Parameters
----------
path : str
String to be expanded. If no ~ is present, the output is the same as the
input.
Returns
-------
newpath : str
Result of ~ expansion in the input path.
tilde_expand : bool
Whether any expansion was performed or not.
tilde_val : str
The value that ~ was replaced with.
"""
# Default values
tilde_expand = False
tilde_val = ''
newpath = path
if path.startswith('~'):
tilde_expand = True
rest = len(path)-1
newpath = os.path.expanduser(path)
if rest:
tilde_val = newpath[:-rest]
else:
tilde_val = newpath
return newpath, tilde_expand, tilde_val
|
python
|
{
"resource": ""
}
|
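A hedged example of the three-tuple contract in the docstring above; the expanded home directory in the comments is illustrative:
# Assuming expand_user as defined above.
newpath, tilde_expand, tilde_val = expand_user('~/notebooks')
# e.g. ('/home/alice/notebooks', True, '/home/alice'), depending on
# the current user's home directory.
print(expand_user('/tmp/data'))  # ('/tmp/data', False, '') -- no '~', unchanged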
q279718
|
CompletionSplitter.delims
|
test
|
def delims(self, delims):
"""Set the delimiters for line splitting."""
expr = '[' + ''.join('\\'+ c for c in delims) + ']'
self._delim_re = re.compile(expr)
self._delims = delims
self._delim_expr = expr
|
python
|
{
"resource": ""
}
|
q279719
|
CompletionSplitter.split_line
|
test
|
def split_line(self, line, cursor_pos=None):
"""Split a line of text with a cursor at the given position.
"""
l = line if cursor_pos is None else line[:cursor_pos]
return self._delim_re.split(l)[-1]
|
python
|
{
"resource": ""
}
|
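A self-contained sketch of the delimiter mechanics behind delims/split_line above; the delimiter set is an illustrative subset, not IPython's actual DELIMS constant:
import re

delims = ' \t\n([,'
expr = '[' + ''.join('\\' + c for c in delims) + ']'  # same construction as delims()
delim_re = re.compile(expr)

# split_line keeps only the fragment after the last delimiter before the cursor:
line = 'print(os.pa'
print(delim_re.split(line)[-1])  # 'os.pa'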
q279720
|
Completer.global_matches
|
test
|
def global_matches(self, text):
"""Compute matches when text is a simple name.
Return a list of all keywords, built-in functions and names currently
defined in self.namespace or self.global_namespace that match.
"""
#print 'Completer->global_matches, txt=%r' % text # dbg
matches = []
match_append = matches.append
n = len(text)
for lst in [keyword.kwlist,
__builtin__.__dict__.keys(),
self.namespace.keys(),
self.global_namespace.keys()]:
for word in lst:
if word[:n] == text and word != "__builtins__":
match_append(word)
return matches
|
python
|
{
"resource": ""
}
|
q279721
|
Completer.attr_matches
|
test
|
def attr_matches(self, text):
"""Compute matches when text contains a dot.
Assuming the text is of the form NAME.NAME....[NAME], and is
evaluatable in self.namespace or self.global_namespace, it will be
evaluated and its attributes (as revealed by dir()) are used as
possible completions. (For class instances, class members are
also considered.)
WARNING: this can still invoke arbitrary C code, if an object
with a __getattr__ hook is evaluated.
"""
#io.rprint('Completer->attr_matches, txt=%r' % text) # dbg
# Another option, seems to work great. Catches things like ''.<tab>
m = re.match(r"(\S+(\.\w+)*)\.(\w*)$", text)
if m:
expr, attr = m.group(1, 3)
elif self.greedy:
m2 = re.match(r"(.+)\.(\w*)$", self.line_buffer)
if not m2:
return []
expr, attr = m2.group(1,2)
else:
return []
try:
obj = eval(expr, self.namespace)
except:
try:
obj = eval(expr, self.global_namespace)
except:
return []
if self.limit_to__all__ and hasattr(obj, '__all__'):
words = get__all__entries(obj)
else:
words = dir2(obj)
try:
words = generics.complete_object(obj, words)
except TryNext:
pass
except Exception:
# Silence errors from completion function
#raise # dbg
pass
# Build match list to return
n = len(attr)
res = ["%s.%s" % (expr, w) for w in words if w[:n] == attr ]
return res
|
python
|
{
"resource": ""
}
|
q279722
|
IPCompleter._greedy_changed
|
test
|
def _greedy_changed(self, name, old, new):
"""update the splitter and readline delims when greedy is changed"""
if new:
self.splitter.delims = GREEDY_DELIMS
else:
self.splitter.delims = DELIMS
if self.readline:
self.readline.set_completer_delims(self.splitter.delims)
|
python
|
{
"resource": ""
}
|
q279723
|
IPCompleter.file_matches
|
test
|
def file_matches(self, text):
"""Match filenames, expanding ~USER type strings.
Most of the seemingly convoluted logic in this completer is an
attempt to handle filenames with spaces in them. And yet it's not
quite perfect, because Python's readline doesn't expose all of the
GNU readline details needed for this to be done correctly.
For a filename with a space in it, the printed completions will be
only the parts after what's already been typed (instead of the
full completions, as is normally done). I don't think with the
current (as of Python 2.3) Python readline it's possible to do
better."""
#io.rprint('Completer->file_matches: <%r>' % text) # dbg
# chars that require escaping with backslash - i.e. chars
# that readline treats incorrectly as delimiters, but we
# don't want to treat as delimiters in filename matching
# when escaped with backslash
if text.startswith('!'):
text = text[1:]
text_prefix = '!'
else:
text_prefix = ''
text_until_cursor = self.text_until_cursor
# track strings with open quotes
open_quotes = has_open_quotes(text_until_cursor)
if '(' in text_until_cursor or '[' in text_until_cursor:
lsplit = text
else:
try:
# arg_split ~ shlex.split, but with unicode bugs fixed by us
lsplit = arg_split(text_until_cursor)[-1]
except ValueError:
# typically an unmatched ", or backslash without escaped char.
if open_quotes:
lsplit = text_until_cursor.split(open_quotes)[-1]
else:
return []
except IndexError:
# tab pressed on empty line
lsplit = ""
if not open_quotes and lsplit != protect_filename(lsplit):
# if protectables are found, do matching on the whole escaped name
has_protectables = True
text0,text = text,lsplit
else:
has_protectables = False
text = os.path.expanduser(text)
if text == "":
return [text_prefix + protect_filename(f) for f in self.glob("*")]
# Compute the matches from the filesystem
m0 = self.clean_glob(text.replace('\\',''))
if has_protectables:
# If we had protectables, we need to revert our changes to the
# beginning of filename so that we don't double-write the part
# of the filename we have so far
len_lsplit = len(lsplit)
matches = [text_prefix + text0 +
protect_filename(f[len_lsplit:]) for f in m0]
else:
if open_quotes:
# if we have a string with an open quote, we don't need to
# protect the names at all (and we _shouldn't_, as it
# would cause bugs when the filesystem call is made).
matches = m0
else:
matches = [text_prefix +
protect_filename(f) for f in m0]
#io.rprint('mm', matches) # dbg
# Mark directories in input list by appending '/' to their names.
matches = [x+'/' if os.path.isdir(x) else x for x in matches]
return matches
|
python
|
{
"resource": ""
}
|
q279724
|
IPCompleter.alias_matches
|
test
|
def alias_matches(self, text):
"""Match internal system aliases"""
#print 'Completer->alias_matches:',text,'lb',self.text_until_cursor # dbg
# if we are not in the first 'item', alias matching
# doesn't make sense - unless we are starting with 'sudo' command.
main_text = self.text_until_cursor.lstrip()
if ' ' in main_text and not main_text.startswith('sudo'):
return []
text = os.path.expanduser(text)
aliases = self.alias_table.keys()
if text == '':
return aliases
else:
return [a for a in aliases if a.startswith(text)]
|
python
|
{
"resource": ""
}
|
q279725
|
IPCompleter.python_matches
|
test
|
def python_matches(self,text):
"""Match attributes or global python names"""
#io.rprint('Completer->python_matches, txt=%r' % text) # dbg
if "." in text:
try:
matches = self.attr_matches(text)
if text.endswith('.') and self.omit__names:
if self.omit__names == 1:
# true if txt is _not_ a __ name, false otherwise:
no__name = (lambda txt:
re.match(r'.*\.__.*?__',txt) is None)
else:
# true if txt is _not_ a _ name, false otherwise:
no__name = (lambda txt:
re.match(r'.*\._.*?',txt) is None)
matches = filter(no__name, matches)
except NameError:
# catches <undefined attributes>.<tab>
matches = []
else:
matches = self.global_matches(text)
return matches
|
python
|
{
"resource": ""
}
|
q279726
|
IPCompleter._default_arguments
|
test
|
def _default_arguments(self, obj):
"""Return the list of default arguments of obj if it is callable,
or empty list otherwise."""
if not (inspect.isfunction(obj) or inspect.ismethod(obj)):
# for classes, check for __init__,__new__
if inspect.isclass(obj):
obj = (getattr(obj,'__init__',None) or
getattr(obj,'__new__',None))
# for all others, check if they are __call__able
elif hasattr(obj, '__call__'):
obj = obj.__call__
# XXX: is there a way to handle the builtins ?
try:
args,_,_1,defaults = inspect.getargspec(obj)
if defaults:
return args[-len(defaults):]
except TypeError: pass
return []
|
python
|
{
"resource": ""
}
|
q279727
|
IPCompleter.complete
|
test
|
def complete(self, text=None, line_buffer=None, cursor_pos=None):
"""Find completions for the given text and line context.
This is called successively with state == 0, 1, 2, ... until it
returns None. The completion should begin with 'text'.
Note that both the text and the line_buffer are optional, but at least
one of them must be given.
Parameters
----------
text : string, optional
Text to perform the completion on. If not given, the line buffer
is split using the instance's CompletionSplitter object.
line_buffer : string, optional
If not given, the completer attempts to obtain the current line
buffer via readline. This keyword allows clients which are
requesting for text completions in non-readline contexts to inform
the completer of the entire text.
cursor_pos : int, optional
Index of the cursor in the full line buffer. Should be provided by
remote frontends where kernel has no access to frontend state.
Returns
-------
text : str
Text that was actually used in the completion.
matches : list
A list of completion matches.
"""
#io.rprint('\nCOMP1 %r %r %r' % (text, line_buffer, cursor_pos)) # dbg
# if the cursor position isn't given, the only sane assumption we can
# make is that it's at the end of the line (the common case)
if cursor_pos is None:
cursor_pos = len(line_buffer) if text is None else len(text)
# if text is either None or an empty string, rely on the line buffer
if not text:
text = self.splitter.split_line(line_buffer, cursor_pos)
# If no line buffer is given, assume the input text is all there was
if line_buffer is None:
line_buffer = text
self.line_buffer = line_buffer
self.text_until_cursor = self.line_buffer[:cursor_pos]
#io.rprint('COMP2 %r %r %r' % (text, line_buffer, cursor_pos)) # dbg
# Start with a clean slate of completions
self.matches[:] = []
custom_res = self.dispatch_custom_completer(text)
if custom_res is not None:
# did custom completers produce something?
self.matches = custom_res
else:
# Extend the list of completions with the results of each
# matcher, so we return results to the user from all
# namespaces.
if self.merge_completions:
self.matches = []
for matcher in self.matchers:
try:
self.matches.extend(matcher(text))
except:
# Show the ugly traceback if the matcher causes an
# exception, but do NOT crash the kernel!
sys.excepthook(*sys.exc_info())
else:
for matcher in self.matchers:
self.matches = matcher(text)
if self.matches:
break
# FIXME: we should extend our api to return a dict with completions for
# different types of objects. The rlcomplete() method could then
# simply collapse the dict into a list for readline, but we'd have
# richer completion semantics in other environments.
self.matches = sorted(set(self.matches))
#io.rprint('COMP TEXT, MATCHES: %r, %r' % (text, self.matches)) # dbg
return text, self.matches
|
python
|
{
"resource": ""
}
|
q279728
|
IPCompleter.rlcomplete
|
test
|
def rlcomplete(self, text, state):
"""Return the state-th possible completion for 'text'.
This is called successively with state == 0, 1, 2, ... until it
returns None. The completion should begin with 'text'.
Parameters
----------
text : string
Text to perform the completion on.
state : int
Counter used by readline.
"""
if state==0:
self.line_buffer = line_buffer = self.readline.get_line_buffer()
cursor_pos = self.readline.get_endidx()
#io.rprint("\nRLCOMPLETE: %r %r %r" %
# (text, line_buffer, cursor_pos) ) # dbg
# if there is only a tab on a line with only whitespace, instead of
# the mostly useless 'do you want to see all million completions'
# message, just do the right thing and give the user his tab!
# Incidentally, this enables pasting of tabbed text from an editor
# (as long as autoindent is off).
# It should be noted that at least pyreadline still shows file
# completions - is there a way around it?
# don't apply this on 'dumb' terminals, such as emacs buffers, so
# we don't interfere with their own tab-completion mechanism.
if not (self.dumb_terminal or line_buffer.strip()):
self.readline.insert_text('\t')
sys.stdout.flush()
return None
# Note: debugging exceptions that may occur in completion is very
# tricky, because readline unconditionally silences them. So if
# during development you suspect a bug in the completion code, turn
# this flag on temporarily by uncommenting the second form (don't
# flip the value in the first line, as the '# dbg' marker can be
# automatically detected and is used elsewhere).
DEBUG = False
#DEBUG = True # dbg
if DEBUG:
try:
self.complete(text, line_buffer, cursor_pos)
except:
import traceback; traceback.print_exc()
else:
# The normal production version is here
# This method computes the self.matches array
self.complete(text, line_buffer, cursor_pos)
try:
return self.matches[state]
except IndexError:
return None
|
python
|
{
"resource": ""
}
|
q279729
|
DictDB._match_one
|
test
|
def _match_one(self, rec, tests):
"""Check if a specific record matches tests."""
for key,test in tests.iteritems():
if not test(rec.get(key, None)):
return False
return True
|
python
|
{
"resource": ""
}
|
q279730
|
DictDB._match
|
test
|
def _match(self, check):
"""Find all the matches for a check dict."""
matches = []
tests = {}
for k,v in check.iteritems():
if isinstance(v, dict):
tests[k] = CompositeFilter(v)
else:
tests[k] = lambda o: o==v
for rec in self._records.itervalues():
if self._match_one(rec, tests):
matches.append(copy(rec))
return matches
|
python
|
{
"resource": ""
}
|
q279731
|
DictDB._extract_subdict
|
test
|
def _extract_subdict(self, rec, keys):
"""extract subdict of keys"""
d = {}
d['msg_id'] = rec['msg_id']
for key in keys:
d[key] = rec[key]
return copy(d)
|
python
|
{
"resource": ""
}
|
q279732
|
DisplayHook.quiet
|
test
|
def quiet(self):
"""Should we silence the display hook because of ';'?"""
# do not print output if input ends in ';'
try:
cell = self.shell.history_manager.input_hist_parsed[self.prompt_count]
if cell.rstrip().endswith(';'):
return True
except IndexError:
# some uses of ipshellembed may fail here
pass
return False
|
python
|
{
"resource": ""
}
|
q279733
|
DisplayHook.write_output_prompt
|
test
|
def write_output_prompt(self):
"""Write the output prompt.
The default implementation simply writes the prompt to
``io.stdout``.
"""
# Use write, not print which adds an extra space.
io.stdout.write(self.shell.separate_out)
outprompt = self.shell.prompt_manager.render('out')
if self.do_full_cache:
io.stdout.write(outprompt)
|
python
|
{
"resource": ""
}
|
q279734
|
DisplayHook.write_format_data
|
test
|
def write_format_data(self, format_dict):
"""Write the format data dict to the frontend.
This default version of this method simply writes the plain text
representation of the object to ``io.stdout``. Subclasses should
override this method to send the entire `format_dict` to the
frontends.
Parameters
----------
format_dict : dict
The format dict for the object passed to `sys.displayhook`.
"""
# We want to print because we want to always make sure we have a
# newline, even if all the prompt separators are ''. This is the
# standard IPython behavior.
result_repr = format_dict['text/plain']
if '\n' in result_repr:
# So that multi-line strings line up with the left column of
# the screen, instead of having the output prompt mess up
# their first line.
# We use the prompt template instead of the expanded prompt
# because the expansion may add ANSI escapes that will interfere
# with our ability to determine whether or not we should add
# a newline.
prompt_template = self.shell.prompt_manager.out_template
if prompt_template and not prompt_template.endswith('\n'):
# But avoid extraneous empty lines.
result_repr = '\n' + result_repr
print >>io.stdout, result_repr
|
python
|
{
"resource": ""
}
|
q279735
|
DisplayHook.log_output
|
test
|
def log_output(self, format_dict):
"""Log the output."""
if self.shell.logger.log_output:
self.shell.logger.log_write(format_dict['text/plain'], 'output')
self.shell.history_manager.output_hist_reprs[self.prompt_count] = \
format_dict['text/plain']
|
python
|
{
"resource": ""
}
|
q279736
|
Freezable.raise_if_freezed
|
test
|
def raise_if_freezed(self):
'''raise `InvalidOperationException` if the object is frozen.'''
if self.is_freezed:
name = type(self).__name__
raise InvalidOperationException('obj {name} is frozen.'.format(name=name))
|
python
|
{
"resource": ""
}
|
q279737
|
mysql_timestamp_converter
|
test
|
def mysql_timestamp_converter(s):
"""Convert a MySQL TIMESTAMP to a Timestamp object."""
# MySQL>4.1 returns TIMESTAMP in the same format as DATETIME
if s[4] == '-': return DateTime_or_None(s)
s = s + "0"*(14-len(s)) # padding
parts = map(int, filter(None, (s[:4],s[4:6],s[6:8],
s[8:10],s[10:12],s[12:14])))
try:
return Timestamp(*parts)
except (SystemExit, KeyboardInterrupt):
raise
except:
return None
|
python
|
{
"resource": ""
}
|
q279738
|
Kernel._eventloop_changed
|
test
|
def _eventloop_changed(self, name, old, new):
"""schedule call to eventloop from IOLoop"""
loop = ioloop.IOLoop.instance()
loop.add_timeout(time.time()+0.1, self.enter_eventloop)
|
python
|
{
"resource": ""
}
|
q279739
|
Kernel.dispatch_control
|
test
|
def dispatch_control(self, msg):
"""dispatch control requests"""
idents,msg = self.session.feed_identities(msg, copy=False)
try:
msg = self.session.unserialize(msg, content=True, copy=False)
except:
self.log.error("Invalid Control Message", exc_info=True)
return
self.log.debug("Control received: %s", msg)
header = msg['header']
msg_id = header['msg_id']
msg_type = header['msg_type']
handler = self.control_handlers.get(msg_type, None)
if handler is None:
self.log.error("UNKNOWN CONTROL MESSAGE TYPE: %r", msg_type)
else:
try:
handler(self.control_stream, idents, msg)
except Exception:
self.log.error("Exception in control handler:", exc_info=True)
|
python
|
{
"resource": ""
}
|
q279740
|
Kernel.dispatch_shell
|
test
|
def dispatch_shell(self, stream, msg):
"""dispatch shell requests"""
# flush control requests first
if self.control_stream:
self.control_stream.flush()
idents,msg = self.session.feed_identities(msg, copy=False)
try:
msg = self.session.unserialize(msg, content=True, copy=False)
except:
self.log.error("Invalid Message", exc_info=True)
return
header = msg['header']
msg_id = header['msg_id']
msg_type = msg['header']['msg_type']
# Print some info about this message and leave a '--->' marker, so it's
# easier to trace visually the message chain when debugging. Each
# handler prints its message at the end.
self.log.debug('\n*** MESSAGE TYPE:%s***', msg_type)
self.log.debug(' Content: %s\n --->\n ', msg['content'])
if msg_id in self.aborted:
self.aborted.remove(msg_id)
# is it safe to assume a msg_id will not be resubmitted?
reply_type = msg_type.split('_')[0] + '_reply'
status = {'status' : 'aborted'}
sub = {'engine' : self.ident}
sub.update(status)
reply_msg = self.session.send(stream, reply_type, subheader=sub,
content=status, parent=msg, ident=idents)
return
handler = self.shell_handlers.get(msg_type, None)
if handler is None:
self.log.error("UNKNOWN MESSAGE TYPE: %r", msg_type)
else:
# ensure default_int_handler during handler call
sig = signal(SIGINT, default_int_handler)
try:
handler(stream, idents, msg)
except Exception:
self.log.error("Exception in message handler:", exc_info=True)
finally:
signal(SIGINT, sig)
|
python
|
{
"resource": ""
}
|
q279741
|
Kernel.start
|
test
|
def start(self):
"""register dispatchers for streams"""
self.shell.exit_now = False
if self.control_stream:
self.control_stream.on_recv(self.dispatch_control, copy=False)
def make_dispatcher(stream):
def dispatcher(msg):
return self.dispatch_shell(stream, msg)
return dispatcher
for s in self.shell_streams:
s.on_recv(make_dispatcher(s), copy=False)
|
python
|
{
"resource": ""
}
|
q279742
|
Kernel.do_one_iteration
|
test
|
def do_one_iteration(self):
"""step eventloop just once"""
if self.control_stream:
self.control_stream.flush()
for stream in self.shell_streams:
# handle at most one request per iteration
stream.flush(zmq.POLLIN, 1)
stream.flush(zmq.POLLOUT)
|
python
|
{
"resource": ""
}
|
q279743
|
Kernel._publish_pyin
|
test
|
def _publish_pyin(self, code, parent, execution_count):
"""Publish the code request on the pyin stream."""
self.session.send(self.iopub_socket, u'pyin',
{u'code':code, u'execution_count': execution_count},
parent=parent, ident=self._topic('pyin')
)
|
python
|
{
"resource": ""
}
|
q279744
|
Kernel.abort_request
|
test
|
def abort_request(self, stream, ident, parent):
"""abort a specifig msg by id"""
msg_ids = parent['content'].get('msg_ids', None)
if isinstance(msg_ids, basestring):
msg_ids = [msg_ids]
if not msg_ids:
self.abort_queues()
for mid in msg_ids:
self.aborted.add(str(mid))
content = dict(status='ok')
reply_msg = self.session.send(stream, 'abort_reply', content=content,
parent=parent, ident=ident)
self.log.debug("%s", reply_msg)
|
python
|
{
"resource": ""
}
|
q279745
|
Kernel.clear_request
|
test
|
def clear_request(self, stream, idents, parent):
"""Clear our namespace."""
self.shell.reset(False)
msg = self.session.send(stream, 'clear_reply', ident=idents, parent=parent,
content = dict(status='ok'))
|
python
|
{
"resource": ""
}
|
q279746
|
Kernel._topic
|
test
|
def _topic(self, topic):
"""prefixed topic for IOPub messages"""
if self.int_id >= 0:
base = "engine.%i" % self.int_id
else:
base = "kernel.%s" % self.ident
return py3compat.cast_bytes("%s.%s" % (base, topic))
|
python
|
{
"resource": ""
}
|
q279747
|
Kernel._at_shutdown
|
test
|
def _at_shutdown(self):
"""Actions taken at shutdown by the kernel, called by python's atexit.
"""
# io.rprint("Kernel at_shutdown") # dbg
if self._shutdown_message is not None:
self.session.send(self.iopub_socket, self._shutdown_message, ident=self._topic('shutdown'))
self.log.debug("%s", self._shutdown_message)
[ s.flush(zmq.POLLOUT) for s in self.shell_streams ]
|
python
|
{
"resource": ""
}
|
q279748
|
IsolationPlugin.beforeContext
|
test
|
def beforeContext(self):
"""Copy sys.modules onto my mod stack
"""
mods = sys.modules.copy()
self._mod_stack.append(mods)
|
python
|
{
"resource": ""
}
|
q279749
|
IsolationPlugin.afterContext
|
test
|
def afterContext(self):
"""Pop my mod stack and restore sys.modules to the state
it was in when mod stack was pushed.
"""
mods = self._mod_stack.pop()
to_del = [ m for m in sys.modules.keys() if m not in mods ]
if to_del:
log.debug('removing sys modules entries: %s', to_del)
for mod in to_del:
del sys.modules[mod]
sys.modules.update(mods)
|
python
|
{
"resource": ""
}
|
q279750
|
absdir
|
test
|
def absdir(path):
"""Return absolute, normalized path to directory, if it exists; None
otherwise.
"""
if not os.path.isabs(path):
path = os.path.normpath(os.path.abspath(os.path.join(os.getcwd(),
path)))
if path is None or not os.path.isdir(path):
return None
return path
|
python
|
{
"resource": ""
}
|
q279751
|
file_like
|
test
|
def file_like(name):
"""A name is file-like if it is a path that exists, or it has a
directory part, or it ends in .py, or it isn't a legal python
identifier.
"""
return (os.path.exists(name)
or os.path.dirname(name)
or name.endswith('.py')
or not ident_re.match(os.path.splitext(name)[0]))
|
python
|
{
"resource": ""
}
|
q279752
|
isclass
|
test
|
def isclass(obj):
"""Is obj a class? Inspect's isclass is too liberal and returns True
for objects that can't be subclasses of anything.
"""
obj_type = type(obj)
return obj_type in class_types or issubclass(obj_type, type)
|
python
|
{
"resource": ""
}
|
q279753
|
ispackage
|
test
|
def ispackage(path):
"""
Is this path a package directory?
>>> ispackage('nose')
True
>>> ispackage('unit_tests')
False
>>> ispackage('nose/plugins')
True
>>> ispackage('nose/loader.py')
False
"""
if os.path.isdir(path):
# at least the end of the path must be a legal python identifier
# and __init__.py[co] must exist
end = os.path.basename(path)
if ident_re.match(end):
for init in ('__init__.py', '__init__.pyc', '__init__.pyo'):
if os.path.isfile(os.path.join(path, init)):
return True
if sys.platform.startswith('java') and \
os.path.isfile(os.path.join(path, '__init__$py.class')):
return True
return False
|
python
|
{
"resource": ""
}
|
q279754
|
getpackage
|
test
|
def getpackage(filename):
"""
Find the full dotted package name for a given python source file
name. Returns None if the file is not a python source file.
>>> getpackage('foo.py')
'foo'
>>> getpackage('biff/baf.py')
'baf'
>>> getpackage('nose/util.py')
'nose.util'
Works for directories too.
>>> getpackage('nose')
'nose'
>>> getpackage('nose/plugins')
'nose.plugins'
And __init__ files stuck onto directories
>>> getpackage('nose/plugins/__init__.py')
'nose.plugins'
Absolute paths also work.
>>> path = os.path.abspath(os.path.join('nose', 'plugins'))
>>> getpackage(path)
'nose.plugins'
"""
src_file = src(filename)
if not src_file.endswith('.py') and not ispackage(src_file):
return None
base, ext = os.path.splitext(os.path.basename(src_file))
if base == '__init__':
mod_parts = []
else:
mod_parts = [base]
path, part = os.path.split(os.path.split(src_file)[0])
while part:
if ispackage(os.path.join(path, part)):
mod_parts.append(part)
else:
break
path, part = os.path.split(path)
mod_parts.reverse()
return '.'.join(mod_parts)
|
python
|
{
"resource": ""
}
|
q279755
|
ln
|
test
|
def ln(label):
"""Draw a 70-char-wide divider, with label in the middle.
>>> ln('hello there')
'---------------------------- hello there -----------------------------'
"""
label_len = len(label) + 2
chunk = (70 - label_len) // 2
out = '%s %s %s' % ('-' * chunk, label, '-' * chunk)
pad = 70 - len(out)
if pad > 0:
out = out + ('-' * pad)
return out
|
python
|
{
"resource": ""
}
|
q279756
|
regex_last_key
|
test
|
def regex_last_key(regex):
"""Sort key function factory that puts items that match a
regular expression last.
>>> from nose.config import Config
>>> from nose.pyversion import sort_list
>>> c = Config()
>>> regex = c.testMatch
>>> entries = ['.', '..', 'a_test', 'src', 'lib', 'test', 'foo.py']
>>> sort_list(entries, regex_last_key(regex))
>>> entries
['.', '..', 'foo.py', 'lib', 'src', 'a_test', 'test']
"""
def k(obj):
if regex.search(obj):
return (1, obj)
return (0, obj)
return k
|
python
|
{
"resource": ""
}
|
q279757
|
transplant_func
|
test
|
def transplant_func(func, module):
"""
Make a function imported from module A appear as if it is located
in module B.
>>> from pprint import pprint
>>> pprint.__module__
'pprint'
>>> pp = transplant_func(pprint, __name__)
>>> pp.__module__
'nose.util'
The original function is not modified.
>>> pprint.__module__
'pprint'
Calling the transplanted function calls the original.
>>> pp([1, 2])
[1, 2]
>>> pprint([1,2])
[1, 2]
"""
from nose.tools import make_decorator
def newfunc(*arg, **kw):
return func(*arg, **kw)
newfunc = make_decorator(func)(newfunc)
newfunc.__module__ = module
return newfunc
|
python
|
{
"resource": ""
}
|
q279758
|
transplant_class
|
test
|
def transplant_class(cls, module):
"""
Make a class appear to reside in `module`, rather than the module in which
it is actually defined.
>>> from nose.failure import Failure
>>> Failure.__module__
'nose.failure'
>>> Nf = transplant_class(Failure, __name__)
>>> Nf.__module__
'nose.util'
>>> Nf.__name__
'Failure'
"""
class C(cls):
pass
C.__module__ = module
C.__name__ = cls.__name__
return C
|
python
|
{
"resource": ""
}
|
q279759
|
get_system_cpu_times
|
test
|
def get_system_cpu_times():
"""Return system CPU times as a namedtuple."""
user, nice, system, idle = _psutil_osx.get_system_cpu_times()
return _cputimes_ntuple(user, nice, system, idle)
|
python
|
{
"resource": ""
}
|
q279760
|
Process.get_process_cmdline
|
test
|
def get_process_cmdline(self):
"""Return process cmdline as a list of arguments."""
if not pid_exists(self.pid):
raise NoSuchProcess(self.pid, self._process_name)
return _psutil_osx.get_process_cmdline(self.pid)
|
python
|
{
"resource": ""
}
|
q279761
|
Process.get_open_files
|
test
|
def get_open_files(self):
"""Return files opened by process."""
if self.pid == 0:
return []
files = []
rawlist = _psutil_osx.get_process_open_files(self.pid)
for path, fd in rawlist:
if isfile_strict(path):
ntuple = nt_openfile(path, fd)
files.append(ntuple)
return files
|
python
|
{
"resource": ""
}
|
q279762
|
Process.get_connections
|
test
|
def get_connections(self, kind='inet'):
"""Return etwork connections opened by a process as a list of
namedtuples.
"""
if kind not in conn_tmap:
raise ValueError("invalid %r kind argument; choose between %s"
% (kind, ', '.join([repr(x) for x in conn_tmap])))
families, types = conn_tmap[kind]
ret = _psutil_osx.get_process_connections(self.pid, families, types)
return [nt_connection(*conn) for conn in ret]
|
python
|
{
"resource": ""
}
|
q279763
|
user_has_group
|
test
|
def user_has_group(user, group, superuser_skip=True):
"""
Check if a user is in a certain group.
By default, the check is skipped for superusers.
"""
if user.is_superuser and superuser_skip:
return True
return user.groups.filter(name=group).exists()
|
python
|
{
"resource": ""
}
|
q279764
|
resolve_class
|
test
|
def resolve_class(class_path):
"""
Load a class by a fully qualified class_path,
eg. myapp.models.ModelName
"""
modulepath, classname = class_path.rsplit('.', 1)
module = __import__(modulepath, fromlist=[classname])
return getattr(module, classname)
|
python
|
{
"resource": ""
}
|
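A quick hedged usage example; 'collections.OrderedDict' is just a stand-in for any importable dotted class path:
# Assuming resolve_class as defined above.
cls = resolve_class('collections.OrderedDict')
print(cls)          # <class 'collections.OrderedDict'>
od = cls(a=1, b=2)  # the return value is the class object itself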
q279765
|
usage_percent
|
test
|
def usage_percent(used, total, _round=None):
"""Calculate percentage usage of 'used' against 'total'."""
try:
ret = (used / total) * 100
except ZeroDivisionError:
ret = 0
if _round is not None:
return round(ret, _round)
else:
return ret
|
python
|
{
"resource": ""
}
|
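Worked examples of the percentage math above, including the zero-division guard; the values are arbitrary:
# Assuming usage_percent as defined above.
print(usage_percent(50.0, 200.0))          # 25.0
print(usage_percent(1.0, 3.0, _round=2))   # 33.33
print(usage_percent(5, 0))                 # 0 -- the ZeroDivisionError guard
# Caveat: under Python 2, integer arguments truncate (1 / 3 == 0),
# so float inputs are the safe choice there.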
q279766
|
memoize
|
test
|
def memoize(f):
"""A simple memoize decorator for functions."""
cache = {}
def memf(*x):
if x not in cache:
cache[x] = f(*x)
return cache[x]
return memf
|
python
|
{
"resource": ""
}
|
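A usage sketch for the memoize decorator above:
# Assuming memoize as defined above.
@memoize
def fib(n):
    return n if n < 2 else fib(n - 1) + fib(n - 2)

print(fib(30))  # 832040, with linearly many underlying calls instead of exponential
Note that the cache key is the positional-argument tuple, so keyword arguments are not supported and unhashable arguments raise TypeError.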
q279767
|
deprecated
|
test
|
def deprecated(replacement=None):
"""A decorator which can be used to mark functions as deprecated."""
def outer(fun):
msg = "psutil.%s is deprecated" % fun.__name__
if replacement is not None:
msg += "; use %s instead" % replacement
if fun.__doc__ is None:
fun.__doc__ = msg
@wraps(fun)
def inner(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
return fun(*args, **kwargs)
return inner
return outer
|
python
|
{
"resource": ""
}
|
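A hedged usage sketch; old_api/new_api are hypothetical names, and the snippet assumes the deprecated() factory above together with its warnings/wraps imports:
import warnings

@deprecated(replacement='new_api()')
def old_api():
    return 42

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter('always')
    old_api()
print(caught[0].message)  # psutil.old_api is deprecated; use new_api() instead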
q279768
|
Communicator._login
|
test
|
def _login(self):
"""
Login into Google Docs with user authentication info.
"""
try:
self.gd_client = gdata.docs.client.DocsClient()
self.gd_client.ClientLogin(self.email, self.password, self.source)
except RequestError as e:
raise PODocsError(e)
|
python
|
{
"resource": ""
}
|
q279769
|
Communicator._get_gdocs_key
|
test
|
def _get_gdocs_key(self):
"""
Parse GDocs key from Spreadsheet url.
"""
try:
args = urlparse.parse_qs(urlparse.urlparse(self.url).query)
self.key = args['key'][0]
except KeyError as e:
raise PODocsError(e)
|
python
|
{
"resource": ""
}
|
q279770
|
Communicator._ensure_temp_path_exists
|
test
|
def _ensure_temp_path_exists(self):
"""
Make sure temp directory exists and create one if it does not.
"""
try:
if not os.path.exists(self.temp_path):
os.mkdir(self.temp_path)
except OSError as e:
raise PODocsError(e)
|
python
|
{
"resource": ""
}
|
q279771
|
Communicator._clear_temp
|
test
|
def _clear_temp(self):
"""
Clear temp directory from created csv and ods files during
communicator operations.
"""
temp_files = [LOCAL_ODS, GDOCS_TRANS_CSV, GDOCS_META_CSV,
LOCAL_TRANS_CSV, LOCAL_META_CSV]
for temp_file in temp_files:
file_path = os.path.join(self.temp_path, temp_file)
if os.path.exists(file_path):
os.remove(file_path)
|
python
|
{
"resource": ""
}
|
q279772
|
Communicator._upload_file_to_gdoc
|
test
|
def _upload_file_to_gdoc(
self, file_path,
content_type='application/x-vnd.oasis.opendocument.spreadsheet'):
"""
Uploads file to GDocs spreadsheet.
Content type can be provided as argument, default is ods.
"""
try:
entry = self.gd_client.GetResourceById(self.key)
media = gdata.data.MediaSource(
file_path=file_path, content_type=content_type)
self.gd_client.UpdateResource(
entry, media=media, update_metadata=True)
except (RequestError, IOError) as e:
raise PODocsError(e)
|
python
|
{
"resource": ""
}
|
q279773
|
Communicator.synchronize
|
test
|
def synchronize(self):
"""
Synchronize local po files with translations on GDocs Spreadsheet.
Downloads two csv files, merges them and converts into po files
structure. If new msgids appeared in po files, this method creates
new ods with appended content and sends it to GDocs.
"""
gdocs_trans_csv = os.path.join(self.temp_path, GDOCS_TRANS_CSV)
gdocs_meta_csv = os.path.join(self.temp_path, GDOCS_META_CSV)
local_trans_csv = os.path.join(self.temp_path, LOCAL_TRANS_CSV)
local_meta_csv = os.path.join(self.temp_path, LOCAL_META_CSV)
try:
entry = self._download_csv_from_gdocs(gdocs_trans_csv,
gdocs_meta_csv)
except PODocsError as e:
if 'Sheet 1 not found' in str(e) \
or 'Conversion failed unexpectedly' in str(e):
self.upload()
else:
raise PODocsError(e)
else:
self._merge_local_and_gdoc(entry, local_trans_csv, local_meta_csv,
gdocs_trans_csv, gdocs_meta_csv)
try:
csv_to_po(local_trans_csv, local_meta_csv,
self.locale_root, self.po_files_path, self.header)
except IOError as e:
raise PODocsError(e)
self._clear_temp()
|
python
|
{
"resource": ""
}
|
q279774
|
Communicator.download
|
test
|
def download(self):
"""
Download csv files from GDocs and convert them into po files structure.
"""
trans_csv_path = os.path.realpath(
os.path.join(self.temp_path, GDOCS_TRANS_CSV))
meta_csv_path = os.path.realpath(
os.path.join(self.temp_path, GDOCS_META_CSV))
self._download_csv_from_gdocs(trans_csv_path, meta_csv_path)
try:
csv_to_po(trans_csv_path, meta_csv_path,
self.locale_root, self.po_files_path, header=self.header)
except IOError as e:
raise PODocsError(e)
self._clear_temp()
|
python
|
{
"resource": ""
}
|
q279775
|
Communicator.upload
|
test
|
def upload(self):
"""
Upload all po files to GDocs ignoring conflicts.
This method looks for all msgids in po_files and sends them
as ods to GDocs Spreadsheet.
"""
local_ods_path = os.path.join(self.temp_path, LOCAL_ODS)
try:
po_to_ods(self.languages, self.locale_root,
self.po_files_path, local_ods_path)
except (IOError, OSError) as e:
raise PODocsError(e)
self._upload_file_to_gdoc(local_ods_path)
self._clear_temp()
|
python
|
{
"resource": ""
}
|
q279776
|
Communicator.clear
|
test
|
def clear(self):
"""
Clear GDoc Spreadsheet by sending empty csv file.
"""
empty_file_path = os.path.join(self.temp_path, 'empty.csv')
try:
empty_file = open(empty_file_path, 'w')
empty_file.write(',')
empty_file.close()
except IOError as e:
raise PODocsError(e)
self._upload_file_to_gdoc(empty_file_path, content_type='text/csv')
os.remove(empty_file_path)
|
python
|
{
"resource": ""
}
|
q279777
|
InternalIPKernel.new_qt_console
|
test
|
def new_qt_console(self, evt=None):
"""start a new qtconsole connected to our kernel"""
return connect_qtconsole(self.ipkernel.connection_file, profile=self.ipkernel.profile)
|
python
|
{
"resource": ""
}
|
q279778
|
check_url_accessibility
|
test
|
def check_url_accessibility(url, timeout=10):
'''
Check whether the URL is accessible and returns HTTP 200 OK;
raises ValidationError if it is not
'''
if url == 'localhost':
url = 'http://127.0.0.1'
try:
req = urllib2.urlopen(url, timeout=timeout)
if (req.getcode()==200):
return True
except Exception:
pass
fail("URL '%s' is not accessible from this machine" % url)
|
python
|
{
"resource": ""
}
|
q279779
|
url_has_contents
|
test
|
def url_has_contents(url, contents, case_sensitive=False, timeout=10):
'''
Check whether the HTML page contains the given content and return a boolean
'''
try:
req = urllib2.urlopen(url, timeout=timeout)
except Exception, _:
return False
else:
rep = req.read()
if (not case_sensitive and rep.lower().find(contents.lower()) >= 0) or (case_sensitive and rep.find(contents) >= 0):
return True
else:
return False
|
python
|
{
"resource": ""
}
|
q279780
|
get_response_code
|
test
|
def get_response_code(url, timeout=10):
'''
Visit the URL and return the HTTP response code as an int
'''
try:
req = urllib2.urlopen(url, timeout=timeout)
except HTTPError, e:
return e.getcode()
except Exception, _:
fail("Couldn't reach the URL '%s'" % url)
else:
return req.getcode()
|
python
|
{
"resource": ""
}
|
q279781
|
compare_content_type
|
test
|
def compare_content_type(url, content_type):
'''
Compare the content-type header of the url param with the content_type param and return a boolean
@param url -> string e.g. http://127.0.0.1/index
@param content_type -> string e.g. text/html
'''
try:
response = urllib2.urlopen(url)
except:
return False
return response.headers.type == content_type
|
python
|
{
"resource": ""
}
|
q279782
|
compare_response_code
|
test
|
def compare_response_code(url, code):
'''
Compare the response code of the url param with the code param and return a boolean
@param url -> string e.g. http://127.0.0.1/index
@param code -> int e.g. 404, 500, 400 ..etc
'''
try:
response = urllib2.urlopen(url)
except HTTPError as e:
return e.code == code
except:
return False
return response.code == code
|
python
|
{
"resource": ""
}
|
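A hedged usage sketch, in the same Python 2 / urllib2 style as the code above; the URLs are illustrative:
# Assuming compare_response_code as defined above.
if compare_response_code('http://127.0.0.1/index', 200):
    print('endpoint is healthy')
if compare_response_code('http://127.0.0.1/missing', 404):
    print('missing page returns 404, as expected')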
q279783
|
DisplayPublisher._validate_data
|
test
|
def _validate_data(self, source, data, metadata=None):
"""Validate the display data.
Parameters
----------
source : str
The fully dotted name of the callable that created the data, like
:func:`foo.bar.my_formatter`.
data : dict
The format data dictionary.
metadata : dict
Any metadata for the data.
"""
if not isinstance(source, basestring):
raise TypeError('source must be a str, got: %r' % source)
if not isinstance(data, dict):
raise TypeError('data must be a dict, got: %r' % data)
if metadata is not None:
if not isinstance(metadata, dict):
raise TypeError('metadata must be a dict, got: %r' % metadata)
|
python
|
{
"resource": ""
}
|
q279784
|
DisplayPublisher.clear_output
|
test
|
def clear_output(self, stdout=True, stderr=True, other=True):
"""Clear the output of the cell receiving output."""
if stdout:
print('\033[2K\r', file=io.stdout, end='')
io.stdout.flush()
if stderr:
print('\033[2K\r', file=io.stderr, end='')
io.stderr.flush()
|
python
|
{
"resource": ""
}
|
q279785
|
find_cmd
|
test
|
def find_cmd(cmd):
"""Find absolute path to executable cmd in a cross platform manner.
This function tries to determine the full path to a command line program
using `which` on Unix/Linux/OS X and `win32api` on Windows. Most of the
time it will use the version that is first on the user's `PATH`. If
cmd is `python` return `sys.executable`.
Warning, don't use this to find IPython command line programs as there
is a risk you will find the wrong one. Instead find those using the
following code and looking for the application itself::
from IPython.utils.path import get_ipython_module_path
from IPython.utils.process import pycmd2argv
argv = pycmd2argv(get_ipython_module_path('IPython.frontend.terminal.ipapp'))
Parameters
----------
cmd : str
The command line program to look for.
"""
if cmd == 'python':
return os.path.abspath(sys.executable)
try:
path = _find_cmd(cmd).rstrip()
except OSError:
raise FindCmdError('command could not be found: %s' % cmd)
# which returns empty if not found
if path == '':
raise FindCmdError('command could not be found: %s' % cmd)
return os.path.abspath(path)
|
python
|
{
"resource": ""
}
|
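Two hedged calls against the contract documented above:
# Assuming find_cmd and FindCmdError as defined/imported above.
print(find_cmd('python'))  # always os.path.abspath(sys.executable), per the special case
try:
    find_cmd('no-such-command-xyz')
except FindCmdError as e:
    print(e)  # command could not be found: no-such-command-xyz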
q279786
|
code_unit_factory
|
test
|
def code_unit_factory(morfs, file_locator):
"""Construct a list of CodeUnits from polymorphic inputs.
`morfs` is a module or a filename, or a list of same.
`file_locator` is a FileLocator that can help resolve filenames.
Returns a list of CodeUnit objects.
"""
# Be sure we have a list.
if not isinstance(morfs, (list, tuple)):
morfs = [morfs]
# On Windows, the shell doesn't expand wildcards. Do it here.
globbed = []
for morf in morfs:
if isinstance(morf, string_class) and ('?' in morf or '*' in morf):
globbed.extend(glob.glob(morf))
else:
globbed.append(morf)
morfs = globbed
code_units = [CodeUnit(morf, file_locator) for morf in morfs]
return code_units
|
python
|
{
"resource": ""
}
|
q279787
|
CodeUnit.flat_rootname
|
test
|
def flat_rootname(self):
"""A base for a flat filename to correspond to this code unit.
Useful for writing files about the code where you want all the files in
the same directory, but need to differentiate same-named files from
different directories.
For example, the file a/b/c.py might return 'a_b_c'
"""
if self.modname:
return self.modname.replace('.', '_')
else:
root = os.path.splitdrive(self.name)[1]
return root.replace('\\', '_').replace('/', '_').replace('.', '_')
|
python
|
{
"resource": ""
}
|
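A standalone sketch of the flattening rule above; it assumes, as CodeUnit appears to, that `name` already has its extension stripped:
import os

def flatten(name):
    # Mirror the filename branch of flat_rootname: drop the drive, then
    # map path separators and dots to underscores.
    root = os.path.splitdrive(name)[1]
    return root.replace('\\', '_').replace('/', '_').replace('.', '_')

print(flatten('a/b/c'))  # 'a_b_c', matching the docstring example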
q279788
|
CodeUnit.source_file
|
test
|
def source_file(self):
"""Return an open file for reading the source of the code unit."""
if os.path.exists(self.filename):
# A regular text file: open it.
return open_source(self.filename)
# Maybe it's in a zip file?
source = self.file_locator.get_zip_data(self.filename)
if source is not None:
return StringIO(source)
# Couldn't find source.
raise CoverageException(
"No source for code '%s'." % self.filename
)
|
python
|
{
"resource": ""
}
|
q279789
|
CodeUnit.should_be_python
|
test
|
def should_be_python(self):
"""Does it seem like this file should contain Python?
This is used to decide if a file reported as part of the execution of
a program was really likely to have contained Python in the first
place.
"""
# Get the file extension.
_, ext = os.path.splitext(self.filename)
# Anything named *.py* should be Python.
if ext.startswith('.py'):
return True
# A file with no extension should be Python.
if not ext:
return True
# Everything else is probably not Python.
return False
|
python
|
{
"resource": ""
}
|
q279790
|
_total_seconds
|
test
|
def _total_seconds(td):
"""timedelta.total_seconds was added in 2.7"""
try:
# Python >= 2.7
return td.total_seconds()
except AttributeError:
# Python 2.6
return 1e-6 * (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6)
|
python
|
{
"resource": ""
}
|
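A quick check of the Python 2.6 fallback arithmetic against a known value; the timedelta is arbitrary:
from datetime import timedelta

td = timedelta(days=1, seconds=30, microseconds=500000)
manual = 1e-6 * (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6)
print(manual)              # 86430.5, the 2.6 fallback formula
print(td.total_seconds())  # 86430.5 on Python >= 2.7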
q279791
|
AsyncResult.get
|
test
|
def get(self, timeout=-1):
"""Return the result when it arrives.
If `timeout` is not ``None`` and the result does not arrive within
`timeout` seconds then ``TimeoutError`` is raised. If the
remote call raised an exception then that exception will be reraised
by get() inside a `RemoteError`.
"""
if not self.ready():
self.wait(timeout)
if self._ready:
if self._success:
return self._result
else:
raise self._exception
else:
raise error.TimeoutError("Result not ready.")
|
python
|
{
"resource": ""
}
|
q279792
|
AsyncResult.wait
|
test
|
def wait(self, timeout=-1):
"""Wait until the result is available or until `timeout` seconds pass.
This method always returns None.
"""
if self._ready:
return
self._ready = self._client.wait(self.msg_ids, timeout)
if self._ready:
try:
results = map(self._client.results.get, self.msg_ids)
self._result = results
if self._single_result:
r = results[0]
if isinstance(r, Exception):
raise r
else:
results = error.collect_exceptions(results, self._fname)
self._result = self._reconstruct_result(results)
except Exception, e:
self._exception = e
self._success = False
else:
self._success = True
finally:
self._metadata = map(self._client.metadata.get, self.msg_ids)
self._wait_for_outputs(10)
|
python
|
{
"resource": ""
}
|
q279793
|
AsyncResult.get_dict
|
test
|
def get_dict(self, timeout=-1):
"""Get the results as a dict, keyed by engine_id.
timeout behavior is described in `get()`.
"""
results = self.get(timeout)
engine_ids = [ md['engine_id'] for md in self._metadata ]
bycount = sorted(engine_ids, key=lambda k: engine_ids.count(k))
maxcount = bycount.count(bycount[-1])
if maxcount > 1:
raise ValueError("Cannot build dict, %i jobs ran on engine #%i"%(
maxcount, bycount[-1]))
return dict(zip(engine_ids,results))
|
python
|
{
"resource": ""
}
|
q279794
|
AsyncResult.abort
|
test
|
def abort(self):
"""abort my tasks."""
assert not self.ready(), "Can't abort, I am already done!"
return self._client.abort(self.msg_ids, targets=self._targets, block=True)
|
python
|
{
"resource": ""
}
|
q279795
|
AsyncResult.elapsed
|
test
|
def elapsed(self):
"""elapsed time since initial submission"""
if self.ready():
return self.wall_time
now = submitted = datetime.now()
for msg_id in self.msg_ids:
if msg_id in self._client.metadata:
stamp = self._client.metadata[msg_id]['submitted']
if stamp and stamp < submitted:
submitted = stamp
return _total_seconds(now-submitted)
|
python
|
{
"resource": ""
}
|
q279796
|
AsyncResult.wait_interactive
|
test
|
def wait_interactive(self, interval=1., timeout=None):
"""interactive wait, printing progress at regular intervals"""
N = len(self)
tic = time.time()
while not self.ready() and (timeout is None or time.time() - tic <= timeout):
self.wait(interval)
clear_output()
print("%4i/%i tasks finished after %4i s" % (self.progress, N, self.elapsed), end="")
sys.stdout.flush()
print()
print("done")
|
python
|
{
"resource": ""
}
|
q279797
|
AsyncResult._republish_displaypub
|
test
|
def _republish_displaypub(self, content, eid):
"""republish individual displaypub content dicts"""
try:
ip = get_ipython()
except NameError:
# displaypub is meaningless outside IPython
return
md = content['metadata'] or {}
md['engine'] = eid
ip.display_pub.publish(content['source'], content['data'], md)
|
python
|
{
"resource": ""
}
|
q279798
|
AsyncResult._wait_for_outputs
|
test
|
def _wait_for_outputs(self, timeout=-1):
"""wait for the 'status=idle' message that indicates we have all outputs
"""
if not self._success:
# don't wait on errors
return
tic = time.time()
while not all(md['outputs_ready'] for md in self._metadata):
time.sleep(0.01)
self._client._flush_iopub(self._client._iopub_socket)
if timeout >= 0 and time.time() > tic + timeout:
break
|
python
|
{
"resource": ""
}
|
q279799
|
AsyncHubResult.wait
|
test
|
def wait(self, timeout=-1):
"""wait for result to complete."""
start = time.time()
if self._ready:
return
local_ids = filter(lambda msg_id: msg_id in self._client.outstanding, self.msg_ids)
local_ready = self._client.wait(local_ids, timeout)
if local_ready:
remote_ids = filter(lambda msg_id: msg_id not in self._client.results, self.msg_ids)
if not remote_ids:
self._ready = True
else:
rdict = self._client.result_status(remote_ids, status_only=False)
pending = rdict['pending']
while pending and (timeout < 0 or time.time() < start+timeout):
rdict = self._client.result_status(remote_ids, status_only=False)
pending = rdict['pending']
if pending:
time.sleep(0.1)
if not pending:
self._ready = True
if self._ready:
try:
results = map(self._client.results.get, self.msg_ids)
self._result = results
if self._single_result:
r = results[0]
if isinstance(r, Exception):
raise r
else:
results = error.collect_exceptions(results, self._fname)
self._result = self._reconstruct_result(results)
except Exception, e:
self._exception = e
self._success = False
else:
self._success = True
finally:
self._metadata = map(self._client.metadata.get, self.msg_ids)
|
python
|
{
"resource": ""
}
|