_id (string, 2-7 chars) | title (string, 1-88 chars) | partition (string, 3 classes) | text (string, 75-19.8k chars) | language (string, 1 class) | meta_information (dict) |
---|---|---|---|---|---|
q280000
|
interpret_distro_name
|
test
|
def interpret_distro_name(location, basename, metadata,
py_version=None, precedence=SOURCE_DIST, platform=None
):
"""Generate alternative interpretations of a source distro name
Note: if `location` is a filesystem filename, you should call
``pkg_resources.normalize_path()`` on it before passing it to this
routine!
"""
# Generate alternative interpretations of a source distro name
# Because some packages are ambiguous as to name/versions split
# e.g. "adns-python-1.1.0", "egenix-mx-commercial", etc.
    # So, we generate each possible interpretation (e.g. "adns, python-1.1.0"
# "adns-python, 1.1.0", and "adns-python-1.1.0, no version"). In practice,
# the spurious interpretations should be ignored, because in the event
# there's also an "adns" package, the spurious "python-1.1.0" version will
# compare lower than any numeric version number, and is therefore unlikely
# to match a request for it. It's still a potential problem, though, and
# in the long run PyPI and the distutils should go for "safe" names and
# versions in distribution archive names (sdist and bdist).
parts = basename.split('-')
if not py_version:
for i,p in enumerate(parts[2:]):
if len(p)==5 and p.startswith('py2.'):
return # It's a bdist_dumb, not an sdist -- bail out
for p in range(1,len(parts)+1):
yield Distribution(
location, metadata, '-'.join(parts[:p]), '-'.join(parts[p:]),
py_version=py_version, precedence = precedence,
platform = platform
)
|
python
|
{
"resource": ""
}
|
q280001
|
open_with_auth
|
test
|
def open_with_auth(url):
"""Open a urllib2 request, handling HTTP authentication"""
scheme, netloc, path, params, query, frag = urlparse.urlparse(url)
# Double scheme does not raise on Mac OS X as revealed by a
# failing test. We would expect "nonnumeric port". Refs #20.
if netloc.endswith(':'):
raise httplib.InvalidURL("nonnumeric port: ''")
if scheme in ('http', 'https'):
auth, host = urllib2.splituser(netloc)
else:
auth = None
if auth:
auth = "Basic " + _encode_auth(auth)
new_url = urlparse.urlunparse((scheme,host,path,params,query,frag))
request = urllib2.Request(new_url)
request.add_header("Authorization", auth)
else:
request = urllib2.Request(url)
request.add_header('User-Agent', user_agent)
fp = urllib2.urlopen(request)
if auth:
# Put authentication info back into request URL if same host,
# so that links found on the page will work
s2, h2, path2, param2, query2, frag2 = urlparse.urlparse(fp.url)
if s2==scheme and h2==host:
fp.url = urlparse.urlunparse((s2,netloc,path2,param2,query2,frag2))
return fp
|
python
|
{
"resource": ""
}
|
q280002
|
PackageIndex.fetch_distribution
|
test
|
def fetch_distribution(self,
requirement, tmpdir, force_scan=False, source=False, develop_ok=False,
local_index=None
):
"""Obtain a distribution suitable for fulfilling `requirement`
`requirement` must be a ``pkg_resources.Requirement`` instance.
If necessary, or if the `force_scan` flag is set, the requirement is
searched for in the (online) package index as well as the locally
installed packages. If a distribution matching `requirement` is found,
the returned distribution's ``location`` is the value you would have
gotten from calling the ``download()`` method with the matching
distribution's URL or filename. If no matching distribution is found,
``None`` is returned.
If the `source` flag is set, only source distributions and source
checkout links will be considered. Unless the `develop_ok` flag is
set, development and system eggs (i.e., those using the ``.egg-info``
format) will be ignored.
"""
# process a Requirement
self.info("Searching for %s", requirement)
skipped = {}
dist = None
def find(req, env=None):
if env is None:
env = self
# Find a matching distribution; may be called more than once
for dist in env[req.key]:
if dist.precedence==DEVELOP_DIST and not develop_ok:
if dist not in skipped:
self.warn("Skipping development or system egg: %s",dist)
skipped[dist] = 1
continue
if dist in req and (dist.precedence<=SOURCE_DIST or not source):
self.info("Best match: %s", dist)
return dist.clone(
location=self.download(dist.location, tmpdir)
)
if force_scan:
self.prescan()
self.find_packages(requirement)
dist = find(requirement)
if local_index is not None:
dist = dist or find(requirement, local_index)
if dist is None and self.to_scan is not None:
self.prescan()
dist = find(requirement)
if dist is None and not force_scan:
self.find_packages(requirement)
dist = find(requirement)
if dist is None:
self.warn(
"No local packages or download links found for %s%s",
(source and "a source distribution of " or ""),
requirement,
)
return dist
|
python
|
{
"resource": ""
}
|
q280003
|
get_parent
|
test
|
def get_parent(obj):
'''
get parent from obj.
'''
names = obj.__qualname__.split('.')[:-1]
if '<locals>' in names: # locals function
raise ValueError('cannot get parent from locals object.')
module = sys.modules[obj.__module__]
parent = module
while names:
parent = getattr(parent, names.pop(0))
return parent
|
python
|
{
"resource": ""
}
|
q280004
|
EnginePUBHandler.root_topic
|
test
|
def root_topic(self):
"""this is a property, in case the handler is created
before the engine gets registered with an id"""
if isinstance(getattr(self.engine, 'id', None), int):
return "engine.%i"%self.engine.id
else:
return "engine"
|
python
|
{
"resource": ""
}
|
q280005
|
render_template
|
test
|
def render_template(content, context):
""" renders context aware template """
rendered = Template(content).render(Context(context))
return rendered
|
python
|
{
"resource": ""
}
|
q280006
|
Capture.configure
|
test
|
def configure(self, options, conf):
"""Configure plugin. Plugin is enabled by default.
"""
self.conf = conf
if not options.capture:
self.enabled = False
|
python
|
{
"resource": ""
}
|
q280007
|
Capture.formatError
|
test
|
def formatError(self, test, err):
"""Add captured output to error report.
"""
test.capturedOutput = output = self.buffer
self._buf = None
if not output:
# Don't return None as that will prevent other
# formatters from formatting and remove earlier formatters
# formats, instead return the err we got
return err
ec, ev, tb = err
return (ec, self.addCaptureToErr(ev, output), tb)
|
python
|
{
"resource": ""
}
|
q280008
|
splitBy
|
test
|
def splitBy(data, num):
""" Turn a list to list of list """
return [data[i:i + num] for i in range(0, len(data), num)]
|
python
|
{
"resource": ""
}
|
q280009
|
convert_to_this_nbformat
|
test
|
def convert_to_this_nbformat(nb, orig_version=2, orig_minor=0):
"""Convert a notebook to the v3 format.
Parameters
----------
nb : NotebookNode
The Python representation of the notebook to convert.
orig_version : int
The original version of the notebook to convert.
orig_minor : int
The original minor version of the notebook to convert (only relevant for v >= 3).
"""
if orig_version == 1:
nb = v2.convert_to_this_nbformat(nb)
orig_version = 2
if orig_version == 2:
# Mark the original nbformat so consumers know it has been converted.
nb.nbformat = nbformat
nb.nbformat_minor = nbformat_minor
nb.orig_nbformat = 2
return nb
elif orig_version == 3:
if orig_minor != nbformat_minor:
nb.orig_nbformat_minor = orig_minor
nb.nbformat_minor = nbformat_minor
return nb
else:
raise ValueError('Cannot convert a notebook from v%s to v3' % orig_version)
|
python
|
{
"resource": ""
}
|
q280010
|
hex_to_rgb
|
test
|
def hex_to_rgb(color):
"""Convert a hex color to rgb integer tuple."""
if color.startswith('#'):
color = color[1:]
if len(color) == 3:
color = ''.join([c*2 for c in color])
if len(color) != 6:
return False
try:
r = int(color[:2],16)
g = int(color[2:4],16)
b = int(color[4:],16)
except ValueError:
return False
else:
return r,g,b
|
python
|
{
"resource": ""
}
|
q280011
|
get_colors
|
test
|
def get_colors(stylename):
"""Construct the keys to be used building the base stylesheet
    from a template."""
style = get_style_by_name(stylename)
fgcolor = style.style_for_token(Token.Text)['color'] or ''
if len(fgcolor) in (3,6):
# could be 'abcdef' or 'ace' hex, which needs '#' prefix
try:
int(fgcolor, 16)
except TypeError:
pass
else:
fgcolor = "#"+fgcolor
return dict(
bgcolor = style.background_color,
select = style.highlight_color,
fgcolor = fgcolor
)
|
python
|
{
"resource": ""
}
|
q280012
|
get_font
|
test
|
def get_font(family, fallback=None):
"""Return a font of the requested family, using fallback as alternative.
If a fallback is provided, it is used in case the requested family isn't
found. If no fallback is given, no alternative is chosen and Qt's internal
algorithms may automatically choose a fallback font.
Parameters
----------
family : str
A font name.
fallback : str
A font name.
Returns
-------
font : QFont object
"""
font = QtGui.QFont(family)
# Check whether we got what we wanted using QFontInfo, since exactMatch()
# is overly strict and returns false in too many cases.
font_info = QtGui.QFontInfo(font)
if fallback is not None and font_info.family() != family:
font = QtGui.QFont(fallback)
return font
|
python
|
{
"resource": ""
}
|
q280013
|
IPythonWidget._handle_execute_reply
|
test
|
def _handle_execute_reply(self, msg):
""" Reimplemented to support prompt requests.
"""
msg_id = msg['parent_header'].get('msg_id')
info = self._request_info['execute'].get(msg_id)
if info and info.kind == 'prompt':
number = msg['content']['execution_count'] + 1
self._show_interpreter_prompt(number)
self._request_info['execute'].pop(msg_id)
else:
super(IPythonWidget, self)._handle_execute_reply(msg)
|
python
|
{
"resource": ""
}
|
q280014
|
IPythonWidget._handle_history_reply
|
test
|
def _handle_history_reply(self, msg):
""" Implemented to handle history tail replies, which are only supported
by the IPython kernel.
"""
content = msg['content']
if 'history' not in content:
self.log.error("History request failed: %r"%content)
if content.get('status', '') == 'aborted' and \
not self._retrying_history_request:
# a *different* action caused this request to be aborted, so
# we should try again.
self.log.error("Retrying aborted history request")
# prevent multiple retries of aborted requests:
self._retrying_history_request = True
# wait out the kernel's queue flush, which is currently timed at 0.1s
time.sleep(0.25)
self.kernel_manager.shell_channel.history(hist_access_type='tail',n=1000)
else:
self._retrying_history_request = False
return
# reset retry flag
self._retrying_history_request = False
history_items = content['history']
self.log.debug("Received history reply with %i entries", len(history_items))
items = []
last_cell = u""
for _, _, cell in history_items:
cell = cell.rstrip()
if cell != last_cell:
items.append(cell)
last_cell = cell
self._set_history(items)
|
python
|
{
"resource": ""
}
|
q280015
|
IPythonWidget._handle_pyout
|
test
|
def _handle_pyout(self, msg):
""" Reimplemented for IPython-style "display hook".
"""
self.log.debug("pyout: %s", msg.get('content', ''))
if not self._hidden and self._is_from_this_session(msg):
content = msg['content']
prompt_number = content.get('execution_count', 0)
data = content['data']
if data.has_key('text/html'):
self._append_plain_text(self.output_sep, True)
self._append_html(self._make_out_prompt(prompt_number), True)
html = data['text/html']
self._append_plain_text('\n', True)
self._append_html(html + self.output_sep2, True)
elif data.has_key('text/plain'):
self._append_plain_text(self.output_sep, True)
self._append_html(self._make_out_prompt(prompt_number), True)
text = data['text/plain']
# If the repr is multiline, make sure we start on a new line,
# so that its lines are aligned.
if "\n" in text and not self.output_sep.endswith("\n"):
self._append_plain_text('\n', True)
self._append_plain_text(text + self.output_sep2, True)
|
python
|
{
"resource": ""
}
|
q280016
|
IPythonWidget._handle_display_data
|
test
|
def _handle_display_data(self, msg):
""" The base handler for the ``display_data`` message.
"""
self.log.debug("display: %s", msg.get('content', ''))
# For now, we don't display data from other frontends, but we
# eventually will as this allows all frontends to monitor the display
# data. But we need to figure out how to handle this in the GUI.
if not self._hidden and self._is_from_this_session(msg):
source = msg['content']['source']
data = msg['content']['data']
metadata = msg['content']['metadata']
# In the regular IPythonWidget, we simply print the plain text
# representation.
if data.has_key('text/html'):
html = data['text/html']
self._append_html(html, True)
elif data.has_key('text/plain'):
text = data['text/plain']
self._append_plain_text(text, True)
# This newline seems to be needed for text and html output.
self._append_plain_text(u'\n', True)
|
python
|
{
"resource": ""
}
|
q280017
|
IPythonWidget._started_channels
|
test
|
def _started_channels(self):
"""Reimplemented to make a history request and load %guiref."""
super(IPythonWidget, self)._started_channels()
self._load_guiref_magic()
self.kernel_manager.shell_channel.history(hist_access_type='tail',
n=1000)
|
python
|
{
"resource": ""
}
|
q280018
|
IPythonWidget.execute_file
|
test
|
def execute_file(self, path, hidden=False):
""" Reimplemented to use the 'run' magic.
"""
# Use forward slashes on Windows to avoid escaping each separator.
if sys.platform == 'win32':
path = os.path.normpath(path).replace('\\', '/')
# Perhaps we should not be using %run directly, but while we
# are, it is necessary to quote or escape filenames containing spaces
# or quotes.
# In earlier code here, to minimize escaping, we sometimes quoted the
# filename with single quotes. But to do this, this code must be
# platform-aware, because run uses shlex rather than python string
# parsing, so that:
# * In Win: single quotes can be used in the filename without quoting,
# and we cannot use single quotes to quote the filename.
# * In *nix: we can escape double quotes in a double quoted filename,
# but can't escape single quotes in a single quoted filename.
# So to keep this code non-platform-specific and simple, we now only
# use double quotes to quote filenames, and escape when needed:
if ' ' in path or "'" in path or '"' in path:
path = '"%s"' % path.replace('"', '\\"')
self.execute('%%run %s' % path, hidden=hidden)
|
python
|
{
"resource": ""
}
|
q280019
|
IPythonWidget._process_execute_error
|
test
|
def _process_execute_error(self, msg):
""" Reimplemented for IPython-style traceback formatting.
"""
content = msg['content']
traceback = '\n'.join(content['traceback']) + '\n'
if False:
# FIXME: For now, tracebacks come as plain text, so we can't use
# the html renderer yet. Once we refactor ultratb to produce
# properly styled tracebacks, this branch should be the default
traceback = traceback.replace(' ', ' ')
traceback = traceback.replace('\n', '<br/>')
ename = content['ename']
ename_styled = '<span class="error">%s</span>' % ename
traceback = traceback.replace(ename, ename_styled)
self._append_html(traceback)
else:
# This is the fallback for now, using plain text with ansi escapes
self._append_plain_text(traceback)
|
python
|
{
"resource": ""
}
|
q280020
|
IPythonWidget._process_execute_payload
|
test
|
def _process_execute_payload(self, item):
""" Reimplemented to dispatch payloads to handler methods.
"""
handler = self._payload_handlers.get(item['source'])
if handler is None:
# We have no handler for this type of payload, simply ignore it
return False
else:
handler(item)
return True
|
python
|
{
"resource": ""
}
|
q280021
|
IPythonWidget.set_default_style
|
test
|
def set_default_style(self, colors='lightbg'):
""" Sets the widget style to the class defaults.
Parameters:
-----------
colors : str, optional (default lightbg)
Whether to use the default IPython light background or dark
background or B&W style.
"""
colors = colors.lower()
if colors=='lightbg':
self.style_sheet = styles.default_light_style_sheet
self.syntax_style = styles.default_light_syntax_style
elif colors=='linux':
self.style_sheet = styles.default_dark_style_sheet
self.syntax_style = styles.default_dark_syntax_style
elif colors=='nocolor':
self.style_sheet = styles.default_bw_style_sheet
self.syntax_style = styles.default_bw_syntax_style
else:
raise KeyError("No such color scheme: %s"%colors)
|
python
|
{
"resource": ""
}
|
q280022
|
IPythonWidget._edit
|
test
|
def _edit(self, filename, line=None):
""" Opens a Python script for editing.
Parameters:
-----------
filename : str
A path to a local system file.
line : int, optional
A line of interest in the file.
"""
if self.custom_edit:
self.custom_edit_requested.emit(filename, line)
elif not self.editor:
self._append_plain_text('No default editor available.\n'
'Specify a GUI text editor in the `IPythonWidget.editor` '
'configurable to enable the %edit magic')
else:
try:
filename = '"%s"' % filename
if line and self.editor_line:
command = self.editor_line.format(filename=filename,
line=line)
else:
try:
command = self.editor.format()
except KeyError:
command = self.editor.format(filename=filename)
else:
command += ' ' + filename
except KeyError:
self._append_plain_text('Invalid editor command.\n')
else:
try:
Popen(command, shell=True)
except OSError:
msg = 'Opening editor with command "%s" failed.\n'
self._append_plain_text(msg % command)
|
python
|
{
"resource": ""
}
|
q280023
|
IPythonWidget._make_in_prompt
|
test
|
def _make_in_prompt(self, number):
""" Given a prompt number, returns an HTML In prompt.
"""
try:
body = self.in_prompt % number
except TypeError:
# allow in_prompt to leave out number, e.g. '>>> '
body = self.in_prompt
return '<span class="in-prompt">%s</span>' % body
|
python
|
{
"resource": ""
}
|
q280024
|
IPythonWidget._make_continuation_prompt
|
test
|
def _make_continuation_prompt(self, prompt):
""" Given a plain text version of an In prompt, returns an HTML
continuation prompt.
"""
end_chars = '...: '
space_count = len(prompt.lstrip('\n')) - len(end_chars)
body = ' ' * space_count + end_chars
return '<span class="in-prompt">%s</span>' % body
|
python
|
{
"resource": ""
}
|
q280025
|
IPythonWidget._style_sheet_changed
|
test
|
def _style_sheet_changed(self):
""" Set the style sheets of the underlying widgets.
"""
self.setStyleSheet(self.style_sheet)
if self._control is not None:
self._control.document().setDefaultStyleSheet(self.style_sheet)
bg_color = self._control.palette().window().color()
self._ansi_processor.set_background_color(bg_color)
if self._page_control is not None:
self._page_control.document().setDefaultStyleSheet(self.style_sheet)
|
python
|
{
"resource": ""
}
|
q280026
|
IPythonWidget._syntax_style_changed
|
test
|
def _syntax_style_changed(self):
""" Set the style for the syntax highlighter.
"""
if self._highlighter is None:
# ignore premature calls
return
if self.syntax_style:
self._highlighter.set_style(self.syntax_style)
else:
self._highlighter.set_style_sheet(self.style_sheet)
|
python
|
{
"resource": ""
}
|
q280027
|
CloudStack._handle_response
|
test
|
async def _handle_response(self, response: aiohttp.client_reqrep.ClientResponse, await_final_result: bool) -> dict:
"""
Handles the response returned from the CloudStack API. Some CloudStack API are implemented asynchronous, which
means that the API call returns just a job id. The actually expected API response is postponed and a specific
asyncJobResults API has to be polled using the job id to get the final result once the API call has been
processed.
:param response: The response returned by the aiohttp call.
:type response: aiohttp.client_reqrep.ClientResponse
:param await_final_result: Specifier that indicates whether the function should poll the asyncJobResult API
until the asynchronous API call has been processed
:type await_final_result: bool
:return: Dictionary containing the JSON response of the API call
:rtype: dict
"""
try:
data = await response.json()
except aiohttp.client_exceptions.ContentTypeError:
text = await response.text()
logging.debug('Content returned by server not of type "application/json"\n Content: {}'.format(text))
raise CloudStackClientException(message="Could not decode content. Server did not return json content!")
else:
data = self._transform_data(data)
if response.status != 200:
raise CloudStackClientException(message="Async CloudStack call failed!",
error_code=data.get("errorcode", response.status),
error_text=data.get("errortext"),
response=data)
while await_final_result and ('jobid' in data):
await asyncio.sleep(self.async_poll_latency)
data = await self.queryAsyncJobResult(jobid=data['jobid'])
if data['jobstatus']: # jobstatus is 0 for pending async CloudStack calls
if not data['jobresultcode']: # exit code is zero
try:
return data['jobresult']
except KeyError:
pass
logging.debug("Async CloudStack call returned {}".format(str(data)))
raise CloudStackClientException(message="Async CloudStack call failed!",
error_code=data.get("errorcode"),
error_text=data.get("errortext"),
response=data)
return data
|
python
|
{
"resource": ""
}
|
q280028
|
CloudStack._sign
|
test
|
def _sign(self, url_parameters: dict) -> dict:
"""
According to the CloudStack documentation, each request needs to be signed in order to authenticate the user
account executing the API command. The signature is generated using a combination of the api secret and a SHA-1
hash of the url parameters including the command string. In order to generate a unique identifier, the url
parameters have to be transformed to lower case and ordered alphabetically.
:param url_parameters: The url parameters of the API call including the command string
:type url_parameters: dict
:return: The url parameters including a new key, which contains the signature
:rtype: dict
"""
if url_parameters:
url_parameters.pop('signature', None) # remove potential existing signature from url parameters
request_string = urlencode(sorted(url_parameters.items()), safe='.-*_', quote_via=quote).lower()
digest = hmac.new(self.api_secret.encode('utf-8'), request_string.encode('utf-8'), hashlib.sha1).digest()
url_parameters['signature'] = base64.b64encode(digest).decode('utf-8').strip()
return url_parameters
|
python
|
{
"resource": ""
}
|
q280029
|
CloudStack._transform_data
|
test
|
def _transform_data(data: dict) -> dict:
"""
Each CloudStack API call returns a nested dictionary structure. The first level contains only one key indicating
the API that originated the response. This function removes that first level from the data returned to the
caller.
:param data: Response of the API call
:type data: dict
:return: Simplified response without the information about the API that originated the response.
:rtype: dict
"""
for key in data.keys():
return_value = data[key]
if isinstance(return_value, dict):
return return_value
return data
|
python
|
{
"resource": ""
}
|
q280030
|
virtual_memory
|
test
|
def virtual_memory():
"""System virtual memory as a namedutple."""
mem = _psutil_bsd.get_virtual_mem()
total, free, active, inactive, wired, cached, buffers, shared = mem
avail = inactive + cached + free
used = active + wired + cached
percent = usage_percent((total - avail), total, _round=1)
return nt_virtmem_info(total, avail, percent, used, free,
active, inactive, buffers, cached, shared, wired)
|
python
|
{
"resource": ""
}
|
q280031
|
get_system_cpu_times
|
test
|
def get_system_cpu_times():
"""Return system per-CPU times as a named tuple"""
user, nice, system, idle, irq = _psutil_bsd.get_system_cpu_times()
return _cputimes_ntuple(user, nice, system, idle, irq)
|
python
|
{
"resource": ""
}
|
q280032
|
Process.get_process_uids
|
test
|
def get_process_uids(self):
"""Return real, effective and saved user ids."""
real, effective, saved = _psutil_bsd.get_process_uids(self.pid)
return nt_uids(real, effective, saved)
|
python
|
{
"resource": ""
}
|
q280033
|
Process.get_process_gids
|
test
|
def get_process_gids(self):
"""Return real, effective and saved group ids."""
real, effective, saved = _psutil_bsd.get_process_gids(self.pid)
return nt_gids(real, effective, saved)
|
python
|
{
"resource": ""
}
|
q280034
|
Process.get_process_threads
|
test
|
def get_process_threads(self):
"""Return the number of threads belonging to the process."""
rawlist = _psutil_bsd.get_process_threads(self.pid)
retlist = []
for thread_id, utime, stime in rawlist:
ntuple = nt_thread(thread_id, utime, stime)
retlist.append(ntuple)
return retlist
|
python
|
{
"resource": ""
}
|
q280035
|
Process.get_open_files
|
test
|
def get_open_files(self):
"""Return files opened by process as a list of namedtuples."""
# XXX - C implementation available on FreeBSD >= 8 only
# else fallback on lsof parser
if hasattr(_psutil_bsd, "get_process_open_files"):
rawlist = _psutil_bsd.get_process_open_files(self.pid)
return [nt_openfile(path, fd) for path, fd in rawlist]
else:
lsof = _psposix.LsofParser(self.pid, self._process_name)
return lsof.get_process_open_files()
|
python
|
{
"resource": ""
}
|
q280036
|
pkg_commit_hash
|
test
|
def pkg_commit_hash(pkg_path):
"""Get short form of commit hash given directory `pkg_path`
We get the commit hash from (in order of preference):
* IPython.utils._sysinfo.commit
* git output, if we are in a git repository
If these fail, we return a not-found placeholder tuple
Parameters
----------
pkg_path : str
directory containing package
only used for getting commit from active repo
Returns
-------
hash_from : str
Where we got the hash from - description
hash_str : str
short form of hash
"""
# Try and get commit from written commit text file
if _sysinfo.commit:
return "installation", _sysinfo.commit
# maybe we are in a repository
proc = subprocess.Popen('git rev-parse --short HEAD',
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=pkg_path, shell=True)
repo_commit, _ = proc.communicate()
if repo_commit:
return 'repository', repo_commit.strip()
return '(none found)', '<not found>'
|
python
|
{
"resource": ""
}
|
q280037
|
pkg_info
|
test
|
def pkg_info(pkg_path):
"""Return dict describing the context of this package
Parameters
----------
pkg_path : str
path containing __init__.py for package
Returns
-------
context : dict
with named parameters of interest
"""
src, hsh = pkg_commit_hash(pkg_path)
return dict(
ipython_version=release.version,
ipython_path=pkg_path,
commit_source=src,
commit_hash=hsh,
sys_version=sys.version,
sys_executable=sys.executable,
sys_platform=sys.platform,
platform=platform.platform(),
os_name=os.name,
default_encoding=encoding.DEFAULT_ENCODING,
)
|
python
|
{
"resource": ""
}
|
q280038
|
sys_info
|
test
|
def sys_info():
"""Return useful information about IPython and the system, as a string.
Example
-------
In [2]: print sys_info()
{'commit_hash': '144fdae', # random
'commit_source': 'repository',
'ipython_path': '/home/fperez/usr/lib/python2.6/site-packages/IPython',
'ipython_version': '0.11.dev',
'os_name': 'posix',
'platform': 'Linux-2.6.35-22-generic-i686-with-Ubuntu-10.10-maverick',
'sys_executable': '/usr/bin/python',
'sys_platform': 'linux2',
'sys_version': '2.6.6 (r266:84292, Sep 15 2010, 15:52:39) \\n[GCC 4.4.5]'}
"""
p = os.path
path = p.dirname(p.abspath(p.join(__file__, '..')))
return pprint.pformat(pkg_info(path))
|
python
|
{
"resource": ""
}
|
q280039
|
_num_cpus_darwin
|
test
|
def _num_cpus_darwin():
"""Return the number of active CPUs on a Darwin system."""
p = subprocess.Popen(['sysctl','-n','hw.ncpu'],stdout=subprocess.PIPE)
return p.stdout.read()
|
python
|
{
"resource": ""
}
|
q280040
|
num_cpus
|
test
|
def num_cpus():
"""Return the effective number of CPUs in the system as an integer.
This cross-platform function makes an attempt at finding the total number of
available CPUs in the system, as returned by various underlying system and
python calls.
If it can't find a sensible answer, it returns 1 (though an error *may* make
it return a large positive number that's actually incorrect).
"""
# Many thanks to the Parallel Python project (http://www.parallelpython.com)
# for the names of the keys we needed to look up for this function. This
# code was inspired by their equivalent function.
ncpufuncs = {'Linux':_num_cpus_unix,
'Darwin':_num_cpus_darwin,
'Windows':_num_cpus_windows,
# On Vista, python < 2.5.2 has a bug and returns 'Microsoft'
# See http://bugs.python.org/issue1082 for details.
'Microsoft':_num_cpus_windows,
}
ncpufunc = ncpufuncs.get(platform.system(),
# default to unix version (Solaris, AIX, etc)
_num_cpus_unix)
try:
ncpus = max(1,int(ncpufunc()))
except:
ncpus = 1
return ncpus
|
python
|
{
"resource": ""
}
|
q280041
|
BaseCursor.nextset
|
test
|
def nextset(self):
"""Advance to the next result set.
Returns None if there are no more result sets.
"""
if self._executed:
self.fetchall()
del self.messages[:]
db = self._get_db()
nr = db.next_result()
if nr == -1:
return None
self._do_get_result()
self._post_get_result()
self._warning_check()
return 1
|
python
|
{
"resource": ""
}
|
q280042
|
CursorUseResultMixIn.fetchone
|
test
|
def fetchone(self):
"""Fetches a single row from the cursor."""
self._check_executed()
r = self._fetch_row(1)
if not r:
self._warning_check()
return None
self.rownumber = self.rownumber + 1
return r[0]
|
python
|
{
"resource": ""
}
|
q280043
|
CursorUseResultMixIn.fetchmany
|
test
|
def fetchmany(self, size=None):
"""Fetch up to size rows from the cursor. Result set may be smaller
than size. If size is not defined, cursor.arraysize is used."""
self._check_executed()
r = self._fetch_row(size or self.arraysize)
self.rownumber = self.rownumber + len(r)
if not r:
self._warning_check()
return r
|
python
|
{
"resource": ""
}
|
q280044
|
CursorUseResultMixIn.fetchall
|
test
|
def fetchall(self):
"""Fetchs all available rows from the cursor."""
self._check_executed()
r = self._fetch_row(0)
self.rownumber = self.rownumber + len(r)
self._warning_check()
return r
|
python
|
{
"resource": ""
}
|
q280045
|
connect
|
test
|
def connect(com, peers, tree, pub_url, root_id):
"""this function will be called on the engines"""
com.connect(peers, tree, pub_url, root_id)
|
python
|
{
"resource": ""
}
|
q280046
|
reads_json
|
test
|
def reads_json(s, **kwargs):
"""Read a JSON notebook from a string and return the NotebookNode object."""
nbf, minor, d = parse_json(s, **kwargs)
if nbf == 1:
nb = v1.to_notebook_json(d, **kwargs)
nb = v3.convert_to_this_nbformat(nb, orig_version=1)
elif nbf == 2:
nb = v2.to_notebook_json(d, **kwargs)
nb = v3.convert_to_this_nbformat(nb, orig_version=2)
elif nbf == 3:
nb = v3.to_notebook_json(d, **kwargs)
nb = v3.convert_to_this_nbformat(nb, orig_version=3, orig_minor=minor)
else:
raise NBFormatError('Unsupported JSON nbformat version: %i' % nbf)
return nb
|
python
|
{
"resource": ""
}
|
q280047
|
reads_py
|
test
|
def reads_py(s, **kwargs):
"""Read a .py notebook from a string and return the NotebookNode object."""
nbf, nbm, s = parse_py(s, **kwargs)
if nbf == 2:
nb = v2.to_notebook_py(s, **kwargs)
elif nbf == 3:
nb = v3.to_notebook_py(s, **kwargs)
else:
raise NBFormatError('Unsupported PY nbformat version: %i' % nbf)
return nb
|
python
|
{
"resource": ""
}
|
q280048
|
reads
|
test
|
def reads(s, format, **kwargs):
"""Read a notebook from a string and return the NotebookNode object.
This function properly handles notebooks of any version. The notebook
returned will always be in the current version's format.
Parameters
----------
s : unicode
The raw unicode string to read the notebook from.
format : (u'json', u'ipynb', u'py')
The format that the string is in.
Returns
-------
nb : NotebookNode
The notebook that was read.
"""
format = unicode(format)
if format == u'json' or format == u'ipynb':
return reads_json(s, **kwargs)
elif format == u'py':
return reads_py(s, **kwargs)
else:
raise NBFormatError('Unsupported format: %s' % format)
|
python
|
{
"resource": ""
}
|
q280049
|
writes
|
test
|
def writes(nb, format, **kwargs):
"""Write a notebook to a string in a given format in the current nbformat version.
This function always writes the notebook in the current nbformat version.
Parameters
----------
nb : NotebookNode
The notebook to write.
format : (u'json', u'ipynb', u'py')
The format to write the notebook in.
Returns
-------
s : unicode
The notebook string.
"""
format = unicode(format)
if format == u'json' or format == u'ipynb':
return writes_json(nb, **kwargs)
elif format == u'py':
return writes_py(nb, **kwargs)
else:
raise NBFormatError('Unsupported format: %s' % format)
|
python
|
{
"resource": ""
}
|
q280050
|
write
|
test
|
def write(nb, fp, format, **kwargs):
"""Write a notebook to a file in a given format in the current nbformat version.
This function always writes the notebook in the current nbformat version.
Parameters
----------
nb : NotebookNode
The notebook to write.
fp : file
Any file-like object with a write method.
format : (u'json', u'ipynb', u'py')
The format to write the notebook in.
Returns
-------
s : unicode
The notebook string.
"""
return fp.write(writes(nb, format, **kwargs))
|
python
|
{
"resource": ""
}
|
q280051
|
_convert_to_metadata
|
test
|
def _convert_to_metadata():
"""Convert to a notebook having notebook metadata."""
import glob
for fname in glob.glob('*.ipynb'):
print('Converting file:',fname)
with open(fname,'r') as f:
nb = read(f,u'json')
md = new_metadata()
if u'name' in nb:
md.name = nb.name
del nb[u'name']
nb.metadata = md
with open(fname,'w') as f:
write(nb, f, u'json')
|
python
|
{
"resource": ""
}
|
q280052
|
Box.load_from_dict
|
test
|
def load_from_dict(self, src: dict, key):
'''
    try to load value from dict.
    if the key does not exist, mark the state as unset.
'''
if key in src:
self.value = src[key]
else:
self.reset()
|
python
|
{
"resource": ""
}
|
q280053
|
Selector.matches
|
test
|
def matches(self, name):
"""Does the name match my requirements?
To match, a name must match config.testMatch OR config.include
and it must not match config.exclude
"""
return ((self.match.search(name)
or (self.include and
filter(None,
[inc.search(name) for inc in self.include])))
and ((not self.exclude)
or not filter(None,
[exc.search(name) for exc in self.exclude])
))
|
python
|
{
"resource": ""
}
|
q280054
|
Selector.wantClass
|
test
|
def wantClass(self, cls):
"""Is the class a wanted test class?
A class must be a unittest.TestCase subclass, or match test name
requirements. Classes that start with _ are always excluded.
"""
declared = getattr(cls, '__test__', None)
if declared is not None:
wanted = declared
else:
wanted = (not cls.__name__.startswith('_')
and (issubclass(cls, unittest.TestCase)
or self.matches(cls.__name__)))
plug_wants = self.plugins.wantClass(cls)
if plug_wants is not None:
log.debug("Plugin setting selection of %s to %s", cls, plug_wants)
wanted = plug_wants
log.debug("wantClass %s? %s", cls, wanted)
return wanted
|
python
|
{
"resource": ""
}
|
q280055
|
Selector.wantDirectory
|
test
|
def wantDirectory(self, dirname):
"""Is the directory a wanted test directory?
All package directories match, so long as they do not match exclude.
All other directories must match test requirements.
"""
tail = op_basename(dirname)
if ispackage(dirname):
wanted = (not self.exclude
or not filter(None,
[exc.search(tail) for exc in self.exclude]
))
else:
wanted = (self.matches(tail)
or (self.config.srcDirs
and tail in self.config.srcDirs))
plug_wants = self.plugins.wantDirectory(dirname)
if plug_wants is not None:
log.debug("Plugin setting selection of %s to %s",
dirname, plug_wants)
wanted = plug_wants
log.debug("wantDirectory %s? %s", dirname, wanted)
return wanted
|
python
|
{
"resource": ""
}
|
q280056
|
Selector.wantFile
|
test
|
def wantFile(self, file):
"""Is the file a wanted test file?
The file must be a python source file and match testMatch or
include, and not match exclude. Files that match ignore are *never*
wanted, regardless of plugin, testMatch, include or exclude settings.
"""
# never, ever load files that match anything in ignore
# (.* _* and *setup*.py by default)
base = op_basename(file)
ignore_matches = [ ignore_this for ignore_this in self.ignoreFiles
if ignore_this.search(base) ]
if ignore_matches:
log.debug('%s matches ignoreFiles pattern; skipped',
base)
return False
if not self.config.includeExe and os.access(file, os.X_OK):
log.info('%s is executable; skipped', file)
return False
dummy, ext = op_splitext(base)
pysrc = ext == '.py'
wanted = pysrc and self.matches(base)
plug_wants = self.plugins.wantFile(file)
if plug_wants is not None:
log.debug("plugin setting want %s to %s", file, plug_wants)
wanted = plug_wants
log.debug("wantFile %s? %s", file, wanted)
return wanted
|
python
|
{
"resource": ""
}
|
q280057
|
Selector.wantFunction
|
test
|
def wantFunction(self, function):
"""Is the function a test function?
"""
try:
if hasattr(function, 'compat_func_name'):
funcname = function.compat_func_name
else:
funcname = function.__name__
except AttributeError:
# not a function
return False
declared = getattr(function, '__test__', None)
if declared is not None:
wanted = declared
else:
wanted = not funcname.startswith('_') and self.matches(funcname)
plug_wants = self.plugins.wantFunction(function)
if plug_wants is not None:
wanted = plug_wants
log.debug("wantFunction %s? %s", function, wanted)
return wanted
|
python
|
{
"resource": ""
}
|
q280058
|
Selector.wantMethod
|
test
|
def wantMethod(self, method):
"""Is the method a test method?
"""
try:
method_name = method.__name__
except AttributeError:
# not a method
return False
if method_name.startswith('_'):
# never collect 'private' methods
return False
declared = getattr(method, '__test__', None)
if declared is not None:
wanted = declared
else:
wanted = self.matches(method_name)
plug_wants = self.plugins.wantMethod(method)
if plug_wants is not None:
wanted = plug_wants
log.debug("wantMethod %s? %s", method, wanted)
return wanted
|
python
|
{
"resource": ""
}
|
q280059
|
Selector.wantModule
|
test
|
def wantModule(self, module):
"""Is the module a test module?
The tail of the module name must match test requirements. One exception:
we always want __main__.
"""
declared = getattr(module, '__test__', None)
if declared is not None:
wanted = declared
else:
wanted = self.matches(module.__name__.split('.')[-1]) \
or module.__name__ == '__main__'
plug_wants = self.plugins.wantModule(module)
if plug_wants is not None:
wanted = plug_wants
log.debug("wantModule %s? %s", module, wanted)
return wanted
|
python
|
{
"resource": ""
}
|
q280060
|
_file_lines
|
test
|
def _file_lines(fname):
"""Return the contents of a named file as a list of lines.
This function never raises an IOError exception: if the file can't be
read, it simply returns an empty list."""
try:
outfile = open(fname)
except IOError:
return []
else:
out = outfile.readlines()
outfile.close()
return out
|
python
|
{
"resource": ""
}
|
q280061
|
Pdb.list_command_pydb
|
test
|
def list_command_pydb(self, arg):
"""List command to use if we have a newer pydb installed"""
filename, first, last = OldPdb.parse_list_cmd(self, arg)
if filename is not None:
self.print_list_lines(filename, first, last)
|
python
|
{
"resource": ""
}
|
q280062
|
Pdb.print_list_lines
|
test
|
def print_list_lines(self, filename, first, last):
"""The printing (as opposed to the parsing part of a 'list'
command."""
try:
Colors = self.color_scheme_table.active_colors
ColorsNormal = Colors.Normal
tpl_line = '%%s%s%%s %s%%s' % (Colors.lineno, ColorsNormal)
tpl_line_em = '%%s%s%%s %s%%s%s' % (Colors.linenoEm, Colors.line, ColorsNormal)
src = []
for lineno in range(first, last+1):
line = linecache.getline(filename, lineno)
if not line:
break
if lineno == self.curframe.f_lineno:
line = self.__format_line(tpl_line_em, filename, lineno, line, arrow = True)
else:
line = self.__format_line(tpl_line, filename, lineno, line, arrow = False)
src.append(line)
self.lineno = lineno
print >>io.stdout, ''.join(src)
except KeyboardInterrupt:
pass
|
python
|
{
"resource": ""
}
|
q280063
|
Pdb.do_pdef
|
test
|
def do_pdef(self, arg):
"""The debugger interface to magic_pdef"""
namespaces = [('Locals', self.curframe.f_locals),
('Globals', self.curframe.f_globals)]
self.shell.find_line_magic('pdef')(arg, namespaces=namespaces)
|
python
|
{
"resource": ""
}
|
q280064
|
conversion_factor
|
test
|
def conversion_factor(from_symbol, to_symbol, date):
"""
Generates a multiplying factor used to convert two currencies
"""
from_currency = Currency.objects.get(symbol=from_symbol)
try:
from_currency_price = CurrencyPrice.objects.get(currency=from_currency, date=date).mid_price
except CurrencyPrice.DoesNotExist:
print "Cannot fetch prices for %s on %s" % (str(from_currency), str(date))
return None
to_currency = Currency.objects.get(symbol=to_symbol)
try:
to_currency_price = CurrencyPrice.objects.get(currency=to_currency, date=date).mid_price
except CurrencyPrice.DoesNotExist:
print "Cannot fetch prices for %s on %s" % (str(to_currency), str(date))
return None
return to_currency_price / from_currency_price
|
python
|
{
"resource": ""
}
|
q280065
|
convert_currency
|
test
|
def convert_currency(from_symbol, to_symbol, value, date):
"""
Converts an amount of money from one currency to another on a specified date.
"""
if from_symbol == to_symbol:
return value
factor = conversion_factor(from_symbol, to_symbol, date)
if type(value) == float:
output = value * float(factor)
elif type(value) == Decimal:
output = Decimal(format(value * factor, '.%sf' % str(PRICE_PRECISION)))
elif type(value) in [np.float16, np.float32, np.float64, np.float128, np.float]:
output = float(value) * float(factor)
else:
output = None
return output
|
python
|
{
"resource": ""
}
|
q280066
|
Currency.compute_return
|
test
|
def compute_return(self, start_date, end_date, rate="MID"):
"""
Compute the return of the currency between two dates
"""
if rate not in ["MID", "ASK", "BID"]:
raise ValueError("Unknown rate type (%s)- must be 'MID', 'ASK' or 'BID'" % str(rate))
if end_date <= start_date:
raise ValueError("End date must be on or after start date")
df = self.generate_dataframe(start_date=start_date, end_date=end_date)
start_price = df.ix[start_date][rate]
end_price = df.ix[end_date][rate]
currency_return = (end_price / start_price) - 1.0
return currency_return
|
python
|
{
"resource": ""
}
|
q280067
|
get_stream_enc
|
test
|
def get_stream_enc(stream, default=None):
"""Return the given stream's encoding or a default.
There are cases where sys.std* might not actually be a stream, so
check for the encoding attribute prior to returning it, and return
a default if it doesn't exist or evaluates as False. `default'
is None if not provided.
"""
if not hasattr(stream, 'encoding') or not stream.encoding:
return default
else:
return stream.encoding
|
python
|
{
"resource": ""
}
|
q280068
|
getdefaultencoding
|
test
|
def getdefaultencoding():
"""Return IPython's guess for the default encoding for bytes as text.
Asks for stdin.encoding first, to match the calling Terminal, but that
is often None for subprocesses. Fall back on locale.getpreferredencoding()
which should be a sensible platform default (that respects LANG environment),
and finally to sys.getdefaultencoding() which is the most conservative option,
and usually ASCII.
"""
enc = get_stream_enc(sys.stdin)
if not enc or enc=='ascii':
try:
# There are reports of getpreferredencoding raising errors
# in some cases, which may well be fixed, but let's be conservative here.
enc = locale.getpreferredencoding()
except Exception:
pass
return enc or sys.getdefaultencoding()
|
python
|
{
"resource": ""
}
|
q280069
|
KernelApp.write_connection_file
|
test
|
def write_connection_file(self):
"""write connection info to JSON file"""
if os.path.basename(self.connection_file) == self.connection_file:
cf = os.path.join(self.profile_dir.security_dir, self.connection_file)
else:
cf = self.connection_file
write_connection_file(cf, ip=self.ip, key=self.session.key,
shell_port=self.shell_port, stdin_port=self.stdin_port, hb_port=self.hb_port,
iopub_port=self.iopub_port)
self._full_connection_file = cf
|
python
|
{
"resource": ""
}
|
q280070
|
KernelApp.init_heartbeat
|
test
|
def init_heartbeat(self):
"""start the heart beating"""
# heartbeat doesn't share context, because it mustn't be blocked
# by the GIL, which is accessed by libzmq when freeing zero-copy messages
hb_ctx = zmq.Context()
self.heartbeat = Heartbeat(hb_ctx, (self.ip, self.hb_port))
self.hb_port = self.heartbeat.port
self.log.debug("Heartbeat REP Channel on port: %i"%self.hb_port)
self.heartbeat.start()
# Helper to make it easier to connect to an existing kernel.
# set log-level to critical, to make sure it is output
self.log.critical("To connect another client to this kernel, use:")
|
python
|
{
"resource": ""
}
|
q280071
|
KernelApp.log_connection_info
|
test
|
def log_connection_info(self):
"""display connection info, and store ports"""
basename = os.path.basename(self.connection_file)
if basename == self.connection_file or \
os.path.dirname(self.connection_file) == self.profile_dir.security_dir:
# use shortname
tail = basename
if self.profile != 'default':
tail += " --profile %s" % self.profile
else:
tail = self.connection_file
self.log.critical("--existing %s", tail)
self.ports = dict(shell=self.shell_port, iopub=self.iopub_port,
stdin=self.stdin_port, hb=self.hb_port)
|
python
|
{
"resource": ""
}
|
q280072
|
KernelApp.init_session
|
test
|
def init_session(self):
"""create our session object"""
default_secure(self.config)
self.session = Session(config=self.config, username=u'kernel')
|
python
|
{
"resource": ""
}
|
q280073
|
KernelApp.init_io
|
test
|
def init_io(self):
"""Redirect input streams and set a display hook."""
if self.outstream_class:
outstream_factory = import_item(str(self.outstream_class))
sys.stdout = outstream_factory(self.session, self.iopub_socket, u'stdout')
sys.stderr = outstream_factory(self.session, self.iopub_socket, u'stderr')
if self.displayhook_class:
displayhook_factory = import_item(str(self.displayhook_class))
sys.displayhook = displayhook_factory(self.session, self.iopub_socket)
|
python
|
{
"resource": ""
}
|
q280074
|
KernelApp.init_kernel
|
test
|
def init_kernel(self):
"""Create the Kernel object itself"""
kernel_factory = import_item(str(self.kernel_class))
self.kernel = kernel_factory(config=self.config, session=self.session,
shell_socket=self.shell_socket,
iopub_socket=self.iopub_socket,
stdin_socket=self.stdin_socket,
log=self.log
)
self.kernel.record_ports(self.ports)
|
python
|
{
"resource": ""
}
|
q280075
|
EngineFactory.init_connector
|
test
|
def init_connector(self):
"""construct connection function, which handles tunnels."""
self.using_ssh = bool(self.sshkey or self.sshserver)
if self.sshkey and not self.sshserver:
# We are using ssh directly to the controller, tunneling localhost to localhost
self.sshserver = self.url.split('://')[1].split(':')[0]
if self.using_ssh:
if tunnel.try_passwordless_ssh(self.sshserver, self.sshkey, self.paramiko):
password=False
else:
password = getpass("SSH Password for %s: "%self.sshserver)
else:
password = False
def connect(s, url):
url = disambiguate_url(url, self.location)
if self.using_ssh:
self.log.debug("Tunneling connection to %s via %s"%(url, self.sshserver))
return tunnel.tunnel_connection(s, url, self.sshserver,
keyfile=self.sshkey, paramiko=self.paramiko,
password=password,
)
else:
return s.connect(url)
def maybe_tunnel(url):
"""like connect, but don't complete the connection (for use by heartbeat)"""
url = disambiguate_url(url, self.location)
if self.using_ssh:
self.log.debug("Tunneling connection to %s via %s"%(url, self.sshserver))
url,tunnelobj = tunnel.open_tunnel(url, self.sshserver,
keyfile=self.sshkey, paramiko=self.paramiko,
password=password,
)
return url
return connect, maybe_tunnel
|
python
|
{
"resource": ""
}
|
q280076
|
EngineFactory.register
|
test
|
def register(self):
"""send the registration_request"""
self.log.info("Registering with controller at %s"%self.url)
ctx = self.context
connect,maybe_tunnel = self.init_connector()
reg = ctx.socket(zmq.DEALER)
reg.setsockopt(zmq.IDENTITY, self.bident)
connect(reg, self.url)
self.registrar = zmqstream.ZMQStream(reg, self.loop)
content = dict(queue=self.ident, heartbeat=self.ident, control=self.ident)
self.registrar.on_recv(lambda msg: self.complete_registration(msg, connect, maybe_tunnel))
# print (self.session.key)
self.session.send(self.registrar, "registration_request",content=content)
|
python
|
{
"resource": ""
}
|
q280077
|
html_to_text
|
test
|
def html_to_text(content):
""" Converts html content to plain text """
text = None
h2t = html2text.HTML2Text()
h2t.ignore_links = False
text = h2t.handle(content)
return text
|
python
|
{
"resource": ""
}
|
q280078
|
md_to_text
|
test
|
def md_to_text(content):
""" Converts markdown content to text """
text = None
html = markdown.markdown(content)
if html:
text = html_to_text(content)
return text
|
python
|
{
"resource": ""
}
|
q280079
|
domain_to_fqdn
|
test
|
def domain_to_fqdn(domain, proto=None):
""" returns a fully qualified app domain name """
from .generic import get_site_proto
proto = proto or get_site_proto()
fdqn = '{proto}://{domain}'.format(proto=proto, domain=domain)
return fdqn
|
python
|
{
"resource": ""
}
|
q280080
|
NoseExclude.options
|
test
|
def options(self, parser, env=os.environ):
"""Define the command line options for the plugin."""
super(NoseExclude, self).options(parser, env)
env_dirs = []
if 'NOSE_EXCLUDE_DIRS' in env:
exclude_dirs = env.get('NOSE_EXCLUDE_DIRS','')
env_dirs.extend(exclude_dirs.split(';'))
parser.add_option(
"--exclude-dir", action="append",
dest="exclude_dirs",
default=env_dirs,
help="Directory to exclude from test discovery. \
Path can be relative to current working directory \
or an absolute path. May be specified multiple \
times. [NOSE_EXCLUDE_DIRS]")
parser.add_option(
"--exclude-dir-file", type="string",
dest="exclude_dir_file",
default=env.get('NOSE_EXCLUDE_DIRS_FILE', False),
help="A file containing a list of directories to exclude \
from test discovery. Paths can be relative to current \
working directory or an absolute path. \
[NOSE_EXCLUDE_DIRS_FILE]")
|
python
|
{
"resource": ""
}
|
q280081
|
NoseExclude.configure
|
test
|
def configure(self, options, conf):
"""Configure plugin based on command line options"""
super(NoseExclude, self).configure(options, conf)
self.exclude_dirs = {}
# preload directories from file
if options.exclude_dir_file:
if not options.exclude_dirs:
options.exclude_dirs = []
new_dirs = self._load_from_file(options.exclude_dir_file)
options.exclude_dirs.extend(new_dirs)
if not options.exclude_dirs:
self.enabled = False
return
self.enabled = True
root = os.getcwd()
log.debug('cwd: %s' % root)
# Normalize excluded directory names for lookup
for exclude_param in options.exclude_dirs:
# when using setup.cfg, you can specify only one 'exclude-dir'
# separated by some character (new line is good enough)
for d in exclude_param.split('\n'):
d = d.strip()
abs_d = self._force_to_abspath(d)
if abs_d:
self.exclude_dirs[abs_d] = True
exclude_str = "excluding dirs: %s" % ",".join(self.exclude_dirs.keys())
log.debug(exclude_str)
|
python
|
{
"resource": ""
}
|
q280082
|
NoseExclude.wantDirectory
|
test
|
def wantDirectory(self, dirname):
"""Check if directory is eligible for test discovery"""
if dirname in self.exclude_dirs:
log.debug("excluded: %s" % dirname)
return False
else:
return None
|
python
|
{
"resource": ""
}
|
q280083
|
build_ext.links_to_dynamic
|
test
|
def links_to_dynamic(self, ext):
"""Return true if 'ext' links to a dynamic lib in the same package"""
# XXX this should check to ensure the lib is actually being built
# XXX as dynamic, and not just using a locally-found version or a
# XXX static-compiled version
libnames = dict.fromkeys([lib._full_name for lib in self.shlibs])
pkg = '.'.join(ext._full_name.split('.')[:-1]+[''])
for libname in ext.libraries:
if pkg+libname in libnames: return True
return False
|
python
|
{
"resource": ""
}
|
q280084
|
call_each
|
test
|
def call_each(funcs: list, *args, **kwargs):
'''
call each func from func list.
return the last func value or None if func list is empty.
'''
ret = None
for func in funcs:
ret = func(*args, **kwargs)
return ret
|
python
|
{
"resource": ""
}
|
q280085
|
call_each_reversed
|
test
|
def call_each_reversed(funcs: list, *args, **kwargs):
'''
call each func from reversed func list.
return the last func value or None if func list is empty.
'''
ret = None
for func in reversed(funcs):
ret = func(*args, **kwargs)
return ret
|
python
|
{
"resource": ""
}
|
q280086
|
CallableList.append_func
|
test
|
def append_func(self, func, *args, **kwargs):
'''
append func with given arguments and keywords.
'''
wraped_func = partial(func, *args, **kwargs)
self.append(wraped_func)
|
python
|
{
"resource": ""
}
|
q280087
|
CallableList.insert_func
|
test
|
def insert_func(self, index, func, *args, **kwargs):
'''
insert func with given arguments and keywords.
'''
wraped_func = partial(func, *args, **kwargs)
self.insert(index, wraped_func)
|
python
|
{
"resource": ""
}
|
q280088
|
PrettyHelpFormatter.format_usage
|
test
|
def format_usage(self, usage):
"""
ensure there is only one newline between usage and the first heading
if there is no description
"""
msg = 'Usage: %s' % usage
if self.parser.description:
msg += '\n'
return msg
|
python
|
{
"resource": ""
}
|
q280089
|
BaseParallelApplication.initialize
|
test
|
def initialize(self, argv=None):
"""initialize the app"""
super(BaseParallelApplication, self).initialize(argv)
self.to_work_dir()
self.reinit_logging()
|
python
|
{
"resource": ""
}
|
q280090
|
BaseParallelApplication.write_pid_file
|
test
|
def write_pid_file(self, overwrite=False):
"""Create a .pid file in the pid_dir with my pid.
This must be called after pre_construct, which sets `self.pid_dir`.
This raises :exc:`PIDFileError` if the pid file exists already.
"""
pid_file = os.path.join(self.profile_dir.pid_dir, self.name + u'.pid')
if os.path.isfile(pid_file):
pid = self.get_pid_from_file()
if not overwrite:
raise PIDFileError(
'The pid file [%s] already exists. \nThis could mean that this '
'server is already running with [pid=%s].' % (pid_file, pid)
)
with open(pid_file, 'w') as f:
self.log.info("Creating pid file: %s" % pid_file)
f.write(repr(os.getpid())+'\n')
|
python
|
{
"resource": ""
}
|
q280091
|
BaseParallelApplication.remove_pid_file
|
test
|
def remove_pid_file(self):
"""Remove the pid file.
This should be called at shutdown by registering a callback with
:func:`reactor.addSystemEventTrigger`. This needs to return
``None``.
"""
pid_file = os.path.join(self.profile_dir.pid_dir, self.name + u'.pid')
if os.path.isfile(pid_file):
try:
self.log.info("Removing pid file: %s" % pid_file)
os.remove(pid_file)
except:
self.log.warn("Error removing the pid file: %s" % pid_file)
|
python
|
{
"resource": ""
}
|
q280092
|
BaseParallelApplication.get_pid_from_file
|
test
|
def get_pid_from_file(self):
"""Get the pid from the pid file.
If the pid file doesn't exist a :exc:`PIDFileError` is raised.
"""
pid_file = os.path.join(self.profile_dir.pid_dir, self.name + u'.pid')
if os.path.isfile(pid_file):
with open(pid_file, 'r') as f:
s = f.read().strip()
try:
pid = int(s)
except:
raise PIDFileError("invalid pid file: %s (contents: %r)"%(pid_file, s))
return pid
else:
raise PIDFileError('pid file not found: %s' % pid_file)
|
python
|
{
"resource": ""
}
|
q280093
|
construct_parser
|
test
|
def construct_parser(magic_func):
""" Construct an argument parser using the function decorations.
"""
kwds = getattr(magic_func, 'argcmd_kwds', {})
if 'description' not in kwds:
kwds['description'] = getattr(magic_func, '__doc__', None)
arg_name = real_name(magic_func)
parser = MagicArgumentParser(arg_name, **kwds)
# Reverse the list of decorators in order to apply them in the
# order in which they appear in the source.
group = None
for deco in magic_func.decorators[::-1]:
result = deco.add_to_parser(parser, group)
if result is not None:
group = result
# Replace the starting 'usage: ' with IPython's %.
help_text = parser.format_help()
if help_text.startswith('usage: '):
help_text = help_text.replace('usage: ', '%', 1)
else:
help_text = '%' + help_text
# Replace the magic function's docstring with the full help text.
magic_func.__doc__ = help_text
return parser
|
python
|
{
"resource": ""
}
|
q280094
|
real_name
|
test
|
def real_name(magic_func):
""" Find the real name of the magic.
"""
magic_name = magic_func.__name__
if magic_name.startswith('magic_'):
magic_name = magic_name[len('magic_'):]
return getattr(magic_func, 'argcmd_name', magic_name)
|
python
|
{
"resource": ""
}
|
q280095
|
FrontendHighlighter.highlightBlock
|
test
|
def highlightBlock(self, string):
""" Highlight a block of text. Reimplemented to highlight selectively.
"""
if not self.highlighting_on:
return
# The input to this function is a unicode string that may contain
# paragraph break characters, non-breaking spaces, etc. Here we acquire
# the string as plain text so we can compare it.
current_block = self.currentBlock()
string = self._frontend._get_block_plain_text(current_block)
# Decide whether to check for the regular or continuation prompt.
if current_block.contains(self._frontend._prompt_pos):
prompt = self._frontend._prompt
else:
prompt = self._frontend._continuation_prompt
# Only highlight if we can identify a prompt, but make sure not to
# highlight the prompt.
if string.startswith(prompt):
self._current_offset = len(prompt)
string = string[len(prompt):]
super(FrontendHighlighter, self).highlightBlock(string)
|
python
|
{
"resource": ""
}
|
q280096
|
FrontendHighlighter.rehighlightBlock
|
test
|
def rehighlightBlock(self, block):
""" Reimplemented to temporarily enable highlighting if disabled.
"""
old = self.highlighting_on
self.highlighting_on = True
super(FrontendHighlighter, self).rehighlightBlock(block)
self.highlighting_on = old
|
python
|
{
"resource": ""
}
|
q280097
|
FrontendHighlighter.setFormat
|
test
|
def setFormat(self, start, count, format):
""" Reimplemented to highlight selectively.
"""
start += self._current_offset
super(FrontendHighlighter, self).setFormat(start, count, format)
|
python
|
{
"resource": ""
}
|
q280098
|
FrontendWidget.copy
|
test
|
def copy(self):
""" Copy the currently selected text to the clipboard, removing prompts.
"""
if self._page_control is not None and self._page_control.hasFocus():
self._page_control.copy()
elif self._control.hasFocus():
text = self._control.textCursor().selection().toPlainText()
if text:
lines = map(self._transform_prompt, text.splitlines())
text = '\n'.join(lines)
QtGui.QApplication.clipboard().setText(text)
else:
self.log.debug("frontend widget : unknown copy target")
|
python
|
{
"resource": ""
}
|
q280099
|
FrontendWidget._execute
|
test
|
def _execute(self, source, hidden):
""" Execute 'source'. If 'hidden', do not show any output.
See parent class :meth:`execute` docstring for full details.
"""
msg_id = self.kernel_manager.shell_channel.execute(source, hidden)
self._request_info['execute'][msg_id] = self._ExecutionRequest(msg_id, 'user')
self._hidden = hidden
if not hidden:
self.executing.emit(source)
|
python
|
{
"resource": ""
}
|