Column schema for the rows below (a minimal record-handling sketch in Python follows the table):

_id (string, 2-7 chars) | title (string, 1-88 chars) | partition (string, 3 classes) | text (string, 31-13.1k chars) | language (string, 1 class) | meta_information (dict) |
---|---|---|---|---|---|
q276600
|
_iter_module_files
|
test
|
def _iter_module_files():
"""This iterates over all relevant Python files. It goes through all
loaded files from modules, all files in folders of already loaded modules
as well as all files reachable through a package.
"""
# The list call is necessary on Python 3 in case the module
# dictionary modifies during iteration.
for module in list(sys.modules.values()):
if module is None:
continue
filename = getattr(module, '__file__', None)
if filename:
old = None
|
python
|
{
"resource": ""
}
|
q276601
|
ReloaderLoop.restart_with_reloader
|
test
|
def restart_with_reloader(self):
"""Spawn a new Python interpreter with the same arguments as this one,
but running the reloader thread.
"""
while 1:
_log('info', ' * Restarting with %s' % self.name)
args = [sys.executable] + sys.argv
new_environ = os.environ.copy()
new_environ['WERKZEUG_RUN_MAIN'] = 'true'
# a weird bug on windows. sometimes unicode strings end up in the
# environment and subprocess.call does not like this, encode them
# to latin1 and continue.
|
python
|
{
"resource": ""
}
|
q276602
|
to_text
|
test
|
def to_text(s, blank_if_none=True):
"""Wrapper around six.text_type to convert None to empty string"""
if s is None:
if blank_if_none:
return ""
else:
|
python
|
{
"resource": ""
}
|
q276603
|
find_ca_bundle
|
test
|
def find_ca_bundle():
"""Return an existing CA bundle path, or None"""
if os.name=='nt':
return get_win_certfile()
else:
for cert_path in cert_paths:
if os.path.isfile(cert_path):
return cert_path
|
python
|
{
"resource": ""
}
|
q276604
|
parse
|
test
|
def parse(doc, treebuilder="etree", encoding=None,
namespaceHTMLElements=True):
"""Parse a string or file-like object
|
python
|
{
"resource": ""
}
|
q276605
|
HTMLParser.parse
|
test
|
def parse(self, stream, encoding=None, parseMeta=True, useChardet=True):
"""Parse a HTML document into a well-formed tree
stream - a filelike object or string containing the HTML to be parsed
The optional encoding parameter must be a string that indicates
the encoding. If specified, that encoding will be used,
regardless of any BOM
|
python
|
{
"resource": ""
}
|
q276606
|
HTMLParser.parseFragment
|
test
|
def parseFragment(self, stream, container="div", encoding=None,
parseMeta=False, useChardet=True):
"""Parse a HTML fragment into a well-formed tree fragment
container - name of the element we're setting the innerHTML property
if set to None, default to 'div'
stream - a filelike object or string containing the HTML to be parsed
The optional encoding
|
python
|
{
"resource": ""
}
|
q276607
|
Translator.translate
|
test
|
def translate(self, word):
"""
pass in a word string that you
would like to see probable matches for.
"""
if (word not in self.transmissions):
raise NoMatchError('no matches found')
else:
trans = self.transmissions[word]
|
python
|
{
"resource": ""
}
|
q276608
|
Translator.convertArgsToTokens
|
test
|
def convertArgsToTokens(self, data):
"""
this converts the read-in lines from
sys to a usable format, returns list
of token and dict of tokens
"""
|
python
|
{
"resource": ""
}
|
q276609
|
HTTPServiceProvider.bind
|
test
|
def bind(self):
"""Bind and activate HTTP server."""
HTTPServer.__init__(self, (self.host, self.port),
|
python
|
{
"resource": ""
}
|
q276610
|
HTTPServiceProvider.report
|
test
|
def report(self):
"""Report startup info to stdout."""
print(
self.report_message.format(
|
python
|
{
"resource": ""
}
|
q276611
|
Bucket.load_bytecode
|
test
|
def load_bytecode(self, f):
"""Loads bytecode from a file or file like object."""
# make sure the magic header is correct
magic = f.read(len(bc_magic))
if magic != bc_magic:
self.reset()
return
|
python
|
{
"resource": ""
}
|
q276612
|
_stylesheet_param_dict
|
test
|
def _stylesheet_param_dict(paramsDict, kwargsDict):
"""Return a copy of paramsDict, updated with kwargsDict entries, wrapped as
stylesheet arguments.
kwargsDict entries with a value of None are ignored.
"""
# beware of changing mutable default arg
paramsDict = dict(paramsDict)
for k, v in kwargsDict.items():
|
python
|
{
"resource": ""
}
|
q276613
|
VersionControl.run_command
|
test
|
def run_command(self, cmd, show_stdout=True, cwd=None,
raise_on_returncode=True,
command_level=logging.DEBUG, command_desc=None,
extra_environ=None):
"""
Run a VCS subcommand
This is simply a wrapper around call_subprocess that adds the VCS
command name, and checks that the VCS is available
|
python
|
{
"resource": ""
}
|
q276614
|
get_impl_ver
|
test
|
def get_impl_ver():
"""Return implementation version."""
impl_ver = sysconfig.get_config_var("py_version_nodot")
|
python
|
{
"resource": ""
}
|
q276615
|
distros_for_location
|
test
|
def distros_for_location(location, basename, metadata=None):
"""Yield egg or source distribution objects based on basename"""
if basename.endswith('.egg.zip'):
basename = basename[:-4] # strip the .zip
if basename.endswith('.egg') and '-' in basename:
# only one, unambiguous interpretation
return [Distribution.from_location(location, basename, metadata)]
|
python
|
{
"resource": ""
}
|
q276616
|
find_external_links
|
test
|
def find_external_links(url, page):
"""Find rel="homepage" and rel="download" links in `page`, yielding URLs"""
for match in REL.finditer(page):
tag, rel = match.groups()
rels = set(map(str.strip, rel.lower().split(',')))
if 'homepage' in rels or 'download' in rels:
for match in HREF.finditer(tag):
|
python
|
{
"resource": ""
}
|
q276617
|
local_open
|
test
|
def local_open(url):
"""Read a local path, with special support for directories"""
scheme, server, path, param, query, frag = urlparse(url)
filename = url2pathname(path)
if os.path.isfile(filename):
return urllib2.urlopen(url)
elif path.endswith('/') and os.path.isdir(filename):
files = []
for f in os.listdir(filename):
if f=='index.html':
with open(os.path.join(filename,f),'r') as fp:
body = fp.read()
break
elif os.path.isdir(os.path.join(filename,f)):
|
python
|
{
"resource": ""
}
|
q276618
|
PackageIndex.process_url
|
test
|
def process_url(self, url, retrieve=False):
"""Evaluate a URL as a possible download, and maybe retrieve it"""
if url in self.scanned_urls and not retrieve:
return
self.scanned_urls[url] = True
if not URL_SCHEME(url):
self.process_filename(url)
return
else:
dists = list(distros_for_url(url))
if dists:
if not self.url_ok(url):
return
self.debug("Found link: %s", url)
if dists or not retrieve or url in self.fetched_urls:
list(map(self.add, dists))
return # don't need the actual page
if not self.url_ok(url):
self.fetched_urls[url] = True
return
self.info("Reading %s", url)
self.fetched_urls[url] = True # prevent multiple fetch attempts
f = self.open_url(url, "Download error on %s: %%s -- Some packages may not be found!" % url)
if f is None: return
self.fetched_urls[f.url] = True
if 'html' not in f.headers.get('content-type', '').lower():
f.close() # not html, we can't process it
return
base = f.url #
|
python
|
{
"resource": ""
}
|
q276619
|
removeduppaths
|
test
|
def removeduppaths():
""" Remove duplicate entries from sys.path along with making them
absolute"""
# This ensures that the initial path provided by the interpreter contains
# only absolute pathnames, even if we're running from the build directory.
L = []
known_paths = set()
for dir in sys.path:
# Filter out duplicate paths (on case-insensitive file systems also
|
python
|
{
"resource": ""
}
|
q276620
|
_init_pathinfo
|
test
|
def _init_pathinfo():
"""Return a set containing all existing directory entries from sys.path"""
d = set()
for dir in sys.path:
try:
if os.path.isdir(dir):
|
python
|
{
"resource": ""
}
|
q276621
|
addpackage
|
test
|
def addpackage(sitedir, name, known_paths):
"""Add a new path to known_paths by combining sitedir and 'name' or execute
sitedir if it starts with 'import'"""
if known_paths is None:
_init_pathinfo()
reset = 1
else:
reset = 0
fullname = os.path.join(sitedir, name)
try:
f = open(fullname, "rU")
except IOError:
|
python
|
{
"resource": ""
}
|
q276622
|
addsitedir
|
test
|
def addsitedir(sitedir, known_paths=None):
"""Add 'sitedir' argument to sys.path if missing and handle .pth files in
'sitedir'"""
if known_paths is None:
known_paths = _init_pathinfo()
reset = 1
else:
reset = 0
sitedir, sitedircase = makepath(sitedir)
if not sitedircase in known_paths:
sys.path.append(sitedir) # Add path component
try:
|
python
|
{
"resource": ""
}
|
q276623
|
check_enableusersite
|
test
|
def check_enableusersite():
"""Check if user site directory is safe for inclusion
The function tests for the command line flag (including environment var),
process uid/gid equal to effective uid/gid.
None: Disabled for security reasons
False: Disabled by user (command line option)
True: Safe and enabled
|
python
|
{
"resource": ""
}
|
q276624
|
addusersitepackages
|
test
|
def addusersitepackages(known_paths):
"""Add a per user site-package to sys.path
Each user has its own python directory with site-packages in the
home directory.
USER_BASE is the root directory for all Python versions
USER_SITE is the user specific site-packages directory
USER_SITE/.. can be used for data.
"""
global USER_BASE, USER_SITE, ENABLE_USER_SITE
env_base = os.environ.get("PYTHONUSERBASE", None)
def joinuser(*args):
return os.path.expanduser(os.path.join(*args))
#if sys.platform in ('os2emx', 'riscos'):
# # Don't know what to put here
# USER_BASE = ''
# USER_SITE = ''
if os.name == "nt":
base = os.environ.get("APPDATA") or "~"
if env_base:
USER_BASE = env_base
else:
USER_BASE = joinuser(base, "Python")
USER_SITE = os.path.join(USER_BASE,
"Python" + sys.version[0] + sys.version[2],
"site-packages")
else:
|
python
|
{
"resource": ""
}
|
q276625
|
setquit
|
test
|
def setquit():
"""Define new built-ins 'quit' and 'exit'.
These are simply strings that display a hint on how to exit.
"""
if os.sep == ':':
eof = 'Cmd-Q'
elif os.sep == '\\':
eof = 'Ctrl-Z plus Return'
else:
eof = 'Ctrl-D (i.e. EOF)'
class Quitter(object):
def __init__(self, name):
self.name = name
def __repr__(self):
return 'Use %s() or %s to exit' % (self.name, eof)
def __call__(self, code=None):
# Shells
|
python
|
{
"resource": ""
}
|
q276626
|
aliasmbcs
|
test
|
def aliasmbcs():
"""On Windows, some default encodings are not provided by Python,
while they are always available as "mbcs" in each locale. Make
them usable by aliasing to "mbcs" in such a case."""
if sys.platform == 'win32':
import locale, codecs
enc = locale.getdefaultlocale()[1]
if enc.startswith('cp'): # "cp***" ?
try:
|
python
|
{
"resource": ""
}
|
q276627
|
setencoding
|
test
|
def setencoding():
"""Set the string encoding used by the Unicode implementation. The
default is 'ascii', but if you're willing to experiment, you can
change this."""
encoding = "ascii" # Default value set by _PyUnicode_Init()
if 0:
# Enable to support locale aware default string encodings.
import locale
loc = locale.getdefaultlocale()
if loc[1]:
encoding = loc[1]
if 0:
# Enable to switch off string to Unicode coercion
|
python
|
{
"resource": ""
}
|
q276628
|
force_global_eggs_after_local_site_packages
|
test
|
def force_global_eggs_after_local_site_packages():
"""
Force easy_installed eggs in the global environment to get placed
in sys.path after all packages inside the virtualenv. This
maintains the "least surprise" result that packages in the
virtualenv always mask global packages, never the other way
around.
"""
egginsert = getattr(sys,
|
python
|
{
"resource": ""
}
|
q276629
|
fixclasspath
|
test
|
def fixclasspath():
"""Adjust the special classpath sys.path entries for Jython. These
entries should follow the base virtualenv lib directories.
"""
paths = []
|
python
|
{
"resource": ""
}
|
q276630
|
Popen_nonblocking
|
test
|
def Popen_nonblocking(*args, **kwargs):
"""
Open a subprocess without blocking. Return a process handle with any
output streams replaced by queues of lines from that stream.
Usage::
proc = Popen_nonblocking(..., stdout=subprocess.PIPE)
try:
out_line = proc.stdout.get_nowait()
except queue.Empty:
"no output available"
else:
handle_output(out_line)
"""
|
python
|
{
"resource": ""
}
|
q276631
|
have_pyrex
|
test
|
def have_pyrex():
"""
Return True if Cython or Pyrex can be imported.
"""
pyrex_impls = 'Cython.Distutils.build_ext', 'Pyrex.Distutils.build_ext'
for pyrex_impl in pyrex_impls:
try:
# from (pyrex_impl) import build_ext
|
python
|
{
"resource": ""
}
|
q276632
|
Extension._convert_pyx_sources_to_lang
|
test
|
def _convert_pyx_sources_to_lang(self):
"""
Replace sources with .pyx extensions to sources with the target
language extension. This mechanism allows language authors to supply
pre-converted sources but to prefer the .pyx sources.
|
python
|
{
"resource": ""
}
|
q276633
|
DebuggedApplication.debug_application
|
test
|
def debug_application(self, environ, start_response):
"""Run the application and conserve the traceback frames."""
app_iter = None
try:
app_iter = self.app(environ, start_response)
for item in app_iter:
yield item
if hasattr(app_iter, 'close'):
app_iter.close()
except Exception:
if hasattr(app_iter, 'close'):
app_iter.close()
traceback = get_current_traceback(skip=1, show_hidden_frames=
self.show_hidden_frames,
ignore_system_exceptions=True)
for frame in traceback.frames:
self.frames[frame.id] = frame
self.tracebacks[traceback.id] = traceback
try:
start_response('500 INTERNAL SERVER ERROR', [
('Content-Type', 'text/html; charset=utf-8'),
# Disable Chrome's XSS protection, the debug
# output can cause false-positives.
('X-XSS-Protection', '0'),
|
python
|
{
"resource": ""
}
|
q276634
|
DebuggedApplication.get_resource
|
test
|
def get_resource(self, request, filename):
"""Return a static resource from the shared folder."""
filename = join(dirname(__file__), 'shared', basename(filename))
if isfile(filename):
mimetype = mimetypes.guess_type(filename)[0] \
or 'application/octet-stream'
|
python
|
{
"resource": ""
}
|
q276635
|
user_agent
|
test
|
def user_agent():
"""
Return a string representing the user agent.
"""
data = {
"installer": {"name": "pip", "version": pip.__version__},
"python": platform.python_version(),
"implementation": {
"name": platform.python_implementation(),
},
}
if data["implementation"]["name"] == 'CPython':
data["implementation"]["version"] = platform.python_version()
elif data["implementation"]["name"] == 'PyPy':
if sys.pypy_version_info.releaselevel == 'final':
pypy_version_info = sys.pypy_version_info[:3]
else:
pypy_version_info = sys.pypy_version_info
data["implementation"]["version"] = ".".join(
[str(x) for x in pypy_version_info]
)
elif data["implementation"]["name"] == 'Jython':
# Complete Guess
data["implementation"]["version"] = platform.python_version()
elif data["implementation"]["name"] == 'IronPython':
# Complete Guess
data["implementation"]["version"] = platform.python_version()
if sys.platform.startswith("linux"):
distro = dict(filter(
lambda x: x[1],
zip(["name", "version", "id"], platform.linux_distribution()),
))
libc = dict(filter(
lambda x: x[1],
|
python
|
{
"resource": ""
}
|
q276636
|
is_url
|
test
|
def is_url(name):
"""Returns true if the name looks like a URL"""
if ':' not in name:
return False
scheme = name.split(':', 1)[0].lower()
|
python
|
{
"resource": ""
}
|
q276637
|
unpack_file_url
|
test
|
def unpack_file_url(link, location, download_dir=None):
"""Unpack link into location.
If download_dir is provided and link points to a file, make a copy
of the link file inside download_dir."""
link_path = url_to_path(link.url_without_fragment)
# If it's a url to a local directory
if os.path.isdir(link_path):
if os.path.isdir(location):
rmtree(location)
shutil.copytree(link_path, location, symlinks=True)
if download_dir:
logger.info('Link is a directory, ignoring download_dir')
return
# if link has a hash, let's confirm it matches
if link.hash:
link_path_hash = _get_hash_from_file(link_path, link)
_check_hash(link_path_hash, link)
# If a download dir is specified, is the file already there and valid?
already_downloaded_path = None
if download_dir:
already_downloaded_path =
|
python
|
{
"resource": ""
}
|
q276638
|
_download_http_url
|
test
|
def _download_http_url(link, session, temp_dir):
"""Download link url into temp_dir using provided session"""
target_url = link.url.split('#', 1)[0]
try:
resp = session.get(
target_url,
# We use Accept-Encoding: identity here because requests
# defaults to accepting compressed responses. This breaks in
# a variety of ways depending on how the server is configured.
# - Some servers will notice that the file isn't a compressible
# file and will leave the file alone and with an empty
# Content-Encoding
# - Some servers will notice that the file is already
# compressed and will leave the file alone and will add a
# Content-Encoding: gzip header
# - Some servers won't notice anything at all and will take
# a file that's already been compressed and compress it again
# and set the Content-Encoding: gzip header
# By setting this to request only the identity encoding We're
# hoping to eliminate the third case. Hopefully
|
python
|
{
"resource": ""
}
|
q276639
|
_check_download_dir
|
test
|
def _check_download_dir(link, download_dir):
""" Check download_dir for previously downloaded file with correct hash
If a correct file is found return its path else None
"""
download_path = os.path.join(download_dir, link.filename)
if os.path.exists(download_path):
# If already downloaded, does its hash match?
logger.info('File was already downloaded %s', download_path)
if link.hash:
download_hash = _get_hash_from_file(download_path, link)
try:
_check_hash(download_hash, link)
|
python
|
{
"resource": ""
}
|
q276640
|
CurrencyHandler.currencyFormat
|
test
|
def currencyFormat(_context, code, symbol, format,
currency_digits=True, decimal_quantization=True,
name=''):
"""Handle currencyFormat subdirectives."""
_context.action(
discriminator=('currency', name, code),
|
python
|
{
"resource": ""
}
|
q276641
|
CurrencyHandler.exchange
|
test
|
def exchange(_context, component, backend, base, name=''):
"""Handle exchange subdirectives."""
_context.action(
discriminator=('currency', 'exchange', component),
|
python
|
{
"resource": ""
}
|
q276642
|
HTTPResponse._decode
|
test
|
def _decode(self, data, decode_content, flush_decoder):
"""
Decode the data passed in and potentially flush the decoder.
"""
try:
if decode_content and self._decoder:
data = self._decoder.decompress(data)
except (IOError, zlib.error) as e:
content_encoding = self.headers.get('content-encoding', '').lower()
raise DecodeError(
"Received response with content-encoding: %s, but "
"failed to
|
python
|
{
"resource": ""
}
|
q276643
|
_default_template_ctx_processor
|
test
|
def _default_template_ctx_processor():
"""Default template context processor. Injects `request`,
`session` and `g`.
"""
reqctx = _request_ctx_stack.top
appctx = _app_ctx_stack.top
rv = {}
if appctx is not None:
rv['g'] = appctx.g
|
python
|
{
"resource": ""
}
|
q276644
|
_render
|
test
|
def _render(template, context, app):
"""Renders the template and fires the signal"""
rv = template.render(context)
|
python
|
{
"resource": ""
}
|
q276645
|
render_template
|
test
|
def render_template(template_name_or_list, **context):
"""Renders a template from the template folder with the given
context.
:param template_name_or_list: the name of the template to be
rendered, or an iterable with template names
the first one existing will be rendered
:param context: the variables that should be available in the
|
python
|
{
"resource": ""
}
|
q276646
|
render_template_string
|
test
|
def render_template_string(source, **context):
"""Renders a template from the given template source string
with the given context.
:param source: the sourcecode of the template to be
rendered
:param context: the variables that should be available in the
|
python
|
{
"resource": ""
}
|
q276647
|
parse_version
|
test
|
def parse_version(version):
"""Use parse_version from pkg_resources or distutils as available."""
global parse_version
|
python
|
{
"resource": ""
}
|
q276648
|
Identifiers.is_declared
|
test
|
def is_declared(self, name):
"""Check if a name is declared in this or an outer scope."""
|
python
|
{
"resource": ""
}
|
q276649
|
FrameIdentifierVisitor.visit_Name
|
test
|
def visit_Name(self, node):
"""All assignments to names go through this function."""
if node.ctx == 'store':
self.identifiers.declared_locally.add(node.name)
|
python
|
{
"resource": ""
}
|
q276650
|
CodeGenerator.visit_Include
|
test
|
def visit_Include(self, node, frame):
"""Handles includes."""
if node.with_context:
self.unoptimize_scope(frame)
if node.ignore_missing:
self.writeline('try:')
self.indent()
func_name = 'get_or_select_template'
if isinstance(node.template, nodes.Const):
if isinstance(node.template.value, string_types):
func_name = 'get_template'
elif isinstance(node.template.value, (tuple, list)):
func_name = 'select_template'
elif isinstance(node.template, (nodes.Tuple, nodes.List)):
func_name = 'select_template'
self.writeline('template = environment.%s(' % func_name, node)
self.visit(node.template, frame)
self.write(', %r)' % self.name)
if node.ignore_missing:
self.outdent()
self.writeline('except TemplateNotFound:')
self.indent()
self.writeline('pass')
self.outdent()
self.writeline('else:')
|
python
|
{
"resource": ""
}
|
q276651
|
CodeGenerator.visit_FromImport
|
test
|
def visit_FromImport(self, node, frame):
"""Visit named imports."""
self.newline(node)
self.write('included_template = environment.get_template(')
self.visit(node.template, frame)
self.write(', %r).' % self.name)
if node.with_context:
self.write('make_module(context.parent, True)')
else:
self.write('module')
var_names = []
discarded_names = []
for name in node.names:
if isinstance(name, tuple):
name, alias = name
else:
alias = name
self.writeline('l_%s = getattr(included_template, '
'%r, missing)' % (alias, name))
self.writeline('if l_%s is missing:' % alias)
self.indent()
self.writeline('l_%s = environment.undefined(%r %% '
'included_template.__name__, '
'name=%r)' %
(alias, 'the template %%r (imported on %s) does '
'not export the requested name %s' % (
self.position(node),
repr(name)
), name))
self.outdent()
if frame.toplevel:
var_names.append(alias)
if not alias.startswith('_'):
|
python
|
{
"resource": ""
}
|
q276652
|
make_wheelfile_inner
|
test
|
def make_wheelfile_inner(base_name, base_dir='.'):
"""Create a whl file from all the files under 'base_dir'.
Places .dist-info at the end of the archive."""
zip_filename = base_name + ".whl"
log.info("creating '%s' and adding '%s' to it", zip_filename, base_dir)
# XXX support bz2, xz when available
zip = zipfile.ZipFile(open(zip_filename, "wb+"), "w",
compression=zipfile.ZIP_DEFLATED)
score = {'WHEEL': 1, 'METADATA': 2, 'RECORD': 3}
deferred = []
def writefile(path):
zip.write(path, path)
log.info("adding '%s'" % path)
for dirpath, dirnames, filenames in os.walk(base_dir):
for name in filenames:
|
python
|
{
"resource": ""
}
|
q276653
|
atomize
|
test
|
def atomize(f, lock=None):
"""
Decorate a function with a reentrant lock to prevent multiple
threads from calling said thread simultaneously.
|
python
|
{
"resource": ""
}
|
q276654
|
service_factory
|
test
|
def service_factory(app, host, port,
report_message='service factory port {port}',
provider_cls=HTTPServiceProvider):
"""Create service, start server.
:param app: application to instantiate a service
:param host: interface to bound provider
:param port: port to bound provider
:param report_message: message format to report port
|
python
|
{
"resource": ""
}
|
q276655
|
unicode_urlencode
|
test
|
def unicode_urlencode(obj, charset='utf-8'):
"""URL escapes a single bytestring or unicode string with the
given charset if applicable to URL safe quoting under all rules
that need to be considered under all supported Python versions.
If non strings are provided they are converted to their unicode
representation first.
"""
if
|
python
|
{
"resource": ""
}
|
q276656
|
matches_requirement
|
test
|
def matches_requirement(req, wheels):
"""List of wheels matching a requirement.
:param req: The requirement to satisfy
:param wheels: List of wheels to search.
"""
try:
from pkg_resources import Distribution, Requirement
except ImportError:
raise RuntimeError("Cannot use requirements without pkg_resources")
req = Requirement.parse(req)
selected = []
|
python
|
{
"resource": ""
}
|
q276657
|
RequirementCommand.populate_requirement_set
|
test
|
def populate_requirement_set(requirement_set, args, options, finder,
session, name, wheel_cache):
"""
Marshal cmd line args into a requirement set.
"""
for req in args:
requirement_set.add_requirement(
InstallRequirement.from_line(
req, None, isolated=options.isolated_mode,
wheel_cache=wheel_cache
)
)
for req in options.editables:
requirement_set.add_requirement(
InstallRequirement.from_editable(
req,
default_vcs=options.default_vcs,
isolated=options.isolated_mode,
wheel_cache=wheel_cache
)
)
found_req_in_file = False
for filename in options.requirements:
for req in parse_requirements(
filename,
finder=finder, options=options, session=session,
|
python
|
{
"resource": ""
}
|
q276658
|
Bazaar.export
|
test
|
def export(self, location):
"""
Export the Bazaar repository at the url to the destination location
"""
temp_dir = tempfile.mkdtemp('-export', 'pip-')
self.unpack(temp_dir)
if os.path.exists(location):
# Remove the location to make sure Bazaar can export it correctly
|
python
|
{
"resource": ""
}
|
q276659
|
AmazonAPI.lookup
|
test
|
def lookup(self, ResponseGroup="Large", **kwargs):
"""Lookup an Amazon Product.
:return:
An instance of :class:`~.AmazonProduct` if one item was returned,
or a list of :class:`~.AmazonProduct` instances if multiple
items where returned.
"""
response = self.api.ItemLookup(ResponseGroup=ResponseGroup, **kwargs)
root = objectify.fromstring(response)
if root.Items.Request.IsValid == 'False':
code = root.Items.Request.Errors.Error.Code
msg = root.Items.Request.Errors.Error.Message
raise LookupException(
"Amazon Product Lookup Error: '{0}', '{1}'".format(code, msg))
if not hasattr(root.Items, 'Item'):
|
python
|
{
"resource": ""
}
|
q276660
|
AmazonSearch.iterate_pages
|
test
|
def iterate_pages(self):
"""Iterate Pages.
A generator which iterates over all pages.
Keep in mind that Amazon limits the number of pages it makes available.
:return:
Yields lxml root elements.
"""
try:
while True:
|
python
|
{
"resource": ""
}
|
q276661
|
AmazonBrowseNode.ancestor
|
test
|
def ancestor(self):
"""This browse node's immediate ancestor in the browse node tree.
:return:
The ancestor as an :class:`~.AmazonBrowseNode`, or None.
|
python
|
{
"resource": ""
}
|
q276662
|
AmazonBrowseNode.children
|
test
|
def children(self):
"""This browse node's children in the browse node tree.
:return:
A list of this browse node's children in the browse node tree.
"""
children = []
child_nodes = getattr(self.element, 'Children')
|
python
|
{
"resource": ""
}
|
q276663
|
AmazonProduct._safe_get_element
|
test
|
def _safe_get_element(self, path, root=None):
"""Safe Get Element.
Get a child element of root (multiple levels deep) failing silently
if any descendant does not exist.
:param root:
Lxml element.
:param path:
|
python
|
{
"resource": ""
}
|
q276664
|
AmazonProduct._safe_get_element_text
|
test
|
def _safe_get_element_text(self, path, root=None):
"""Safe get element text.
Get element as string or None,
:param root:
Lxml element.
:param path:
String path (i.e. 'Items.Item.Offers.Offer').
:return:
|
python
|
{
"resource": ""
}
|
q276665
|
AmazonProduct._safe_get_element_date
|
test
|
def _safe_get_element_date(self, path, root=None):
"""Safe get elemnent date.
Get element as datetime.date or None,
:param root:
Lxml element.
:param path:
String path (i.e. 'Items.Item.Offers.Offer').
:return:
datetime.date or None.
"""
|
python
|
{
"resource": ""
}
|
q276666
|
AmazonProduct.price_and_currency
|
test
|
def price_and_currency(self):
"""Get Offer Price and Currency.
Return price according to the following process:
* If product has a sale return Sales Price, otherwise,
* Return Price, otherwise,
* Return lowest offer price, otherwise,
* Return None.
:return:
A tuple containing:
1. Float representation of price.
2. ISO Currency code (string).
"""
price = self._safe_get_element_text(
'Offers.Offer.OfferListing.SalePrice.Amount')
|
python
|
{
"resource": ""
}
|
q276667
|
AmazonProduct.list_price
|
test
|
def list_price(self):
"""List Price.
:return:
A tuple containing:
1. Float representation of price.
2. ISO Currency code (string).
"""
price = self._safe_get_element_text('ItemAttributes.ListPrice.Amount')
currency = self._safe_get_element_text(
|
python
|
{
"resource": ""
}
|
q276668
|
CacheControlAdapter.send
|
test
|
def send(self, request, **kw):
"""
Send a request. Use the request information to see if it
exists in the cache and cache the response if we need to and can.
"""
if request.method == 'GET':
cached_response = self.controller.cached_request(request)
if cached_response:
return self.build_response(request, cached_response,
|
python
|
{
"resource": ""
}
|
q276669
|
CacheControlAdapter.build_response
|
test
|
def build_response(self, request, response, from_cache=False):
"""
Build a response by making a request or using the cache.
This will end up calling send and returning a potentially
cached response
"""
if not from_cache and request.method == 'GET':
# apply any expiration heuristics
if response.status == 304:
# We must have sent an ETag request. This could mean
# that we've been expired already or that we simply
# have an etag. In either case, we want to try and
# update the cache if that is the case.
cached_response = self.controller.update_cached_response(
request, response
)
if cached_response is not response:
from_cache = True
# We are done with the server response, read a
# possible response body (compliant servers will
# not return one, but we cannot be 100% sure) and
# release the connection back to the pool.
response.read(decode_content=False)
response.release_conn()
response = cached_response
# We always cache the 301 responses
elif response.status == 301:
self.controller.cache_response(request, response)
else:
# Check for any heuristics that might update headers
# before trying to cache.
if self.heuristic:
response = self.heuristic.apply(response)
# Wrap the response file with a wrapper that will cache the
#
|
python
|
{
"resource": ""
}
|
q276670
|
make_attrgetter
|
test
|
def make_attrgetter(environment, attribute):
"""Returns a callable that looks up the given attribute from a
passed object with the rules of the environment. Dots are allowed
to access attributes of attributes. Integer parts in paths are
looked up as integers.
"""
if not isinstance(attribute, string_types) \
or ('.' not in attribute and not attribute.isdigit()):
return lambda x: environment.getitem(x,
|
python
|
{
"resource": ""
}
|
q276671
|
do_title
|
test
|
def do_title(s):
"""Return a titlecased version of the value. I.e. words will start with
uppercase letters, all remaining characters are lowercase.
|
python
|
{
"resource": ""
}
|
q276672
|
do_sort
|
test
|
def do_sort(environment, value, reverse=False, case_sensitive=False,
attribute=None):
"""Sort an iterable. Per default it sorts ascending, if you pass it
true as first argument it will reverse the sorting.
If the iterable is made of strings the third parameter can be used to
control the case sensitiveness of the comparison which is disabled by
default.
.. sourcecode:: jinja
{% for item in iterable|sort %}
...
{% endfor %}
It is also possible to sort by an attribute (for example to sort
by the date of an object) by specifying the `attribute` parameter:
.. sourcecode:: jinja
{% for item in iterable|sort(attribute='date') %}
...
{% endfor %}
|
python
|
{
"resource": ""
}
|
q276673
|
do_groupby
|
test
|
def do_groupby(environment, value, attribute):
"""Group a sequence of objects by a common attribute.
If you for example have a list of dicts or objects that represent persons
with `gender`, `first_name` and `last_name` attributes and you want to
group all users by genders you can do something like the following
snippet:
.. sourcecode:: html+jinja
<ul>
{% for group in persons|groupby('gender') %}
<li>{{ group.grouper }}<ul>
{% for person in group.list %}
<li>{{ person.first_name }} {{ person.last_name }}</li>
|
python
|
{
"resource": ""
}
|
q276674
|
do_map
|
test
|
def do_map(*args, **kwargs):
"""Applies a filter on a sequence of objects or looks up an attribute.
This is useful when dealing with lists of objects but you are really
only interested in a certain value of it.
The basic usage is mapping on an attribute. Imagine you have a list
of users but you are only interested in a list of usernames:
.. sourcecode:: jinja
Users on this page: {{ users|map(attribute='username')|join(', ') }}
Alternatively you can let it invoke a filter by passing the name of the
filter and the arguments afterwards. A good example would be applying a
|
python
|
{
"resource": ""
}
|
q276675
|
create_logger
|
test
|
def create_logger(app):
"""Creates a logger for the given application. This logger works
similar to a regular Python logger but changes the effective logging
level based on the application's debug flag. Furthermore this
function also removes all attached handlers in case there was a
logger with the log name before.
"""
Logger = getLoggerClass()
class DebugLogger(Logger):
def getEffectiveLevel(x):
if x.level == 0 and app.debug:
return DEBUG
return Logger.getEffectiveLevel(x)
class DebugHandler(StreamHandler):
def emit(x, record):
|
python
|
{
"resource": ""
}
|
q276676
|
constant_time_compare
|
test
|
def constant_time_compare(val1, val2):
"""Returns True if the two strings are equal, False otherwise.
The time taken is independent of the number of characters that match. Do
not use this function for anything else than comparison with known
length targets.
This is should be implemented in C in order to get it completely right.
"""
if _builtin_constant_time_compare is not None:
return
|
python
|
{
"resource": ""
}
|
q276677
|
SigningAlgorithm.verify_signature
|
test
|
def verify_signature(self, key, value, sig):
"""Verifies the given signature matches the expected signature"""
|
python
|
{
"resource": ""
}
|
q276678
|
Signer.derive_key
|
test
|
def derive_key(self):
"""This method is called to derive the key. If you're unhappy with
the default key derivation choices you can override them here.
Keep in mind that the key derivation in itsdangerous is not intended
to be used as a security method to make a complex key out of a short
password. Instead you should use large random secret keys.
"""
salt = want_bytes(self.salt)
if self.key_derivation == 'concat':
return self.digest_method(salt + self.secret_key).digest()
elif self.key_derivation == 'django-concat':
return self.digest_method(salt + b'signer' +
|
python
|
{
"resource": ""
}
|
q276679
|
Signer.get_signature
|
test
|
def get_signature(self, value):
"""Returns the signature for the given value"""
value = want_bytes(value)
key = self.derive_key()
|
python
|
{
"resource": ""
}
|
q276680
|
Signer.sign
|
test
|
def sign(self, value):
"""Signs the given string."""
return
|
python
|
{
"resource": ""
}
|
q276681
|
Signer.verify_signature
|
test
|
def verify_signature(self, value, sig):
"""Verifies the signature for the given value."""
key = self.derive_key()
|
python
|
{
"resource": ""
}
|
q276682
|
Signer.unsign
|
test
|
def unsign(self, signed_value):
"""Unsigns the given string."""
signed_value = want_bytes(signed_value)
sep = want_bytes(self.sep)
if sep not in signed_value:
raise BadSignature('No %r found in value' % self.sep)
value, sig = signed_value.rsplit(sep, 1)
|
python
|
{
"resource": ""
}
|
q276683
|
TimestampSigner.sign
|
test
|
def sign(self, value):
"""Signs the given string and also attaches a time information."""
value = want_bytes(value)
timestamp = base64_encode(int_to_bytes(self.get_timestamp()))
|
python
|
{
"resource": ""
}
|
q276684
|
TimestampSigner.validate
|
test
|
def validate(self, signed_value, max_age=None):
"""Just validates the given signed value. Returns `True` if the
signature exists and is valid, `False` otherwise."""
try:
|
python
|
{
"resource": ""
}
|
q276685
|
Serializer.dumps
|
test
|
def dumps(self, obj, salt=None):
"""Returns a signed string serialized with the internal serializer.
The return value can be either a byte or unicode string depending
on the format of the internal serializer.
"""
payload = want_bytes(self.dump_payload(obj))
|
python
|
{
"resource": ""
}
|
q276686
|
server_error
|
test
|
def server_error(request_id, error):
"""JSON-RPC server error.
:param request_id: JSON-RPC request id
:type request_id: int or str or None
:param error: server error
:type error: Exception
"""
response = {
'jsonrpc': '2.0',
'id': request_id,
|
python
|
{
"resource": ""
}
|
q276687
|
PackageFinder.find
|
test
|
def find(cls, where='.', exclude=(), include=('*',)):
"""Return a list all Python packages found within directory 'where'
'where' should be supplied as a "cross-platform" (i.e. URL-style)
path; it will be converted to the appropriate local path syntax.
'exclude' is a sequence of package names to exclude; '*' can be used
as a wildcard in the names, such that 'foo.*' will exclude all
subpackages of 'foo' (but not 'foo' itself).
'include' is a sequence of package names to include. If it's
specified, only the named packages will be included. If it's not
specified, all found packages will be included. 'include' can contain
shell style wildcard patterns just like 'exclude'.
|
python
|
{
"resource": ""
}
|
q276688
|
PackageFinder.require_parents
|
test
|
def require_parents(packages):
"""
Exclude any apparent package that apparently doesn't include its
parent.
For example, exclude 'foo.bar' if 'foo' is not present.
"""
found = []
for pkg in
|
python
|
{
"resource": ""
}
|
q276689
|
PackageFinder._all_dirs
|
test
|
def _all_dirs(base_path):
"""
Return all dirs in base_path, relative to base_path
"""
for root, dirs, files in os.walk(base_path, followlinks=True):
|
python
|
{
"resource": ""
}
|
q276690
|
Serializer.prepare_response
|
test
|
def prepare_response(self, request, cached):
"""Verify our vary headers match and construct a real urllib3
HTTPResponse object.
"""
# Special case the '*' Vary value as it means we cannot actually
# determine if the cached response is suitable for this request.
if "*" in cached.get("vary", {}):
return
# Ensure that the Vary headers for the cached response match our
# request
for header, value in cached.get("vary", {}).items():
|
python
|
{
"resource": ""
}
|
q276691
|
unsign
|
test
|
def unsign(wheelfile):
"""
Remove RECORD.jws from a wheel by truncating the zip file.
RECORD.jws must be at the end of the archive. The zip file must be an
ordinary archive, with the compressed files and the directory in the same
order, and without any non-zip content after the truncation point.
"""
import wheel.install
vzf = wheel.install.VerifyingZipFile(wheelfile, "a")
|
python
|
{
"resource": ""
}
|
q276692
|
unpack
|
test
|
def unpack(wheelfile, dest='.'):
"""Unpack a wheel.
Wheel content will be unpacked to {dest}/{name}-{ver}, where {name}
is the package name and {ver} its version.
:param wheelfile: The path to the wheel.
|
python
|
{
"resource": ""
}
|
q276693
|
install_scripts
|
test
|
def install_scripts(distributions):
"""
Regenerate the entry_points console_scripts for the named distribution.
"""
try:
from setuptools.command import easy_install
import pkg_resources
except ImportError:
raise RuntimeError("'wheel install_scripts' needs setuptools.")
for dist in distributions:
pkg_resources_dist = pkg_resources.get_distribution(dist)
install = wheel.paths.get_install_command(dist)
|
python
|
{
"resource": ""
}
|
q276694
|
Graph.arrange_all
|
test
|
def arrange_all(self):
""" Sets for the _draw_ and _ldraw_ attributes for each of the graph
sub-elements by processing the xdot format of the graph.
"""
import godot.dot_data_parser
parser = godot.dot_data_parser.GodotDataParser()
xdot_data = self.create( format = "xdot" )
# print "GRAPH DOT:\n", str( self )
#
|
python
|
{
"resource": ""
}
|
q276695
|
Graph.redraw_canvas
|
test
|
def redraw_canvas(self):
""" Parses the Xdot attributes of all graph components and adds
the components to a new canvas.
"""
from xdot_parser import XdotAttrParser
xdot_parser = XdotAttrParser()
canvas = self._component_default()
for node in self.nodes:
components = xdot_parser.parse_xdot_data( node._draw_ )
canvas.add( *components )
components = xdot_parser.parse_xdot_data( node._ldraw_ )
canvas.add( *components )
for edge in self.edges:
components = xdot_parser.parse_xdot_data( edge._draw_ )
canvas.add( *components )
components = xdot_parser.parse_xdot_data( edge._ldraw_ )
|
python
|
{
"resource": ""
}
|
q276696
|
Graph.get_node
|
test
|
def get_node(self, ID):
""" Returns a node given an ID or None if no such node exists.
"""
node = super(Graph, self).get_node(ID)
if node is not None:
return node
for graph in self.all_graphs:
|
python
|
{
"resource": ""
}
|
q276697
|
Graph._directed_changed
|
test
|
def _directed_changed(self, new):
""" Sets the connection string for all edges.
"""
if new:
conn = "->"
else:
|
python
|
{
"resource": ""
}
|
q276698
|
Graph._on_edges
|
test
|
def _on_edges(self, object, name, old, new):
""" Handles the list of edges for any graph changing.
"""
if name == "edges_items":
edges = new.added
elif name == "edges":
edges = new
else:
edges = []
all_nodes = [n for g in self.all_graphs for n in g.nodes]
for each_edge in edges:
# Ensure the edge's nodes exist in the graph.
if each_edge.tail_node not in all_nodes:
|
python
|
{
"resource": ""
}
|
q276699
|
ComponentViewer._component_changed
|
test
|
def _component_changed(self, old, new):
""" Handles the component being changed.
"""
canvas = self.canvas
if old is not None:
|
python
|
{
"resource": ""
}
|
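
For readers who want to work with these rows programmatically, the sketch below shows one possible way to represent a single record and filter by partition, assuming the rows are available as plain Python dictionaries keyed by the column names in the schema above. The abbreviated record and the helper function are illustrative only and are not part of the dataset.

```python
# Illustrative only: one abbreviated record from the table above (row q276600),
# represented as a plain dict keyed by the schema's column names.
record = {
    "_id": "q276600",
    "title": "_iter_module_files",
    "partition": "test",
    "text": "def _iter_module_files():\n    ...",  # snippet truncated, as in the viewer
    "language": "python",
    "meta_information": {"resource": ""},
}


def rows_for_partition(rows, partition):
    """Return only the rows whose 'partition' field matches the given value."""
    return [row for row in rows if row["partition"] == partition]


# Example: keep only the 'test' partition and print each function/method title.
for row in rows_for_partition([record], "test"):
    print(row["_id"], row["title"])
```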