Dataset columns:
_id: string (lengths 2-7)
title: string (lengths 1-88)
partition: string (3 classes)
text: string (lengths 75-19.8k)
language: string (1 class)
meta_information: dict
q276500
_get_mro
test
def _get_mro(cls):
    """Get an mro for a type or classic class"""
    if not isinstance(cls, type):
        class cls(cls, object):
            pass
        return cls.__mro__[1:]
    return cls.__mro__
python
{ "resource": "" }
q276501
_find_adapter
test
def _find_adapter(registry, ob):
    """Return an adapter factory for `ob` from `registry`"""
    for t in _get_mro(getattr(ob, '__class__', type(ob))):
        if t in registry:
            return registry[t]
python
{ "resource": "" }
q276502
ensure_directory
test
def ensure_directory(path):
    """Ensure that the parent directory of `path` exists"""
    dirname = os.path.dirname(path)
    if not os.path.isdir(dirname):
        os.makedirs(dirname)
python
{ "resource": "" }
q276503
WorkingSet.iter_entry_points
test
def iter_entry_points(self, group, name=None):
    """Yield entry point objects from `group` matching `name`

    If `name` is None, yields all entry points in `group` from all
    distributions in the working set, otherwise only ones matching
    both `group` and `name` are yielded (in distribution order).
    """
    for dist in self:
        entries = dist.get_entry_map(group)
        if name is None:
            for ep in entries.values():
                yield ep
        elif name in entries:
            yield entries[name]
python
{ "resource": "" }
q276504
Environment.can_add
test
def can_add(self, dist):
    """Is distribution `dist` acceptable for this environment?

    The distribution must match the platform and python version
    requirements specified when this environment was created, or False
    is returned.
    """
    return (self.python is None or dist.py_version is None
            or dist.py_version == self.python) \
        and compatible_platforms(dist.platform, self.platform)
python
{ "resource": "" }
q276505
Environment.best_match
test
def best_match(self, req, working_set, installer=None):
    """Find distribution best matching `req` and usable on `working_set`

    This calls the ``find(req)`` method of the `working_set` to see if a
    suitable distribution is already active.  (This may raise
    ``VersionConflict`` if an unsuitable version of the project is already
    active in the specified `working_set`.)  If a suitable distribution
    isn't active, this method returns the newest distribution in the
    environment that meets the ``Requirement`` in `req`.  If no suitable
    distribution is found, and `installer` is supplied, then the result of
    calling the environment's ``obtain(req, installer)`` method will be
    returned.
    """
    dist = working_set.find(req)
    if dist is not None:
        return dist
    for dist in self[req.key]:
        if dist in req:
            return dist
    # try to download/install
    return self.obtain(req, installer)
python
{ "resource": "" }
q276506
MarkerEvaluation.evaluate_marker
test
def evaluate_marker(cls, text, extra=None):
    """
    Evaluate a PEP 426 environment marker on CPython 2.4+.
    Return a boolean indicating the marker result in this environment.
    Raise SyntaxError if marker is invalid.

    This implementation uses the 'parser' module, which is not implemented
    on Jython and has been superseded by the 'ast' module in Python 2.6
    and later.
    """
    return cls.interpret(parser.expr(text).totuple(1)[1])
python
{ "resource": "" }
q276507
MarkerEvaluation._markerlib_evaluate
test
def _markerlib_evaluate(cls, text):
    """
    Evaluate a PEP 426 environment marker using markerlib.
    Return a boolean indicating the marker result in this environment.
    Raise SyntaxError if marker is invalid.
    """
    from pip._vendor import _markerlib
    # markerlib implements Metadata 1.2 (PEP 345) environment markers.
    # Translate the variables to Metadata 2.0 (PEP 426).
    env = _markerlib.default_environment()
    # Snapshot the keys before mutating: popping while iterating over
    # env.keys() directly would raise RuntimeError on Python 3.
    for key in list(env.keys()):
        new_key = key.replace('.', '_')
        env[new_key] = env.pop(key)
    try:
        result = _markerlib.interpret(text, env)
    except NameError as e:
        raise SyntaxError(e.args[0])
    return result
python
{ "resource": "" }
q276508
IndentingFormatter.format
test
def format(self, record):
    """
    Calls the standard formatter, but will indent all of the log messages
    by our current indentation level.
    """
    formatted = logging.Formatter.format(self, record)
    formatted = "".join([
        (" " * get_indentation()) + line
        for line in formatted.splitlines(True)
    ])
    return formatted
python
{ "resource": "" }
q276509
format_currency
test
def format_currency(
        number, currency, format=None, locale=LC_NUMERIC,
        currency_digits=True, format_type='standard',
        decimal_quantization=True):
    """Return formatted currency value.

    >>> format_currency(1099.98, 'USD', locale='en_US')
    u'$1,099.98'
    >>> format_currency(1099.98, 'USD', locale='es_CO')
    u'US$\\xa01.099,98'
    >>> format_currency(1099.98, 'EUR', locale='de_DE')
    u'1.099,98\\xa0\\u20ac'

    The format can also be specified explicitly.  The currency is
    placed with the '¤' sign.  As the sign gets repeated the format
    expands (¤ being the symbol, ¤¤ is the currency abbreviation and
    ¤¤¤ is the full name of the currency):

    >>> format_currency(1099.98, 'EUR', u'\xa4\xa4 #,##0.00', locale='en_US')
    u'EUR 1,099.98'
    >>> format_currency(1099.98, 'EUR', u'#,##0.00 \xa4\xa4\xa4',
    ...                 locale='en_US')
    u'1,099.98 euros'

    Currencies usually have a specific number of decimal digits.  This
    function favours that information over the given format:

    >>> format_currency(1099.98, 'JPY', locale='en_US')
    u'\\xa51,100'
    >>> format_currency(1099.98, 'COP', u'#,##0.00', locale='es_ES')
    u'1.100'

    However, the number of decimal digits can be overridden from the
    currency information, by setting the last parameter to ``False``:

    >>> format_currency(1099.98, 'JPY', locale='en_US', currency_digits=False)
    u'\\xa51,099.98'
    >>> format_currency(1099.98, 'COP', u'#,##0.00', locale='es_ES',
    ...                 currency_digits=False)
    u'1.099,98'

    If a format is not specified the type of currency format to use from the
    locale can be specified:

    >>> format_currency(1099.98, 'EUR', locale='en_US', format_type='standard')
    u'\\u20ac1,099.98'

    When the given currency format type is not available, an exception is
    raised:

    >>> format_currency('1099.98', 'EUR', locale='root', format_type='unknown')
    Traceback (most recent call last):
        ...
    UnknownCurrencyFormatError: "'unknown' is not a known currency format type"

    By default the locale is allowed to truncate and round a high-precision
    number by forcing its format pattern onto the decimal part.  You can
    bypass this behavior with the `decimal_quantization` parameter:

    >>> format_currency(1099.9876, 'USD', locale='en_US')
    u'$1,099.99'
    >>> format_currency(1099.9876, 'USD', locale='en_US',
    ...                 decimal_quantization=False)
    u'$1,099.9876'

    :param number: the number to format
    :param currency: the currency code
    :param format: the format string to use
    :param locale: the `Locale` object or locale identifier
    :param currency_digits: use the currency's natural number of decimal digits
    :param format_type: the currency format type to use
    :param decimal_quantization: Truncate and round high-precision numbers to
                                 the format pattern.  Defaults to `True`.
    """
    locale = Locale.parse(locale)
    if format:
        pattern = parse_pattern(format)
    else:
        try:
            p = locale.currency_formats[format_type]
            pattern = NumberPattern(
                p.pattern, p.prefix, p.suffix, p.grouping,
                p.int_prec, p.frac_prec, p.exp_prec, p.exp_plus)
        except KeyError:
            raise UnknownCurrencyFormatError(
                "%r is not a known currency format type" % format_type)
    return pattern.apply(
        number, locale, currency=currency, currency_digits=currency_digits,
        decimal_quantization=decimal_quantization)
python
{ "resource": "" }
q276510
parse_pattern
test
def parse_pattern(pattern):
    """Parse number format patterns"""
    if isinstance(pattern, NumberPattern):
        return pattern

    def _match_number(pattern):
        rv = number_re.search(pattern)
        if rv is None:
            raise ValueError('Invalid number pattern %r' % pattern)
        return rv.groups()

    pos_pattern = pattern

    # Do we have a negative subpattern?
    if ';' in pattern:
        pos_pattern, neg_pattern = pattern.split(';', 1)
        pos_prefix, number, pos_suffix = _match_number(pos_pattern)
        neg_prefix, _, neg_suffix = _match_number(neg_pattern)
    else:
        pos_prefix, number, pos_suffix = _match_number(pos_pattern)
        neg_prefix = '-' + pos_prefix
        neg_suffix = pos_suffix
    if 'E' in number:
        number, exp = number.split('E', 1)
    else:
        exp = None
    if '@' in number:
        if '.' in number and '0' in number:
            raise ValueError('Significant digit patterns can not contain '
                             '"@" or "0"')
    if '.' in number:
        integer, fraction = number.rsplit('.', 1)
    else:
        integer = number
        fraction = ''

    def parse_precision(p):
        """Calculate the min and max allowed digits"""
        min = max = 0
        for c in p:
            if c in '@0':
                min += 1
                max += 1
            elif c == '#':
                max += 1
            elif c == ',':
                continue
            else:
                break
        return min, max

    int_prec = parse_precision(integer)
    frac_prec = parse_precision(fraction)
    if exp:
        exp_plus = exp.startswith('+')
        exp = exp.lstrip('+')
        exp_prec = parse_precision(exp)
    else:
        exp_plus = None
        exp_prec = None
    grouping = babel.numbers.parse_grouping(integer)
    return NumberPattern(pattern, (pos_prefix, neg_prefix),
                         (pos_suffix, neg_suffix), grouping,
                         int_prec, frac_prec, exp_prec, exp_plus)
python
{ "resource": "" }
q276511
get_decimal_quantum
test
def get_decimal_quantum(precision):
    """Return minimal quantum of a number, as defined by precision."""
    assert isinstance(precision, (int, decimal.Decimal))
    return decimal.Decimal(10) ** (-precision)
python
{ "resource": "" }
q276512
get_decimal_precision
test
def get_decimal_precision(number):
    """Return maximum precision of a decimal instance's fractional part.

    Precision is extracted from the fractional part only.
    """
    # Copied from: https://github.com/mahmoud/boltons/pull/59
    assert isinstance(number, decimal.Decimal)
    decimal_tuple = number.normalize().as_tuple()
    if decimal_tuple.exponent >= 0:
        return 0
    return abs(decimal_tuple.exponent)
python
{ "resource": "" }
q276513
NumberPattern.scientific_notation_elements
test
def scientific_notation_elements(self, value, locale):
    """Returns normalized scientific notation components of a value."""
    # Normalize value to only have one lead digit.
    exp = value.adjusted()
    value = value * get_decimal_quantum(exp)
    assert value.adjusted() == 0

    # Shift exponent and value by the minimum number of leading digits
    # imposed by the rendering pattern.  And always make that number
    # greater or equal to 1.
    lead_shift = max([1, min(self.int_prec)]) - 1
    exp = exp - lead_shift
    value = value * get_decimal_quantum(-lead_shift)

    # Get exponent sign symbol.
    exp_sign = ''
    if exp < 0:
        exp_sign = babel.numbers.get_minus_sign_symbol(locale)
    elif self.exp_plus:
        exp_sign = babel.numbers.get_plus_sign_symbol(locale)

    # Normalize exponent value now that we have the sign.
    exp = abs(exp)

    return value, exp, exp_sign
python
{ "resource": "" }
q276514
total_seconds
test
def total_seconds(td):
    """Python 2.6 compatibility"""
    if hasattr(td, 'total_seconds'):
        return td.total_seconds()

    ms = td.microseconds
    secs = (td.seconds + td.days * 24 * 3600)
    return (ms + secs * 10**6) / 10**6
python
{ "resource": "" }
q276515
parse_requirements
test
def parse_requirements(strs):
    """Yield ``Requirement`` objects for each specification in `strs`

    `strs` must be a string, or a (possibly-nested) iterable thereof.
    """
    # create a steppable iterator, so we can handle \-continuations
    lines = iter(yield_lines(strs))

    def scan_list(ITEM, TERMINATOR, line, p, groups, item_name):
        items = []
        while not TERMINATOR(line, p):
            if CONTINUE(line, p):
                try:
                    line = next(lines)
                    p = 0
                except StopIteration:
                    msg = "\\ must not appear on the last nonblank line"
                    raise RequirementParseError(msg)

            match = ITEM(line, p)
            if not match:
                msg = "Expected " + item_name + " in"
                raise RequirementParseError(msg, line, "at", line[p:])

            items.append(match.group(*groups))
            p = match.end()

            match = COMMA(line, p)
            if match:
                # skip the comma
                p = match.end()
            elif not TERMINATOR(line, p):
                msg = "Expected ',' or end-of-list in"
                raise RequirementParseError(msg, line, "at", line[p:])

        match = TERMINATOR(line, p)
        # skip the terminator, if any
        if match:
            p = match.end()
        return line, p, items

    for line in lines:
        match = DISTRO(line)
        if not match:
            raise RequirementParseError("Missing distribution spec", line)
        project_name = match.group(1)
        p = match.end()
        extras = []

        match = OBRACKET(line, p)
        if match:
            p = match.end()
            line, p, extras = scan_list(
                DISTRO, CBRACKET, line, p, (1,), "'extra' name"
            )

        line, p, specs = scan_list(VERSION, LINE_END, line, p, (1, 2),
                                   "version spec")
        specs = [(op, val) for op, val in specs]
        yield Requirement(project_name, specs, extras)
python
{ "resource": "" }
q276516
_get_unpatched
test
def _get_unpatched(cls):
    """Protect against re-patching the distutils if reloaded

    Also ensures that no other distutils extension monkeypatched the
    distutils first.
    """
    while cls.__module__.startswith('setuptools'):
        cls, = cls.__bases__
    if not cls.__module__.startswith('distutils'):
        raise AssertionError(
            "distutils has already been patched by %r" % cls
        )
    return cls
python
{ "resource": "" }
q276517
check_requirements
test
def check_requirements(dist, attr, value):
    """Verify that install_requires is a valid requirements list"""
    try:
        list(pkg_resources.parse_requirements(value))
    except (TypeError, ValueError) as error:
        tmpl = (
            "{attr!r} must be a string or list of strings "
            "containing valid project/version requirement specifiers; {error}"
        )
        raise DistutilsSetupError(tmpl.format(attr=attr, error=error))
python
{ "resource": "" }
q276518
Distribution.fetch_build_egg
test
def fetch_build_egg(self, req):
    """Fetch an egg needed for building"""
    try:
        cmd = self._egg_fetcher
        cmd.package_index.to_scan = []
    except AttributeError:
        from setuptools.command.easy_install import easy_install
        dist = self.__class__({'script_args': ['easy_install']})
        dist.parse_config_files()
        opts = dist.get_option_dict('easy_install')
        keep = (
            'find_links', 'site_dirs', 'index_url', 'optimize',
            'site_dirs', 'allow_hosts'
        )
        for key in list(opts):
            if key not in keep:
                del opts[key]  # don't use any other settings
        if self.dependency_links:
            links = self.dependency_links[:]
            if 'find_links' in opts:
                links = opts['find_links'][1].split() + links
            opts['find_links'] = ('setup', links)
        install_dir = self.get_egg_cache_dir()
        cmd = easy_install(
            dist, args=["x"], install_dir=install_dir,
            exclude_scripts=True,
            always_copy=False, build_directory=None, editable=False,
            upgrade=False, multi_version=True, no_report=True, user=False
        )
        cmd.ensure_finalized()
        self._egg_fetcher = cmd
    return cmd.easy_install(req)
python
{ "resource": "" }
q276519
do_dice_roll
test
def do_dice_roll():
    """
    Roll n-sided dice and return each result and the total
    """
    options = get_options()
    dice = Dice(options.sides)
    rolls = [dice.roll() for n in range(options.number)]
    for roll in rolls:
        print('rolled', roll)
    if options.number > 1:
        print('total', sum(rolls))
python
{ "resource": "" }
q276520
price_converter
test
def price_converter(obj):
    """Ensures that string prices are converted into Price objects."""
    if isinstance(obj, str):
        obj = PriceClass.parse(obj)
    return obj
python
{ "resource": "" }
q276521
price
test
def price(*args, **kwargs):
    """Price field for attrs.  See `help(attr.ib)` for full signature.

    Usage:
        >>> from pricing import fields
        ... @attr.s
        ... class Test:
        ...     price: Price = fields.price(default='USD 5.00')
        ...
        ... Test()
        Test(price=USD 5.00)
    """
    kwargs.setdefault('default', 'USD 0.00')
    kwargs.setdefault('converter', price_converter)
    if 'validator' in kwargs:
        validator = kwargs.pop('validator')
        if not isinstance(validator, (tuple, list)):
            validator = [validator]
    else:
        validator = []
    validator.append(instance_of(PriceClass))
    return attr.ib(validator=validator, *args, **kwargs)
python
{ "resource": "" }
q276522
Service.validate
test
def validate(self, request):
    """Validate JSON-RPC request.

    :param request: RPC request object
    :type request: dict
    """
    try:
        validate_version(request)
        validate_method(request)
        validate_params(request)
        validate_id(request)
    except (AssertionError, KeyError) as error:
        invalid_request(error)
python
{ "resource": "" }
q276523
Service.get_method
test
def get_method(self, args):
    """Get request method for service application."""
    try:
        method = self.app[args['method']]
    except KeyError:
        method_not_found(args['id'])
    else:
        return method
python
{ "resource": "" }
q276524
Service.apply
test
def apply(self, method, args):
    """Apply application method."""
    try:
        params = args['params']
        if isinstance(params, dict):
            result = method(**params)
        else:
            result = method(*params)
    except Exception as error:
        server_error(args['id'], error)
    else:
        return result
python
{ "resource": "" }
q276525
Request.module
test
def module(self):
    """The name of the current module if the request was dispatched
    to an actual module.  This is deprecated functionality, use blueprints
    instead.
    """
    from warnings import warn
    warn(DeprecationWarning('modules were deprecated in favor of '
                            'blueprints.  Use request.blueprint '
                            'instead.'), stacklevel=2)
    if self._is_old_module:
        return self.blueprint
python
{ "resource": "" }
q276526
Request.blueprint
test
def blueprint(self):
    """The name of the current blueprint"""
    if self.url_rule and '.' in self.url_rule.endpoint:
        return self.url_rule.endpoint.rsplit('.', 1)[0]
python
{ "resource": "" }
q276527
attach_enctype_error_multidict
test
def attach_enctype_error_multidict(request):
    """Since Flask 0.8 we're monkeypatching the files object in case a
    request is detected that does not use multipart form data but the files
    object is accessed.
    """
    oldcls = request.files.__class__

    class newcls(oldcls):
        def __getitem__(self, key):
            try:
                return oldcls.__getitem__(self, key)
            except KeyError as e:
                if key not in request.form:
                    raise
                raise DebugFilesKeyError(request, key)

    newcls.__name__ = oldcls.__name__
    newcls.__module__ = oldcls.__module__
    request.files.__class__ = newcls
python
{ "resource": "" }
q276528
make_abstract_dist
test
def make_abstract_dist(req_to_install):
    """Factory to make an abstract dist object.

    Preconditions: Either an editable req with a source_dir, or satisfied_by
    or a wheel link, or a non-editable req with a source_dir.

    :return: A concrete DistAbstraction.
    """
    if req_to_install.editable:
        return IsSDist(req_to_install)
    elif req_to_install.link and req_to_install.link.is_wheel:
        return IsWheel(req_to_install)
    else:
        return IsSDist(req_to_install)
python
{ "resource": "" }
q276529
RequirementSet.add_requirement
test
def add_requirement(self, install_req, parent_req_name=None):
    """Add install_req as a requirement to install.

    :param parent_req_name: The name of the requirement that needed this
        added.  The name is used because when multiple unnamed requirements
        resolve to the same name, we could otherwise end up with dependency
        links that point outside the Requirements set.  parent_req must
        already be added.  Note that None implies that this is a user
        supplied requirement, vs an inferred one.
    :return: Additional requirements to scan.  That is either [] if
        the requirement is not applicable, or [install_req] if the
        requirement is applicable and has just been added.
    """
    name = install_req.name
    if not install_req.match_markers():
        logger.warning("Ignoring %s: markers %r don't match your "
                       "environment", install_req.name,
                       install_req.markers)
        return []

    install_req.as_egg = self.as_egg
    install_req.use_user_site = self.use_user_site
    install_req.target_dir = self.target_dir
    install_req.pycompile = self.pycompile
    if not name:
        # url or path requirement w/o an egg fragment
        self.unnamed_requirements.append(install_req)
        return [install_req]
    else:
        if parent_req_name is None and self.has_requirement(name):
            raise InstallationError(
                'Double requirement given: %s (already in %s, name=%r)'
                % (install_req, self.get_requirement(name), name))
        if not self.has_requirement(name):
            # Add requirement
            self.requirements[name] = install_req
            # FIXME: what about other normalizations?  E.g., _ vs. -?
            if name.lower() != name:
                self.requirement_aliases[name.lower()] = name
            result = [install_req]
        else:
            # Canonicalise to the already-added object
            install_req = self.get_requirement(name)
            # No need to scan, this is a duplicate requirement.
            result = []
        if parent_req_name:
            parent_req = self.get_requirement(parent_req_name)
            self._dependencies[parent_req].append(install_req)
        return result
python
{ "resource": "" }
q276530
RequirementSet._walk_req_to_install
test
def _walk_req_to_install(self, handler):
    """Call handler for all pending reqs.

    :param handler: Handle a single requirement.  Should take a requirement
        to install.  Can optionally return an iterable of additional
        InstallRequirements to cover.
    """
    # The list() here is to avoid potential mutate-while-iterating bugs.
    discovered_reqs = []
    reqs = itertools.chain(
        list(self.unnamed_requirements), list(self.requirements.values()),
        discovered_reqs)
    for req_to_install in reqs:
        more_reqs = handler(req_to_install)
        if more_reqs:
            discovered_reqs.extend(more_reqs)
python
{ "resource": "" }
q276531
RequirementSet._check_skip_installed
test
def _check_skip_installed(self, req_to_install, finder):
    """Check if req_to_install should be skipped.

    This will check if the req is installed, and whether we should upgrade
    or reinstall it, taking into account all the relevant user options.

    After calling this req_to_install will only have satisfied_by set to
    None if the req_to_install is to be upgraded/reinstalled etc.  Any
    other value will be a dist recording the current thing installed that
    satisfies the requirement.

    Note that for vcs urls and the like we can't assess skipping in this
    routine - we simply identify that we need to pull the thing down,
    then later on it is pulled down and introspected to assess upgrade/
    reinstalls etc.

    :return: A text reason for why it was skipped, or None.
    """
    # Check whether to upgrade/reinstall this req or not.
    req_to_install.check_if_exists()
    if req_to_install.satisfied_by:
        skip_reason = 'satisfied (use --upgrade to upgrade)'
        if self.upgrade:
            best_installed = False
            # For link based requirements we have to pull the
            # tree down and inspect to assess the version #, so
            # its handled way down.
            if not (self.force_reinstall or req_to_install.link):
                try:
                    finder.find_requirement(req_to_install, self.upgrade)
                except BestVersionAlreadyInstalled:
                    skip_reason = 'up-to-date'
                    best_installed = True
                except DistributionNotFound:
                    # No distribution found, so we squash the
                    # error - it will be raised later when we
                    # re-try later to do the install.
                    # Why don't we just raise here?
                    pass

            if not best_installed:
                # don't uninstall conflict if user install and
                # conflict is not user install
                if not (self.use_user_site and
                        not dist_in_usersite(req_to_install.satisfied_by)):
                    req_to_install.conflicts_with = \
                        req_to_install.satisfied_by
                req_to_install.satisfied_by = None
        return skip_reason
    else:
        return None
python
{ "resource": "" }
q276532
RequirementSet._to_install
test
def _to_install(self):
    """Create the installation order.

    The installation order is topological - requirements are installed
    before the requiring thing.  We break cycles at an arbitrary point,
    and make no other guarantees.
    """
    # The current implementation, which we may change at any point
    # installs the user specified things in the order given, except when
    # dependencies must come earlier to achieve topological order.
    order = []
    ordered_reqs = set()

    def schedule(req):
        if req.satisfied_by or req in ordered_reqs:
            return
        ordered_reqs.add(req)
        for dep in self._dependencies[req]:
            schedule(dep)
        order.append(req)

    for install_req in self.requirements.values():
        schedule(install_req)
    return order
python
{ "resource": "" }
q276533
install_egg_info._get_all_ns_packages
test
def _get_all_ns_packages(self):
    """Return sorted list of all package namespaces"""
    nsp = set()
    for pkg in self.distribution.namespace_packages or []:
        pkg = pkg.split('.')
        while pkg:
            nsp.add('.'.join(pkg))
            pkg.pop()
    return sorted(nsp)
python
{ "resource": "" }
q276534
JsonResponseEncoder.default
test
def default(self, obj):
    """
    Convert QuerySet objects to their list counter-parts
    """
    if isinstance(obj, models.Model):
        return self.encode(model_to_dict(obj))
    elif isinstance(obj, models.query.QuerySet):
        return serializers.serialize('json', obj)
    else:
        return super(JsonResponseEncoder, self).default(obj)
python
{ "resource": "" }
q276535
tokenize_annotated
test
def tokenize_annotated(doc, annotation):
    """Tokenize a document and add an annotation attribute to each token
    """
    tokens = tokenize(doc, include_hrefs=False)
    for tok in tokens:
        tok.annotation = annotation
    return tokens
python
{ "resource": "" }
q276536
html_annotate_merge_annotations
test
def html_annotate_merge_annotations(tokens_old, tokens_new):
    """Merge the annotations from tokens_old into tokens_new, when the
    tokens in the new document already existed in the old document.
    """
    s = InsensitiveSequenceMatcher(a=tokens_old, b=tokens_new)
    commands = s.get_opcodes()

    for command, i1, i2, j1, j2 in commands:
        if command == 'equal':
            eq_old = tokens_old[i1:i2]
            eq_new = tokens_new[j1:j2]
            copy_annotations(eq_old, eq_new)
python
{ "resource": "" }
q276537
copy_annotations
test
def copy_annotations(src, dest):
    """
    Copy annotations from the tokens listed in src to the tokens in dest
    """
    assert len(src) == len(dest)
    for src_tok, dest_tok in zip(src, dest):
        dest_tok.annotation = src_tok.annotation
python
{ "resource": "" }
q276538
compress_tokens
test
def compress_tokens(tokens):
    """
    Combine adjacent tokens when there is no HTML between the tokens,
    and they share an annotation
    """
    result = [tokens[0]]
    for tok in tokens[1:]:
        if (not result[-1].post_tags and
                not tok.pre_tags and
                result[-1].annotation == tok.annotation):
            compress_merge_back(result, tok)
        else:
            result.append(tok)
    return result
python
{ "resource": "" }
q276539
markup_serialize_tokens
test
def markup_serialize_tokens(tokens, markup_func):
    """
    Serialize the list of tokens into a list of text chunks, calling
    markup_func around text to add annotations.
    """
    for token in tokens:
        for pre in token.pre_tags:
            yield pre
        html = token.html()
        html = markup_func(html, token.annotation)
        if token.trailing_whitespace:
            html += token.trailing_whitespace
        yield html
        for post in token.post_tags:
            yield post
python
{ "resource": "" }
q276540
expand_tokens
test
def expand_tokens(tokens, equal=False):
    """Given a list of tokens, return a generator of the chunks of
    text for the data in the tokens.
    """
    for token in tokens:
        for pre in token.pre_tags:
            yield pre
        if not equal or not token.hide_when_equal:
            if token.trailing_whitespace:
                yield token.html() + token.trailing_whitespace
            else:
                yield token.html()
        for post in token.post_tags:
            yield post
python
{ "resource": "" }
q276541
locate_unbalanced_end
test
def locate_unbalanced_end(unbalanced_end, pre_delete, post_delete):
    """like locate_unbalanced_start, except handling end tags and
    possibly moving the point earlier in the document.
    """
    while 1:
        if not unbalanced_end:
            # Success
            break
        finding = unbalanced_end[-1]
        finding_name = finding.split()[0].strip('<>/')
        if not pre_delete:
            break
        next = pre_delete[-1]
        if next is DEL_END or not next.startswith('</'):
            # A word or a start tag
            break
        name = next.split()[0].strip('<>/')
        if name == 'ins' or name == 'del':
            # Can't move into an insert or delete
            break
        if name == finding_name:
            unbalanced_end.pop()
            post_delete.insert(0, pre_delete.pop())
        else:
            # Found a tag that doesn't match
            break
python
{ "resource": "" }
q276542
fixup_chunks
test
def fixup_chunks(chunks):
    """
    This function takes a list of chunks and produces a list of tokens.
    """
    tag_accum = []
    cur_word = None
    result = []
    for chunk in chunks:
        if isinstance(chunk, tuple):
            if chunk[0] == 'img':
                src = chunk[1]
                tag, trailing_whitespace = split_trailing_whitespace(chunk[2])
                cur_word = tag_token('img', src, html_repr=tag,
                                     pre_tags=tag_accum,
                                     trailing_whitespace=trailing_whitespace)
                tag_accum = []
                result.append(cur_word)
            elif chunk[0] == 'href':
                href = chunk[1]
                cur_word = href_token(href, pre_tags=tag_accum,
                                      trailing_whitespace=" ")
                tag_accum = []
                result.append(cur_word)
            continue

        if is_word(chunk):
            chunk, trailing_whitespace = split_trailing_whitespace(chunk)
            cur_word = token(chunk, pre_tags=tag_accum,
                             trailing_whitespace=trailing_whitespace)
            tag_accum = []
            result.append(cur_word)
        elif is_start_tag(chunk):
            tag_accum.append(chunk)
        elif is_end_tag(chunk):
            if tag_accum:
                tag_accum.append(chunk)
            else:
                assert cur_word, (
                    "Weird state, cur_word=%r, result=%r, chunks=%r of %r"
                    % (cur_word, result, chunk, chunks))
                cur_word.post_tags.append(chunk)
        else:
            assert(0)

    if not result:
        return [token('', pre_tags=tag_accum)]
    else:
        result[-1].post_tags.extend(tag_accum)
    return result
python
{ "resource": "" }
q276543
flatten_el
test
def flatten_el(el, include_hrefs, skip_tag=False):
    """Takes an lxml element el, and generates all the text chunks for
    that tag.  Each start tag is a chunk, each word is a chunk, and each
    end tag is a chunk.

    If skip_tag is true, then the outermost container tag is
    not returned (just its contents)."""
    if not skip_tag:
        if el.tag == 'img':
            yield ('img', el.get('src'), start_tag(el))
        else:
            yield start_tag(el)
    if el.tag in empty_tags and not el.text and not len(el) and not el.tail:
        return
    start_words = split_words(el.text)
    for word in start_words:
        yield html_escape(word)
    for child in el:
        for item in flatten_el(child, include_hrefs=include_hrefs):
            yield item
    if el.tag == 'a' and el.get('href') and include_hrefs:
        yield ('href', el.get('href'))
    if not skip_tag:
        yield end_tag(el)
        end_words = split_words(el.tail)
        for word in end_words:
            yield html_escape(word)
python
{ "resource": "" }
q276544
split_words
test
def split_words(text):
    """Splits some text into words.  Includes trailing whitespace
    on each word when appropriate.
    """
    if not text or not text.strip():
        return []

    words = split_words_re.findall(text)
    return words
python
{ "resource": "" }
q276545
start_tag
test
def start_tag(el):
    """
    The text representation of the start tag for a tag.
    """
    return '<%s%s>' % (
        el.tag, ''.join([' %s="%s"' % (name, html_escape(value, True))
                         for name, value in el.attrib.items()]))
python
{ "resource": "" }
q276546
end_tag
test
def end_tag(el):
    """The text representation of an end tag for a tag.  Includes
    trailing whitespace when appropriate.
    """
    if el.tail and start_whitespace_re.search(el.tail):
        extra = ' '
    else:
        extra = ''
    return '</%s>%s' % (el.tag, extra)
python
{ "resource": "" }
q276547
serialize_html_fragment
test
def serialize_html_fragment(el, skip_outer=False):
    """Serialize a single lxml element as HTML.  The serialized form
    includes the element's tail.

    If skip_outer is true, then don't serialize the outermost tag
    """
    assert not isinstance(el, basestring), (
        "You should pass in an element, not a string like %r" % el)
    html = etree.tostring(el, method="html", encoding=_unicode)
    if skip_outer:
        # Get rid of the extra starting tag:
        html = html[html.find('>') + 1:]
        # Get rid of the extra end tag:
        html = html[:html.rfind('<')]
        return html.strip()
    else:
        return html
python
{ "resource": "" }
q276548
_fixup_ins_del_tags
test
def _fixup_ins_del_tags(doc):
    """fixup_ins_del_tags that works on an lxml document in-place
    """
    for tag in ['ins', 'del']:
        for el in doc.xpath('descendant-or-self::%s' % tag):
            if not _contains_block_level_tag(el):
                continue
            _move_el_inside_block(el, tag=tag)
            el.drop_tag()
python
{ "resource": "" }
q276549
extract_constant
test
def extract_constant(code, symbol, default=-1):
    """Extract the constant value of 'symbol' from 'code'

    If the name 'symbol' is bound to a constant value by the Python code
    object 'code', return that value.  If 'symbol' is bound to an expression,
    return 'default'.  Otherwise, return 'None'.

    Return value is based on the first assignment to 'symbol'.  'symbol' must
    be a global, or at least a non-"fast" local in the code block.  That is,
    only 'STORE_NAME' and 'STORE_GLOBAL' opcodes are checked, and 'symbol'
    must be present in 'code.co_names'.
    """
    if symbol not in code.co_names:
        # name's not there, can't possibly be an assignment
        return None

    name_idx = list(code.co_names).index(symbol)

    STORE_NAME = 90
    STORE_GLOBAL = 97
    LOAD_CONST = 100

    const = default

    for op, arg in _iter_code(code):
        if op == LOAD_CONST:
            const = code.co_consts[arg]
        elif arg == name_idx and (op == STORE_NAME or op == STORE_GLOBAL):
            return const
        else:
            const = default
python
{ "resource": "" }
q276550
AmazonCall.cache_url
test
def cache_url(self, **kwargs):
    """A simplified URL to be used for caching the given query."""
    query = {
        'Operation': self.Operation,
        'Service': "AWSECommerceService",
        'Version': self.Version,
    }
    query.update(kwargs)
    service_domain = SERVICE_DOMAINS[self.Region][0]
    return "http://" + service_domain + "/onca/xml?" + _quote_query(query)
python
{ "resource": "" }
q276551
autolink
test
def autolink(el, link_regexes=_link_regexes,
             avoid_elements=_avoid_elements,
             avoid_hosts=_avoid_hosts,
             avoid_classes=_avoid_classes):
    """
    Turn any URLs into links.

    It will search for links identified by the given regular expressions
    (by default mailto and http(s) links).

    It won't link text in an element in avoid_elements, or an element with
    a class in avoid_classes.  It won't link to anything with a host that
    matches one of the regular expressions in avoid_hosts (default
    localhost and 127.0.0.1).

    If you pass in an element, the element's tail will not be
    substituted, only the contents of the element.
    """
    if el.tag in avoid_elements:
        return
    class_name = el.get('class')
    if class_name:
        class_name = class_name.split()
        for match_class in avoid_classes:
            if match_class in class_name:
                return
    for child in list(el):
        autolink(child, link_regexes=link_regexes,
                 avoid_elements=avoid_elements,
                 avoid_hosts=avoid_hosts,
                 avoid_classes=avoid_classes)
        if child.tail:
            text, tail_children = _link_text(
                child.tail, link_regexes, avoid_hosts, factory=el.makeelement)
            if tail_children:
                child.tail = text
                index = el.index(child)
                el[index + 1:index + 1] = tail_children
    if el.text:
        text, pre_children = _link_text(
            el.text, link_regexes, avoid_hosts, factory=el.makeelement)
        if pre_children:
            el.text = text
            el[:0] = pre_children
python
{ "resource": "" }
q276552
Cleaner.kill_conditional_comments
test
def kill_conditional_comments(self, doc):
    """
    IE conditional comments basically embed HTML that the parser
    doesn't normally see.  We can't allow anything like that, so
    we'll kill any comments that could be conditional.
    """
    bad = []
    self._kill_elements(
        doc, lambda el: _conditional_comment_re.search(el.text),
        etree.Comment)
python
{ "resource": "" }
q276553
document_fromstring
test
def document_fromstring(html, guess_charset=True, parser=None):
    """Parse a whole document from a string."""
    if not isinstance(html, _strings):
        raise TypeError('string required')

    if parser is None:
        parser = html_parser

    return parser.parse(html, useChardet=guess_charset).getroot()
python
{ "resource": "" }
q276554
api_returns
test
def api_returns(return_values):
    """
    Define the return schema of an API.

    'return_values' is a dictionary mapping
    HTTP return code => documentation

    In addition to validating that the status code of the response belongs
    to one of the accepted status codes, it also validates that the
    returned object is JSON (derived from JsonResponse)

    In debug and test modes, failure to validate the fields will result
    in a 400 Bad Request response.
    In production mode, failure to validate will just log a warning,
    unless overwritten by a 'strict' setting.

    For example:

    @api_returns({
        200: 'Operation successful',
        403: 'User does not have permission',
        404: 'Resource not found',
        404: 'User not found',
    })
    def add(request, *args, **kwargs):
        if not request.user.is_superuser:
            return JsonResponseForbidden()  # 403

        return HttpResponse()  # 200
    """
    def decorator(func):
        @wraps(func)
        def wrapped_func(request, *args, **kwargs):
            return_value = func(request, *args, **kwargs)
            if not isinstance(return_value, JsonResponse):
                if settings.DEBUG:
                    return JsonResponseBadRequest('API did not return JSON')
                else:
                    logger.warn('API did not return JSON')

            # list() so the 500 below can be appended on Python 3,
            # where dict.keys() is a view.
            accepted_return_codes = list(return_values.keys())
            # Never block 500s - these should be handled by other
            # reporting mechanisms
            accepted_return_codes.append(500)

            if return_value.status_code not in accepted_return_codes:
                if settings.DEBUG:
                    return JsonResponseBadRequest(
                        'API returned %d instead of acceptable values %s' %
                        (return_value.status_code, accepted_return_codes)
                    )
                else:
                    logger.warn(
                        'API returned %d instead of acceptable values %s',
                        return_value.status_code,
                        accepted_return_codes,
                    )

            return return_value
        return wrapped_func
    return decorator
python
{ "resource": "" }
q276555
getTreeWalker
test
def getTreeWalker(treeType, implementation=None, **kwargs):
    """Get a TreeWalker class for various types of tree with built-in support

    treeType - the name of the tree type required (case-insensitive).
               Supported values are:

               "dom" - The xml.dom.minidom DOM implementation
               "pulldom" - The xml.dom.pulldom event stream
               "etree" - A generic walker for tree implementations exposing an
                         elementtree-like interface (known to work with
                         ElementTree, cElementTree and lxml.etree).
               "lxml" - Optimized walker for lxml.etree
               "genshi" - a Genshi stream

    implementation - (Currently applies to the "etree" tree type only).  A
                     module implementing the tree type e.g.
                     xml.etree.ElementTree or cElementTree."""

    treeType = treeType.lower()
    if treeType not in treeWalkerCache:
        if treeType in ("dom", "pulldom"):
            name = "%s.%s" % (__name__, treeType)
            __import__(name)
            mod = sys.modules[name]
            treeWalkerCache[treeType] = mod.TreeWalker
        elif treeType == "genshi":
            from . import genshistream
            treeWalkerCache[treeType] = genshistream.TreeWalker
        elif treeType == "lxml":
            from . import lxmletree
            treeWalkerCache[treeType] = lxmletree.TreeWalker
        elif treeType == "etree":
            from . import etree
            if implementation is None:
                implementation = default_etree
            # XXX: NEVER cache here, caching is done in the etree submodule
            return etree.getETreeModule(implementation, **kwargs).TreeWalker
    return treeWalkerCache.get(treeType)
python
{ "resource": "" }
q276556
Subversion.export
test
def export(self, location):
    """Export the svn repository at the url to the destination location"""
    url, rev = self.get_url_rev()
    rev_options = get_rev_options(url, rev)
    logger.info('Exporting svn repository %s to %s', url, location)
    with indent_log():
        if os.path.exists(location):
            # Subversion doesn't like to check out over an existing
            # directory --force fixes this, but was only added in svn 1.5
            rmtree(location)
        self.run_command(
            ['export'] + rev_options + [url, location],
            show_stdout=False)
python
{ "resource": "" }
q276557
Subversion.get_revision
test
def get_revision(self, location):
    """
    Return the maximum revision for all files under a given location
    """
    # Note: taken from setuptools.command.egg_info
    revision = 0

    for base, dirs, files in os.walk(location):
        if self.dirname not in dirs:
            dirs[:] = []
            continue  # no sense walking uncontrolled subdirs
        dirs.remove(self.dirname)
        entries_fn = os.path.join(base, self.dirname, 'entries')
        if not os.path.exists(entries_fn):
            # FIXME: should we warn?
            continue

        dirurl, localrev = self._get_svn_url_rev(base)

        if base == location:
            base_url = dirurl + '/'  # save the root url
        elif not dirurl or not dirurl.startswith(base_url):
            dirs[:] = []
            continue  # not part of the same svn tree, skip it
        revision = max(revision, localrev)
    return revision
python
{ "resource": "" }
q276558
setupmethod
test
def setupmethod(f):
    """Wraps a method so that it performs a check in debug mode if the
    first request was already handled.
    """
    def wrapper_func(self, *args, **kwargs):
        if self.debug and self._got_first_request:
            raise AssertionError('A setup function was called after the '
                'first request was handled.  This usually indicates a bug '
                'in the application where a module was not imported '
                'and decorators or other functionality was called too late.\n'
                'To fix this make sure to import all your view modules, '
                'database models and everything related at a central place '
                'before the application starts serving requests.')
        return f(self, *args, **kwargs)
    return update_wrapper(wrapper_func, f)
python
{ "resource": "" }
q276559
Flask.name
test
def name(self):
    """The name of the application.  This is usually the import name
    with the difference that it's guessed from the run file if the
    import name is main.  This name is used as a display name when
    Flask needs the name of the application.  It can be set and overridden
    to change the value.

    .. versionadded:: 0.8
    """
    if self.import_name == '__main__':
        fn = getattr(sys.modules['__main__'], '__file__', None)
        if fn is None:
            return '__main__'
        return os.path.splitext(os.path.basename(fn))[0]
    return self.import_name
python
{ "resource": "" }
q276560
Flask.propagate_exceptions
test
def propagate_exceptions(self):
    """Returns the value of the `PROPAGATE_EXCEPTIONS` configuration
    value in case it's set, otherwise a sensible default is returned.

    .. versionadded:: 0.7
    """
    rv = self.config['PROPAGATE_EXCEPTIONS']
    if rv is not None:
        return rv
    return self.testing or self.debug
python
{ "resource": "" }
q276561
Flask.auto_find_instance_path
test
def auto_find_instance_path(self):
    """Tries to locate the instance path if it was not provided to the
    constructor of the application class.  It will basically calculate
    the path to a folder named ``instance`` next to your main file or
    the package.

    .. versionadded:: 0.8
    """
    prefix, package_path = find_package(self.import_name)
    if prefix is None:
        return os.path.join(package_path, 'instance')
    return os.path.join(prefix, 'var', self.name + '-instance')
python
{ "resource": "" }
q276562
Flask.update_template_context
test
def update_template_context(self, context):
    """Update the template context with some commonly used variables.
    This injects request, session, config and g into the template
    context as well as everything template context processors want
    to inject.  Note that as of Flask 0.6, the original values in the
    context will not be overridden if a context processor decides to
    return a value with the same key.

    :param context: the context as a dictionary that is updated in place
                    to add extra variables.
    """
    funcs = self.template_context_processors[None]
    reqctx = _request_ctx_stack.top
    if reqctx is not None:
        bp = reqctx.request.blueprint
        if bp is not None and bp in self.template_context_processors:
            funcs = chain(funcs, self.template_context_processors[bp])
    orig_ctx = context.copy()
    for func in funcs:
        context.update(func())
    # make sure the original values win.  This makes it possible to
    # easier add new variables in context processors without breaking
    # existing views.
    context.update(orig_ctx)
python
{ "resource": "" }
q276563
Flask.handle_http_exception
test
def handle_http_exception(self, e):
    """Handles an HTTP exception.  By default this will invoke the
    registered error handlers and fall back to returning the
    exception as response.

    .. versionadded:: 0.3
    """
    handlers = self.error_handler_spec.get(request.blueprint)
    # Proxy exceptions don't have error codes.  We want to always return
    # those unchanged as errors
    if e.code is None:
        return e
    if handlers and e.code in handlers:
        handler = handlers[e.code]
    else:
        handler = self.error_handler_spec[None].get(e.code)
    if handler is None:
        return e
    return handler(e)
python
{ "resource": "" }
q276564
Flask.trap_http_exception
test
def trap_http_exception(self, e):
    """Checks if an HTTP exception should be trapped or not.  By default
    this will return `False` for all exceptions except for a bad request
    key error if ``TRAP_BAD_REQUEST_ERRORS`` is set to `True`.  It
    also returns `True` if ``TRAP_HTTP_EXCEPTIONS`` is set to `True`.

    This is called for all HTTP exceptions raised by a view function.
    If it returns `True` for any exception the error handler for this
    exception is not called and it shows up as regular exception in the
    traceback.  This is helpful for debugging implicitly raised HTTP
    exceptions.

    .. versionadded:: 0.8
    """
    if self.config['TRAP_HTTP_EXCEPTIONS']:
        return True
    if self.config['TRAP_BAD_REQUEST_ERRORS']:
        return isinstance(e, BadRequest)
    return False
python
{ "resource": "" }
q276565
Flask.handle_exception
test
def handle_exception(self, e):
    """Default exception handling that kicks in when an exception
    occurs that is not caught.  In debug mode the exception will
    be re-raised immediately, otherwise it is logged and the handler
    for a 500 internal server error is used.  If no such handler
    exists, a default 500 internal server error message is displayed.

    .. versionadded:: 0.3
    """
    exc_type, exc_value, tb = sys.exc_info()

    got_request_exception.send(self, exception=e)
    handler = self.error_handler_spec[None].get(500)

    if self.propagate_exceptions:
        # if we want to repropagate the exception, we can attempt to
        # raise it with the whole traceback in case we can do that
        # (the function was actually called from the except part)
        # otherwise, we just raise the error again
        if exc_value is e:
            reraise(exc_type, exc_value, tb)
        else:
            raise e

    self.log_exception((exc_type, exc_value, tb))
    if handler is None:
        return InternalServerError()
    return handler(e)
python
{ "resource": "" }
q276566
Flask.raise_routing_exception
test
def raise_routing_exception(self, request):
    """Exceptions that are recorded during routing are reraised with
    this method.  During debug we are not reraising redirect requests
    for non ``GET``, ``HEAD``, or ``OPTIONS`` requests and we're raising
    a different error instead to help debug situations.

    :internal:
    """
    if not self.debug \
            or not isinstance(request.routing_exception, RequestRedirect) \
            or request.method in ('GET', 'HEAD', 'OPTIONS'):
        raise request.routing_exception

    from .debughelpers import FormDataRoutingRedirect
    raise FormDataRoutingRedirect(request)
python
{ "resource": "" }
q276567
Flask.full_dispatch_request
test
def full_dispatch_request(self):
    """Dispatches the request and on top of that performs request
    pre and postprocessing as well as HTTP exception catching and
    error handling.

    .. versionadded:: 0.7
    """
    self.try_trigger_before_first_request_functions()
    try:
        request_started.send(self)
        rv = self.preprocess_request()
        if rv is None:
            rv = self.dispatch_request()
    except Exception as e:
        rv = self.handle_user_exception(e)
    response = self.make_response(rv)
    response = self.process_response(response)
    request_finished.send(self, response=response)
    return response
python
{ "resource": "" }
q276568
Flask.make_default_options_response
test
def make_default_options_response(self):
    """This method is called to create the default `OPTIONS` response.
    This can be changed through subclassing to change the default
    behavior of `OPTIONS` responses.

    .. versionadded:: 0.7
    """
    adapter = _request_ctx_stack.top.url_adapter
    if hasattr(adapter, 'allowed_methods'):
        methods = adapter.allowed_methods()
    else:
        # fallback for Werkzeug < 0.7
        methods = []
        try:
            adapter.match(method='--')
        except MethodNotAllowed as e:
            methods = e.valid_methods
        except HTTPException as e:
            pass
    rv = self.response_class()
    rv.allow.update(methods)
    return rv
python
{ "resource": "" }
q276569
Flask.create_url_adapter
test
def create_url_adapter(self, request):
    """Creates a URL adapter for the given request.  The URL adapter
    is created at a point where the request context is not yet set up
    so the request is passed explicitly.

    .. versionadded:: 0.6

    .. versionchanged:: 0.9
       This can now also be called without a request object when the
       URL adapter is created for the application context.
    """
    if request is not None:
        return self.url_map.bind_to_environ(
            request.environ,
            server_name=self.config['SERVER_NAME'])
    # We need at the very least the server name to be set for this
    # to work.
    if self.config['SERVER_NAME'] is not None:
        return self.url_map.bind(
            self.config['SERVER_NAME'],
            script_name=self.config['APPLICATION_ROOT'] or '/',
            url_scheme=self.config['PREFERRED_URL_SCHEME'])
python
{ "resource": "" }
q276570
Flask.inject_url_defaults
test
def inject_url_defaults(self, endpoint, values):
    """Injects the URL defaults for the given endpoint directly into
    the values dictionary passed.  This is used internally and
    automatically called on URL building.

    .. versionadded:: 0.7
    """
    funcs = self.url_default_functions.get(None, ())
    if '.' in endpoint:
        bp = endpoint.rsplit('.', 1)[0]
        funcs = chain(funcs, self.url_default_functions.get(bp, ()))
    for func in funcs:
        func(endpoint, values)
python
{ "resource": "" }
q276571
unique
test
def unique(iterable):
    """
    Yield unique values in iterable, preserving order.
    """
    seen = set()
    for value in iterable:
        if value not in seen:
            seen.add(value)
            yield value
python
{ "resource": "" }
q276572
handle_requires
test
def handle_requires(metadata, pkg_info, key):
    """
    Place the runtime requirements from pkg_info into metadata.
    """
    may_requires = defaultdict(list)
    for value in pkg_info.get_all(key):
        extra_match = EXTRA_RE.search(value)
        if extra_match:
            groupdict = extra_match.groupdict()
            condition = groupdict['condition']
            extra = groupdict['extra']
            package = groupdict['package']
            if condition.endswith(' and '):
                condition = condition[:-5]
        else:
            condition, extra = None, None
            package = value
        key = MayRequiresKey(condition, extra)
        may_requires[key].append(package)

    if may_requires:
        metadata['run_requires'] = []
        for key, value in may_requires.items():
            may_requirement = {'requires': value}
            if key.extra:
                may_requirement['extra'] = key.extra
            if key.condition:
                may_requirement['environment'] = key.condition
            metadata['run_requires'].append(may_requirement)

        if 'extras' not in metadata:
            metadata['extras'] = []
        metadata['extras'].extend(
            [key.extra for key in may_requires.keys() if key.extra])
python
{ "resource": "" }
q276573
requires_to_requires_dist
test
def requires_to_requires_dist(requirement):
    """Compose the version predicates for requirement in PEP 345 fashion."""
    requires_dist = []
    for op, ver in requirement.specs:
        requires_dist.append(op + ver)
    if not requires_dist:
        return ''
    return " (%s)" % ','.join(requires_dist)
python
{ "resource": "" }
q276574
pkginfo_to_metadata
test
def pkginfo_to_metadata(egg_info_path, pkginfo_path):
    """
    Convert .egg-info directory with PKG-INFO to the Metadata 1.3 aka
    old-draft Metadata 2.0 format.
    """
    pkg_info = read_pkg_info(pkginfo_path)
    pkg_info.replace_header('Metadata-Version', '2.0')
    requires_path = os.path.join(egg_info_path, 'requires.txt')
    if os.path.exists(requires_path):
        requires = open(requires_path).read()
        for extra, reqs in pkg_resources.split_sections(requires):
            condition = ''
            if extra and ':' in extra:  # setuptools extra:condition syntax
                extra, condition = extra.split(':', 1)
            if extra:
                pkg_info['Provides-Extra'] = extra
                if condition:
                    condition += " and "
                condition += 'extra == %s' % repr(extra)
            if condition:
                condition = '; ' + condition
            for new_req in convert_requirements(reqs):
                pkg_info['Requires-Dist'] = new_req + condition

    description = pkg_info['Description']
    if description:
        pkg_info.set_payload(dedent_description(pkg_info))
        del pkg_info['Description']

    return pkg_info
python
{ "resource": "" }
q276575
PathFinder.modules
test
def modules(self):
    """return modules that match module_name"""
    # since the module has to be importable we go ahead and put the
    # basepath as the very first path to check as that should minimize
    # namespace collisions, this is what unittest does also
    sys.path.insert(0, self.basedir)

    for p in self.paths():
        # http://stackoverflow.com/questions/67631/
        try:
            module_name = self.module_path(p)
            logger.debug("Importing {} from path {}".format(module_name, p))
            m = importlib.import_module(module_name)
            yield m

        except Exception as e:
            logger.warning('Caught exception while importing {}: {}'.format(p, e))
            logger.warning(e, exc_info=True)

            error_info = getattr(self, 'error_info', None)
            if not error_info:
                exc_info = sys.exc_info()
                #raise e.__class__, e, exc_info[2]
                #self.error_info = (e, exc_info)
                self.error_info = exc_info
            continue

    sys.path.pop(0)
python
{ "resource": "" }
q276576
PathFinder.classes
test
def classes(self):
    """the partial self.class_name will be used to find actual TestCase
    classes"""
    for module in self.modules():
        cs = inspect.getmembers(module, inspect.isclass)
        class_name = getattr(self, 'class_name', '')
        class_regex = ''
        if class_name:
            if class_name.startswith("*"):
                class_name = class_name.strip("*")
                class_regex = re.compile(r'.*?{}'.format(class_name), re.I)
            else:
                class_regex = re.compile(r'^{}'.format(class_name), re.I)

        for c_name, c in cs:
            can_yield = True
            if class_regex and not class_regex.match(c_name):
                #if class_name and class_name not in c_name:
                can_yield = False

            if can_yield and issubclass(c, unittest.TestCase):
                if c is not unittest.TestCase:  # ignore actual TestCase class
                    logger.debug('class: {} matches {}'.format(c_name, class_name))
                    yield c
python
{ "resource": "" }
q276577
PathFinder.method_names
test
def method_names(self):
    """return the actual test methods that matched self.method_name"""
    for c in self.classes():
        #ms = inspect.getmembers(c, inspect.ismethod)
        # http://stackoverflow.com/questions/17019949/
        ms = inspect.getmembers(
            c, lambda f: inspect.ismethod(f) or inspect.isfunction(f))
        method_name = getattr(self, 'method_name', '')
        method_regex = ''
        if method_name:
            if method_name.startswith(self.method_prefix):
                method_regex = re.compile(r'^{}'.format(method_name), flags=re.I)
            else:
                if method_name.startswith("*"):
                    method_name = method_name.strip("*")
                    method_regex = re.compile(
                        r'^{}[_]{{0,1}}.*?{}'.format(self.method_prefix, method_name),
                        flags=re.I
                    )
                else:
                    method_regex = re.compile(
                        r'^{}[_]{{0,1}}{}'.format(self.method_prefix, method_name),
                        flags=re.I
                    )

        for m_name, m in ms:
            if not m_name.startswith(self.method_prefix):
                continue

            can_yield = True
            if method_regex and not method_regex.match(m_name):
                can_yield = False

            if can_yield:
                logger.debug('method: {} matches {}'.format(m_name, method_name))
                yield c, m_name
python
{ "resource": "" }
q276578
PathFinder._find_basename
test
def _find_basename(self, name, basenames, is_prefix=False):
    """check if name combined with test prefixes or postfixes is found
    anywhere in the list of basenames

    :param name: string, the name you're searching for
    :param basenames: list, a list of basenames to check
    :param is_prefix: bool, True if this is a prefix search, which means it
        will also check if name matches any of the basenames without the
        prefixes or postfixes, if it is False then the prefixes or postfixes
        must be present (ie, the module we're looking for is the actual test
        module, not the parent modules it's contained in)
    :returns: string, the basename if it is found
    """
    ret = ""
    fileroots = [(os.path.splitext(n)[0], n) for n in basenames]
    glob = False
    if name.startswith("*"):
        glob = True
        name = name.strip("*")

    for fileroot, basename in fileroots:
        if name in fileroot or fileroot in name:
            for pf in self.module_postfixes:
                logger.debug(
                    'Checking if basename {} starts with {} and ends with {}'.format(
                        basename, name, pf
                    ))
                if glob:
                    if name in fileroot and fileroot.endswith(pf):
                        ret = basename
                        break
                else:
                    if fileroot.startswith(name) and fileroot.endswith(pf):
                        ret = basename
                        break

            if not ret:
                for pf in self.module_prefixes:
                    n = pf + name
                    logger.debug('Checking if basename {} starts with {}'.format(basename, n))
                    if glob:
                        if fileroot.startswith(pf) and name in fileroot:
                            ret = basename
                            break
                    else:
                        if fileroot.startswith(n):
                            ret = basename
                            break

            if not ret:
                if is_prefix:
                    logger.debug('Checking if basename {} starts with {}'.format(basename, name))
                    if basename.startswith(name) or (glob and name in basename):
                        ret = basename
                else:
                    logger.debug(
                        'Checking if basename {} starts with {} and is a test module'.format(
                            basename, name
                        ))
                    if glob:
                        if name in basename and self._is_module_path(basename):
                            ret = basename
                    else:
                        if basename.startswith(name) and self._is_module_path(basename):
                            ret = basename

        if ret:
            logger.debug('Found basename {}'.format(ret))
            break

    return ret
python
{ "resource": "" }
q276579
PathFinder._is_module_path
test
def _is_module_path(self, path):
    """Returns true if the passed in path is a test module path

    :param path: string, the path to check, will need to start or end with
        the module test prefixes or postfixes to be considered valid
    :returns: boolean, True if a test module path, False otherwise
    """
    ret = False
    basename = os.path.basename(path)
    fileroot = os.path.splitext(basename)[0]
    for pf in self.module_postfixes:
        if fileroot.endswith(pf):
            ret = True
            break

    if not ret:
        for pf in self.module_prefixes:
            if fileroot.startswith(pf):
                ret = True
                break
    return ret
python
{ "resource": "" }
q276580
PathFinder.walk
test
def walk(self, basedir):
    """Walk all the directories of basedir except hidden directories

    :param basedir: string, the directory to walk
    :returns: generator, same as os.walk
    """
    system_d = SitePackagesDir()
    filter_system_d = system_d and os.path.commonprefix([system_d, basedir]) != system_d

    for root, dirs, files in os.walk(basedir, topdown=True):
        # ignore dot directories and private directories (start with underscore)
        dirs[:] = [d for d in dirs if d[0] != '.' and d[0] != "_"]

        if filter_system_d:
            dirs[:] = [d for d in dirs if not d.startswith(system_d)]

        yield root, dirs, files
python
{ "resource": "" }
q276581
PathFinder.paths
test
def paths(self):
    '''
    given a basedir, yield all test modules paths recursively found in
    basedir that are test modules

    return -- generator
    '''
    module_name = getattr(self, 'module_name', '')
    module_prefix = getattr(self, 'prefix', '')
    filepath = getattr(self, 'filepath', '')

    if filepath:
        if os.path.isabs(filepath):
            yield filepath
        else:
            yield os.path.join(self.basedir, filepath)

    else:
        if module_prefix:
            basedirs = self._find_prefix_paths(self.basedir, module_prefix)
        else:
            basedirs = [self.basedir]

        for basedir in basedirs:
            try:
                if module_name:
                    path = self._find_module_path(basedir, module_name)
                else:
                    path = basedir

                if os.path.isfile(path):
                    logger.debug('Module path: {}'.format(path))
                    yield path

                else:
                    seen_paths = set()
                    for root, dirs, files in self.walk(path):
                        for basename in files:
                            if basename.startswith("__init__"):
                                if self._is_module_path(root):
                                    filepath = os.path.join(root, basename)
                                    if filepath not in seen_paths:
                                        logger.debug('Module package path: {}'.format(filepath))
                                        seen_paths.add(filepath)
                                        yield filepath

                            else:
                                fileroot = os.path.splitext(basename)[0]
                                for pf in self.module_postfixes:
                                    if fileroot.endswith(pf):
                                        filepath = os.path.join(root, basename)
                                        if filepath not in seen_paths:
                                            logger.debug('Module postfix path: {}'.format(filepath))
                                            seen_paths.add(filepath)
                                            yield filepath

                                for pf in self.module_prefixes:
                                    if fileroot.startswith(pf):
                                        filepath = os.path.join(root, basename)
                                        if filepath not in seen_paths:
                                            logger.debug('Module prefix path: {}'.format(filepath))
                                            seen_paths.add(filepath)
                                            yield filepath

            except IOError as e:
                # we failed to find a suitable path
                logger.warning(e, exc_info=True)
                pass
python
{ "resource": "" }
q276582
_dump_arg_defaults
test
def _dump_arg_defaults(kwargs):
    """Inject default arguments for dump functions."""
    if current_app:
        kwargs.setdefault('cls', current_app.json_encoder)

        if not current_app.config['JSON_AS_ASCII']:
            kwargs.setdefault('ensure_ascii', False)

        kwargs.setdefault('sort_keys', current_app.config['JSON_SORT_KEYS'])
    else:
        kwargs.setdefault('sort_keys', True)
        kwargs.setdefault('cls', JSONEncoder)
python
{ "resource": "" }
q276583
_load_arg_defaults
test
def _load_arg_defaults(kwargs):
    """Inject default arguments for load functions."""
    if current_app:
        kwargs.setdefault('cls', current_app.json_decoder)
    else:
        kwargs.setdefault('cls', JSONDecoder)
python
{ "resource": "" }
q276584
BaseCache.set_many
test
def set_many(self, mapping, timeout=None):
    """Sets multiple keys and values from a mapping.

    :param mapping: a mapping with the keys/values to set.
    :param timeout: the cache timeout for the key (if not specified,
                    it uses the default timeout).
    :returns: Whether all given keys have been set.
    :rtype: boolean
    """
    rv = True
    for key, value in _items(mapping):
        if not self.set(key, value, timeout):
            rv = False
    return rv
python
{ "resource": "" }
q276585
BaseCache.inc
test
def inc(self, key, delta=1):
    """Increments the value of a key by `delta`.  If the key does
    not yet exist it is initialized with `delta`.

    For supporting caches this is an atomic operation.

    :param key: the key to increment.
    :param delta: the delta to add.
    :returns: The new value or ``None`` for backend errors.
    """
    value = (self.get(key) or 0) + delta
    return value if self.set(key, value) else None
python
{ "resource": "" }
q276586
RedisCache.dump_object
test
def dump_object(self, value):
    """Dumps an object into a string for redis.  By default it serializes
    integers as regular strings and pickle-dumps everything else.
    """
    t = type(value)
    if t in integer_types:
        return str(value).encode('ascii')
    return b'!' + pickle.dumps(value)
python
{ "resource": "" }
q276587
_build_editable_options
test
def _build_editable_options(req):
    """
    This function generates a dictionary of the query string
    parameters contained in a given editable URL.
    """
    regexp = re.compile(r"[\?#&](?P<name>[^&=]+)=(?P<value>[^&=]+)")
    matched = regexp.findall(req)

    if matched:
        ret = dict()
        for option in matched:
            (name, value) = option
            if name in ret:
                raise Exception("%s option already defined" % name)
            ret[name] = value
        return ret
    return None
python
{ "resource": "" }
q276588
InstallRequirement.populate_link
test
def populate_link(self, finder, upgrade):
    """Ensure that if a link can be found for this, that it is found.

    Note that self.link may still be None - if upgrade is False and the
    requirement is already installed.
    """
    if self.link is None:
        self.link = finder.find_requirement(self, upgrade)
python
{ "resource": "" }
q276589
InstallRequirement.ensure_has_source_dir
test
def ensure_has_source_dir(self, parent_dir):
    """Ensure that a source_dir is set.

    This will create a temporary build dir if the name of the requirement
    isn't known yet.

    :param parent_dir: The ideal pip parent_dir for the source_dir.
        Generally src_dir for editables and build_dir for sdists.
    :return: self.source_dir
    """
    if self.source_dir is None:
        self.source_dir = self.build_location(parent_dir)
    return self.source_dir
python
{ "resource": "" }
q276590
InstallRequirement.remove_temporary_source
test
def remove_temporary_source(self):
    """Remove the source files from this requirement, if they are marked
    for deletion"""
    if self.source_dir and os.path.exists(
            os.path.join(self.source_dir, PIP_DELETE_MARKER_FILENAME)):
        logger.debug('Removing source in %s', self.source_dir)
        rmtree(self.source_dir)
    self.source_dir = None
    if self._temp_build_dir and os.path.exists(self._temp_build_dir):
        rmtree(self._temp_build_dir)
    self._temp_build_dir = None
python
{ "resource": "" }
q276591
InstallRequirement.get_dist
test
def get_dist(self):
    """Return a pkg_resources.Distribution built from self.egg_info_path"""
    egg_info = self.egg_info_path('').rstrip('/')
    base_dir = os.path.dirname(egg_info)
    metadata = pkg_resources.PathMetadata(base_dir, egg_info)
    dist_name = os.path.splitext(os.path.basename(egg_info))[0]
    return pkg_resources.Distribution(
        os.path.dirname(egg_info),
        project_name=dist_name,
        metadata=metadata)
python
{ "resource": "" }
q276592
BaseRequest.get_data
test
def get_data(self, cache=True, as_text=False, parse_form_data=False):
    """This reads the buffered incoming data from the client into one
    bytestring.  By default this is cached but that behavior can be
    changed by setting `cache` to `False`.

    Usually it's a bad idea to call this method without checking the
    content length first as a client could send dozens of megabytes or more
    to cause memory problems on the server.

    Note that if the form data was already parsed this method will not
    return anything as form data parsing does not cache the data like
    this method does.  To implicitly invoke the form data parsing
    function, set `parse_form_data` to `True`.  When this is done the
    return value of this method will be an empty string if the form
    parser handles the data.  This generally is not necessary as if the
    whole data is cached (which is the default) the form parser will use
    the cached data to parse the form data.  In any case, always check
    the content length first before calling this method to avoid
    exhausting server memory.

    If `as_text` is set to `True` the return value will be a decoded
    unicode string.

    .. versionadded:: 0.9
    """
    rv = getattr(self, '_cached_data', None)
    if rv is None:
        if parse_form_data:
            self._load_form_data()
        rv = self.stream.read()
        if cache:
            self._cached_data = rv
    if as_text:
        rv = rv.decode(self.charset, self.encoding_errors)
    return rv
python
{ "resource": "" }
q276593
BaseResponse.get_wsgi_headers
test
def get_wsgi_headers(self, environ):
    """This is automatically called right before the response is started
    and returns headers modified for the given environment.  It returns a
    copy of the headers from the response with some modifications applied
    if necessary.

    For example the location header (if present) is joined with the root
    URL of the environment.  Also the content length is automatically set
    to zero here for certain status codes.

    .. versionchanged:: 0.6
       Previously that function was called `fix_headers` and modified
       the response object in place.  Also since 0.6, IRIs in location
       and content-location headers are handled properly.

       Also starting with 0.6, Werkzeug will attempt to set the content
       length if it is able to figure it out on its own.  This is the
       case if all the strings in the response iterable are already
       encoded and the iterable is buffered.

    :param environ: the WSGI environment of the request.
    :return: returns a new :class:`~werkzeug.datastructures.Headers`
             object.
    """
    headers = Headers(self.headers)
    location = None
    content_location = None
    content_length = None
    status = self.status_code

    # iterate over the headers to find all values in one go.  Because
    # get_wsgi_headers is called for each response, this gives us a
    # tiny speedup.
    for key, value in headers:
        ikey = key.lower()
        if ikey == u'location':
            location = value
        elif ikey == u'content-location':
            content_location = value
        elif ikey == u'content-length':
            content_length = value

    # make sure the location header is an absolute URL
    if location is not None:
        old_location = location
        if isinstance(location, text_type):
            # Safe conversion is necessary here as we might redirect
            # to a broken URI scheme (for instance itms-services).
            location = iri_to_uri(location, safe_conversion=True)

        if self.autocorrect_location_header:
            current_url = get_current_url(environ, root_only=True)
            if isinstance(current_url, text_type):
                current_url = iri_to_uri(current_url)
            location = url_join(current_url, location)
        if location != old_location:
            headers['Location'] = location

    # make sure the content location is a URL
    if content_location is not None and \
       isinstance(content_location, text_type):
        headers['Content-Location'] = iri_to_uri(content_location)

    # remove entity headers and set content length to zero if needed.
    # Also update content_length accordingly so that the automatic
    # content length detection does not trigger in the following
    # code.
    if 100 <= status < 200 or status == 204:
        headers['Content-Length'] = content_length = u'0'
    elif status == 304:
        remove_entity_headers(headers)

    # if we can determine the content length automatically, we
    # should try to do that.  But only if this does not involve
    # flattening the iterator or encoding of unicode strings in
    # the response.  We however should not do that if we have a 304
    # response.
    if self.automatically_set_content_length and \
       self.is_sequence and content_length is None and status != 304:
        try:
            content_length = sum(len(to_bytes(x, 'ascii'))
                                 for x in self.response)
        except UnicodeError:
            # aha, something non-bytestringy in there, too bad, we
            # can't safely figure out the length of the response.
            pass
        else:
            headers['Content-Length'] = str(content_length)

    return headers
python
{ "resource": "" }
q276594
iri_to_uri
test
def iri_to_uri(iri, charset='utf-8', errors='strict', safe_conversion=False):
    r"""
    Converts any unicode-based IRI to an acceptable ASCII URI. Werkzeug always
    uses utf-8 URLs internally because this is what browsers and HTTP do as
    well. In some places where it accepts a URL it also accepts a unicode IRI
    and converts it into a URI.

    Examples for IRI versus URI:

    >>> iri_to_uri(u'http://☃.net/')
    'http://xn--n3h.net/'
    >>> iri_to_uri(u'http://üser:pässword@☃.net/påth')
    'http://%C3%BCser:p%C3%A4ssword@xn--n3h.net/p%C3%A5th'

    There is a general problem with IRI and URI conversion with some
    protocols that appear in the wild that are in violation of the URI
    specification. In places where Werkzeug goes through a forced IRI to
    URI conversion it will set the `safe_conversion` flag which will
    not perform a conversion if the end result is already ASCII. This
    can mean that the return value is not an entirely correct URI but
    it will not destroy such invalid URLs in the process.

    As an example consider the following two IRIs::

        magnet:?xt=uri:whatever
        itms-services://?action=download-manifest

    The internal representation after parsing of those URLs is the same
    and there is no way to reconstruct the original one.  If safe
    conversion is enabled however this function becomes a noop for both of
    those strings as they both can be considered URIs.

    .. versionadded:: 0.6

    .. versionchanged:: 0.9.6
       The `safe_conversion` parameter was added.

    :param iri: The IRI to convert.
    :param charset: The charset for the URI.
    :param safe_conversion: indicates if a safe conversion should take place.
                            For more information see the explanation above.
    """
    if isinstance(iri, tuple):
        iri = url_unparse(iri)

    if safe_conversion:
        try:
            native_iri = to_native(iri)
            ascii_iri = to_native(iri).encode('ascii')
            if ascii_iri.split() == [ascii_iri]:
                return native_iri
        except UnicodeError:
            pass

    iri = url_parse(to_unicode(iri, charset, errors))

    netloc = iri.encode_netloc()
    path = url_quote(iri.path, charset, errors, '/:~+%')
    query = url_quote(iri.query, charset, errors, '%&[]:;$*()+,!?*/=')
    fragment = url_quote(iri.fragment, charset, errors, '=%&[]:;$()+,!?*/')

    return to_native(url_unparse((iri.scheme, netloc,
                                  path, query, fragment)))
python
{ "resource": "" }
q276595
user_cache_dir
test
def user_cache_dir(appname):
    r"""
    Return full path to the user-specific cache dir for this application.

        "appname" is the name of application.

    Typical user cache directories are:
        Mac OS X:   ~/Library/Caches/<AppName>
        Unix:       ~/.cache/<AppName> (XDG default)
        Windows:    C:\Users\<username>\AppData\Local\<AppName>\Cache

    On Windows the only suggestion in the MSDN docs is that local settings go
    in the `CSIDL_LOCAL_APPDATA` directory. This is identical to the
    non-roaming app data dir (the default returned by `user_data_dir`). Apps
    typically put cache data somewhere *under* the given dir here. Some
    examples:
        ...\Mozilla\Firefox\Profiles\<ProfileName>\Cache
        ...\Acme\SuperApp\Cache\1.0

    OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value.
    """
    if WINDOWS:
        # Get the base path
        path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA"))

        # Add our app name and Cache directory to it
        path = os.path.join(path, appname, "Cache")
    elif sys.platform == "darwin":
        # Get the base path
        path = os.path.expanduser("~/Library/Caches")

        # Add our app name to it
        path = os.path.join(path, appname)
    else:
        # Get the base path
        path = os.getenv("XDG_CACHE_HOME", os.path.expanduser("~/.cache"))

        # Add our app name to it
        path = os.path.join(path, appname)

    return path
python
{ "resource": "" }
q276596
user_data_dir
test
def user_data_dir(appname, roaming=False):
    """
    Return full path to the user-specific data dir for this application.

        "appname" is the name of application.
            If None, just the system directory is returned.
        "roaming" (boolean, default False) can be set True to use the Windows
            roaming appdata directory. That means that for users on a Windows
            network setup for roaming profiles, this user data will be
            sync'd on login. See
            <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
            for a discussion of issues.

    Typical user data directories are:
        Mac OS X:               ~/Library/Application Support/<AppName>
        Unix:                   ~/.local/share/<AppName>    # or in
                                $XDG_DATA_HOME, if defined
        Win XP (not roaming):   C:\Documents and Settings\<username>\Local ...
                                ...Settings\Application Data\<AppName>
        Win XP (roaming):       C:\Documents and Settings\<username>\ ...
                                ...Application Data\<AppName>
        Win 7  (not roaming):   C:\\Users\<username>\AppData\Local\<AppName>
        Win 7  (roaming):       C:\\Users\<username>\AppData\Roaming\<AppName>

    For Unix, we follow the XDG spec and support $XDG_DATA_HOME. That means,
    by default "~/.local/share/<AppName>".
    """
    if WINDOWS:
        const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA"
        path = os.path.join(os.path.normpath(_get_win_folder(const)), appname)
    elif sys.platform == "darwin":
        path = os.path.join(
            os.path.expanduser('~/Library/Application Support/'),
            appname,
        )
    else:
        path = os.path.join(
            os.getenv('XDG_DATA_HOME', os.path.expanduser("~/.local/share")),
            appname,
        )

    return path
python
{ "resource": "" }
q276597
user_log_dir
test
def user_log_dir(appname):
    """
    Return full path to the user-specific log dir for this application.

        "appname" is the name of application.
            If None, just the system directory is returned.

    Typical user log directories are:
        Mac OS X:   ~/Library/Logs/<AppName>
        Unix:       ~/.cache/<AppName>/log  # or under $XDG_CACHE_HOME if
                    defined
        Win XP:     C:\Documents and Settings\<username>\Local Settings\ ...
                    ...Application Data\<AppName>\Logs
        Vista:      C:\\Users\<username>\AppData\Local\<AppName>\Logs

    On Windows the only suggestion in the MSDN docs is that local settings
    go in the `CSIDL_LOCAL_APPDATA` directory. (Note: I'm interested in
    examples of what some windows apps use for a logs dir.)

    OPINION: This function appends "Logs" to the `CSIDL_LOCAL_APPDATA`
    value for Windows and appends "log" to the user cache dir for Unix.
    """
    if WINDOWS:
        path = os.path.join(user_data_dir(appname), "Logs")
    elif sys.platform == "darwin":
        path = os.path.join(os.path.expanduser('~/Library/Logs'), appname)
    else:
        path = os.path.join(user_cache_dir(appname), "log")

    return path
python
{ "resource": "" }
q276598
user_config_dir
test
def user_config_dir(appname, roaming=True):
    """Return full path to the user-specific config dir for this application.

        "appname" is the name of application.
            If None, just the system directory is returned.
        "roaming" (boolean, default True) can be set False to not use the
            Windows roaming appdata directory. When roaming is enabled, for
            users on a Windows network setup for roaming profiles, this user
            data will be sync'd on login. See
            <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
            for a discussion of issues.

    Typical user config directories are:
        Mac OS X:   same as user_data_dir
        Unix:       ~/.config/<AppName>
        Win *:      same as user_data_dir

    For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME. That means,
    by default "~/.config/<AppName>".
    """
    if WINDOWS:
        path = user_data_dir(appname, roaming=roaming)
    elif sys.platform == "darwin":
        path = user_data_dir(appname)
    else:
        path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config"))
        path = os.path.join(path, appname)

    return path
python
{ "resource": "" }
q276599
site_config_dirs
test
def site_config_dirs(appname):
    """Return a list of potential user-shared config dirs for this application.

        "appname" is the name of application.

    Typical site config directories are:
        Mac OS X:   /Library/Application Support/<AppName>/
        Unix:       /etc or $XDG_CONFIG_DIRS[i]/<AppName>/ for each value in
                    $XDG_CONFIG_DIRS
        Win XP:     C:\Documents and Settings\All Users\Application ...
                    ...Data\<AppName>\
        Vista:      (Fail! "C:\ProgramData" is a hidden *system* directory
                    on Vista.)
        Win 7:      Hidden, but writeable on Win 7:
                    C:\ProgramData\<AppName>\
    """
    if WINDOWS:
        path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA"))
        pathlist = [os.path.join(path, appname)]
    elif sys.platform == 'darwin':
        pathlist = [os.path.join('/Library/Application Support', appname)]
    else:
        # try looking in $XDG_CONFIG_DIRS
        xdg_config_dirs = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg')
        if xdg_config_dirs:
            pathlist = [
                os.sep.join([os.path.expanduser(x), appname])
                for x in xdg_config_dirs.split(os.pathsep)
            ]
        else:
            pathlist = []

        # always look in /etc directly as well
        pathlist.append('/etc')

    return pathlist
python
{ "resource": "" }