Dataset schema (one record per row below):

_id               string (2-7 chars)
title             string (1-88 chars)
partition         string (3 classes)
text              string (75-19.8k chars)
language          string (1 class)
meta_information  dict
q276800
highlight_info
test
def highlight_info(ctx, style):
    """Outputs the CSS which can be customized for highlighted code"""
    click.secho("The following styles are available to choose from:", fg="green")
    click.echo(list(pygments.styles.get_all_styles()))
    click.echo()
    click.secho(
        f'The following CSS for the "{style}" style can be customized:', fg="green"
    )
    click.echo(pygments.formatters.HtmlFormatter(style=style).get_style_defs())
python
{ "resource": "" }
q276801
Polygon._draw_mainlayer
test
def _draw_mainlayer(self, gc, view_bounds=None, mode="default"):
    """ Draws a closed polygon """
    gc.save_state()
    try:
        # self._draw_bounds(gc)
        if len(self.points) >= 2:
            # Set the drawing parameters.
            gc.set_fill_color(self.pen.fill_color_)
            gc.set_stroke_color(self.pen.color_)
            gc.set_line_width(self.pen.line_width)
            # Draw the path.
            gc.begin_path()
            # x0 = self.points[0][0] - self.x
            # y0 = self.points[0][1] + self.y
            # gc.move_to(x0, y0)
            # offset_points = [(x-self.x, y+self.y) for x, y in self.points]
            gc.lines(self.points)
            gc.close_path()
            if self.filled:
                gc.draw_path(self.inside_rule_)
            else:
                gc.stroke_path()
    finally:
        gc.restore_state()
python
{ "resource": "" }
q276802
Polygon.is_in
test
def is_in(self, point_x, point_y):
    """ Test if a point is within this polygonal region """
    point_array = array(((point_x, point_y),))
    vertices = array(self.points)
    winding = self.inside_rule == "winding"
    result = points_in_polygon(point_array, vertices, winding)
    return result[0]
python
{ "resource": "" }
q276803
BSpline._draw_mainlayer
test
def _draw_mainlayer(self, gc, view_bounds=None, mode="default"):
    """ Draws the Bezier component """
    if not self.points:
        return
    gc.save_state()
    try:
        gc.set_fill_color(self.pen.fill_color_)
        gc.set_line_width(self.pen.line_width)
        gc.set_stroke_color(self.pen.color_)
        gc.begin_path()
        start_x, start_y = self.points[0]
        gc.move_to(start_x, start_y)
        for triple in nsplit(self.points[1:], 3):
            x1, y1 = triple[0]
            x2, y2 = triple[1]
            end_x, end_y = triple[2]
            gc.curve_to(x1, y1, x2, y2, end_x, end_y)
            # One point overlap
            gc.move_to(end_x, end_y)
        gc.stroke_path()
    finally:
        gc.restore_state()
python
{ "resource": "" }
q276804
DatabaseExtension._handle_event
test
def _handle_event(self, event, *args, **kw):
    """Broadcast an event to the database connections registered."""
    for engine in self.engines.values():
        if hasattr(engine, event):
            getattr(engine, event)(*args, **kw)
python
{ "resource": "" }
q276805
Worker.run
test
def run(self):
    """
    Method that gets run when the Worker thread is started. When there's an
    item in in_queue, it takes it out, passes it to func as an argument,
    and puts the result in out_queue.
    """
    while not self.stopper.is_set():
        try:
            item = self.in_queue.get(timeout=5)
        except queue.Empty:
            continue
        try:
            result = self.func(item)
        except TypeError:
            continue
        else:
            self.out_queue.put(result)
python
{ "resource": "" }
q276806
Pager.get_full_page_url
test
def get_full_page_url(self, page_number, scheme=None):
    """Get the full, external URL for this page, optionally with the
    passed-in URL scheme"""
    args = dict(
        request.view_args,
        _external=True,
    )
    if scheme is not None:
        args['_scheme'] = scheme
    if page_number != 1:
        args['page'] = page_number
    return url_for(request.endpoint, **args)
python
{ "resource": "" }
q276807
Pager.render_prev_next_links
test
def render_prev_next_links(self, scheme=None):
    """Render the rel=prev and rel=next links to a Markup object for
    injection into a template"""
    output = ''
    if self.has_prev:
        output += '<link rel="prev" href="{}" />\n'.format(
            self.get_full_page_url(self.prev, scheme=scheme))
    if self.has_next:
        output += '<link rel="next" href="{}" />\n'.format(
            self.get_full_page_url(self.next, scheme=scheme))
    return Markup(output)
python
{ "resource": "" }
q276808
Pager.render_seo_links
test
def render_seo_links(self, scheme=None):
    """Render the rel=canonical, rel=prev and rel=next links to a Markup
    object for injection into a template"""
    out = self.render_prev_next_links(scheme=scheme)
    if self.total_pages == 1:
        out += self.render_canonical_link(scheme=scheme)
    return out
python
{ "resource": "" }
q276809
_content_type_matches
test
def _content_type_matches(candidate, pattern):
    """Is ``candidate`` an exact match or sub-type of ``pattern``?"""
    def _wildcard_compare(type_spec, type_pattern):
        return type_pattern == '*' or type_spec == type_pattern

    return (
        _wildcard_compare(candidate.content_type, pattern.content_type)
        and _wildcard_compare(candidate.content_subtype, pattern.content_subtype)
    )
python
{ "resource": "" }
q276810
select_content_type
test
def select_content_type(requested, available):
    """Selects the best content type.

    :param requested: a sequence of :class:`.ContentType` instances
    :param available: a sequence of :class:`.ContentType` instances
        that the server is capable of producing

    :returns: the selected content type (from ``available``) and the
        pattern that it matched (from ``requested``)
    :rtype: :class:`tuple` of :class:`.ContentType` instances
    :raises: :class:`.NoMatch` when a suitable match was not found

    This function implements the *Proactive Content Negotiation*
    algorithm as described in sections 3.4.1 and 5.3 of :rfc:`7231`.
    The input is the `Accept`_ header as parsed by
    :func:`.parse_http_accept_header` and a list of parsed
    :class:`.ContentType` instances.  The ``available`` sequence should
    be a sequence of content types that the server is capable of
    producing.  The selected value should ultimately be used as the
    `Content-Type`_ header in the generated response.

    .. _Accept: http://tools.ietf.org/html/rfc7231#section-5.3.2
    .. _Content-Type: http://tools.ietf.org/html/rfc7231#section-3.1.1.5
    """

    class Match(object):
        """Sorting assistant.

        Sorting matches is a tricky business.  We need a way to prefer
        content types by *specificity*.  The definition of *more
        specific* is a little less than clear.  This class treats the
        strength of a match as the most important thing.  Wild cards are
        less specific in all cases.  This is tracked by the
        ``match_type`` attribute.

        If the candidate and pattern differ only by parameters, then
        the strength is based on the number of pattern parameters that
        match parameters from the candidate.  The easiest way to track
        this is to count the number of candidate parameters that are
        matched by the pattern.  This is what ``parameter_distance``
        tracks.

        The final key to the solution is to order the result set such
        that the most specific matches are first in the list.  This is
        done by carefully choosing values for ``match_type`` such that
        full matches bubble up to the front.  We also need a scheme of
        counting matching parameters that pushes stronger matches to the
        front of the list.  The ``parameter_distance`` attribute starts
        at the number of candidate parameters and decreases for each
        matching parameter - the lesser the value, the stronger the
        match.
        """
        WILDCARD, PARTIAL, FULL_TYPE = 2, 1, 0

        def __init__(self, candidate, pattern):
            self.candidate = candidate
            self.pattern = pattern
            if pattern.content_type == pattern.content_subtype == '*':
                self.match_type = self.WILDCARD
            elif pattern.content_subtype == '*':
                self.match_type = self.PARTIAL
            else:
                self.match_type = self.FULL_TYPE
            self.parameter_distance = len(self.candidate.parameters)
            for key, value in candidate.parameters.items():
                if key in pattern.parameters:
                    if pattern.parameters[key] == value:
                        self.parameter_distance -= 1
                    else:
                        self.parameter_distance += 1

    def extract_quality(obj):
        return getattr(obj, 'quality', 1.0)

    matches = []
    for pattern in sorted(requested, key=extract_quality, reverse=True):
        for candidate in sorted(available):
            if _content_type_matches(candidate, pattern):
                if candidate == pattern:  # exact match!!!
                    if extract_quality(pattern) == 0.0:
                        raise errors.NoMatch  # quality of 0 means NO
                    return candidate, pattern
                matches.append(Match(candidate, pattern))

    if not matches:
        raise errors.NoMatch

    matches = sorted(matches,
                     key=attrgetter('match_type', 'parameter_distance'))
    return matches[0].candidate, matches[0].pattern
python
{ "resource": "" }
q276811
rewrite_url
test
def rewrite_url(input_url, **kwargs):
    """
    Create a new URL from `input_url` with modifications applied.

    :param str input_url: the URL to modify
    :keyword str fragment: if specified, this keyword sets the fragment
        portion of the URL.  A value of :data:`None` will remove the
        fragment portion of the URL.
    :keyword str host: if specified, this keyword sets the host portion
        of the network location.  A value of :data:`None` will remove
        the network location portion of the URL.
    :keyword str password: if specified, this keyword sets the password
        portion of the URL.  A value of :data:`None` will remove the
        password from the URL.
    :keyword str path: if specified, this keyword sets the path portion
        of the URL.  A value of :data:`None` will remove the path from
        the URL.
    :keyword int port: if specified, this keyword sets the port portion
        of the network location.  A value of :data:`None` will remove
        the port from the URL.
    :keyword query: if specified, this keyword sets the query portion
        of the URL.  See the comments for a description of this
        parameter.
    :keyword str scheme: if specified, this keyword sets the scheme
        portion of the URL.  A value of :data:`None` will remove the
        scheme.  Note that this will make the URL relative and may have
        unintended consequences.
    :keyword str user: if specified, this keyword sets the user portion
        of the URL.  A value of :data:`None` will remove the user and
        password portions.
    :keyword bool enable_long_host: if this keyword is specified and it
        is :data:`True`, then the host name length restriction from
        :rfc:`3986#section-3.2.2` is relaxed.
    :keyword bool encode_with_idna: if this keyword is specified and it
        is :data:`True`, then the ``host`` parameter will be encoded
        using IDN.  If this value is provided as :data:`False`, then the
        percent-encoding scheme is used instead.  If this parameter is
        omitted or included with a different value, then the ``host``
        parameter is processed using :data:`IDNA_SCHEMES`.

    :return: the modified URL
    :raises ValueError: when a keyword parameter is given an invalid
        value

    If the `host` parameter is specified and not :data:`None`, then it
    will be processed as an Internationalized Domain Name (IDN) if the
    scheme appears in :data:`IDNA_SCHEMES`.  Otherwise, it will be
    encoded as UTF-8 and percent encoded.

    The handling of the `query` parameter requires some additional
    explanation.  You can specify a query value in three different
    ways - as a *mapping*, as a *sequence* of pairs, or as a *string*.
    This flexibility makes it possible to meet the wide range of
    finicky use cases.

    *If the query parameter is a mapping*, then the key + value pairs
    are *sorted by the key* before they are encoded.  Use this method
    whenever possible.

    *If the query parameter is a sequence of pairs*, then each pair is
    encoded *in the given order*.  Use this method if you require that
    parameter order is controlled.

    *If the query parameter is a string*, then it is *used as-is*.
    This form SHOULD BE AVOIDED since it can easily result in broken
    URLs because *no URL escaping is performed*.  This is the obvious
    pass through case that is almost always present.

    """
    scheme, netloc, path, query, fragment = parse.urlsplit(input_url)
    if 'scheme' in kwargs:
        scheme = kwargs['scheme']

    ident, host_n_port = parse.splituser(netloc)
    user, password = parse.splitpasswd(ident) if ident else (None, None)
    if 'user' in kwargs:
        user = kwargs['user']
    elif user is not None:
        user = parse.unquote_to_bytes(user).decode('utf-8')
    if 'password' in kwargs:
        password = kwargs['password']
    elif password is not None:
        password = parse.unquote_to_bytes(password).decode('utf-8')
    ident = _create_url_identifier(user, password)

    host, port = parse.splitnport(host_n_port, defport=None)
    if 'host' in kwargs:
        host = kwargs['host']
    if host is not None:
        host = _normalize_host(
            host,
            enable_long_host=kwargs.get('enable_long_host', False),
            encode_with_idna=kwargs.get('encode_with_idna', None),
            scheme=scheme,
        )
    if 'port' in kwargs:
        port = kwargs['port']
        if port is not None:
            port = int(kwargs['port'])
            if port < 0:
                raise ValueError('port is required to be non-negative')

    if host is None or host == '':
        host_n_port = None
    elif port is None:
        host_n_port = host
    else:
        host_n_port = '{0}:{1}'.format(host, port)

    if 'path' in kwargs:
        path = kwargs['path']
        if path is None:
            path = '/'
        else:
            path = parse.quote(path.encode('utf-8'), safe=PATH_SAFE_CHARS)

    netloc = '{0}@{1}'.format(ident, host_n_port) if ident else host_n_port

    if 'query' in kwargs:
        new_query = kwargs['query']
        if new_query is None:
            query = None
        else:
            params = []
            try:
                for param in sorted(new_query.keys()):
                    params.append((param, new_query[param]))
            except AttributeError:  # arg is None or not a dict
                pass
            if not params:  # maybe a sequence of tuples?
                try:
                    params = [(param, value) for param, value in new_query]
                except ValueError:  # guess not...
                    pass
            if params:
                query = parse.urlencode(params)
            else:
                query = new_query

    if 'fragment' in kwargs:
        fragment = kwargs['fragment']
        if fragment is not None:
            fragment = parse.quote(fragment.encode('utf-8'),
                                   safe=FRAGMENT_SAFE_CHARS)

    # The following is necessary to get around some interesting special
    # case code in urllib.parse._coerce_args in Python 3.4.  Setting
    # scheme to None causes urlunsplit to assume that all non-``None``
    # parameters will be byte strings....
    if scheme is None:
        scheme = ''

    return parse.urlunsplit((scheme, netloc, path, query, fragment))
python
{ "resource": "" }
q276812
remove_url_auth
test
def remove_url_auth(url):
    """
    Removes the user & password and returns them along with a new url.

    :param str url: the URL to sanitize
    :return: a :class:`tuple` containing the authorization portion and
        the sanitized URL.  The authorization is a simple user & password
        :class:`tuple`.

    >>> auth, sanitized = remove_url_auth('http://foo:[email protected]')
    >>> auth
    ('foo', 'bar')
    >>> sanitized
    'http://example.com'

    The return value from this function is a simple named tuple with the
    following fields:

    - *auth* the username and password as a tuple
    - *username* the username portion of the URL or :data:`None`
    - *password* the password portion of the URL or :data:`None`
    - *url* the sanitized URL

    >>> result = remove_url_auth('http://me:[email protected]')
    >>> result.username
    'me'
    >>> result.password
    'secret'
    >>> result.url
    'http://example.com'

    """
    parts = parse.urlsplit(url)
    return RemoveUrlAuthResult(auth=(parts.username or None, parts.password),
                               url=rewrite_url(url, user=None, password=None))
python
{ "resource": "" }
q276813
_create_url_identifier
test
def _create_url_identifier(user, password):
    """
    Generate the user+password portion of a URL.

    :param str user: the user name or :data:`None`
    :param str password: the password or :data:`None`
    """
    if user is not None:
        user = parse.quote(user.encode('utf-8'), safe=USERINFO_SAFE_CHARS)
        if password:
            password = parse.quote(password.encode('utf-8'),
                                   safe=USERINFO_SAFE_CHARS)
            return '{0}:{1}'.format(user, password)
        return user
    return None
python
{ "resource": "" }
q276814
_normalize_host
test
def _normalize_host(host, enable_long_host=False, encode_with_idna=None,
                    scheme=None):
    """
    Normalize a host for a URL.

    :param str host: the host name to normalize
    :keyword bool enable_long_host: if this keyword is specified and it
        is :data:`True`, then the host name length restriction from
        :rfc:`3986#section-3.2.2` is relaxed.
    :keyword bool encode_with_idna: if this keyword is specified and it
        is :data:`True`, then the ``host`` parameter will be encoded
        using IDN.  If this value is provided as :data:`False`, then the
        percent-encoding scheme is used instead.  If this parameter is
        omitted or included with a different value, then the ``host``
        parameter is processed using :data:`IDNA_SCHEMES`.
    :keyword str scheme: if this keyword is specified, then it is used
        to determine whether to apply IDN rules or not.  This parameter
        is ignored if `encode_with_idna` is not :data:`None`.
    :return: the normalized and encoded string ready for inclusion into
        a URL
    """
    if encode_with_idna is not None:
        enable_idna = encode_with_idna
    else:
        enable_idna = scheme.lower() in IDNA_SCHEMES if scheme else False
    if enable_idna:
        try:
            host = '.'.join(segment.encode('idna').decode()
                            for segment in host.split('.'))
        except UnicodeError as exc:
            raise ValueError('host is invalid - {0}'.format(exc))
    else:
        host = parse.quote(host.encode('utf-8'), safe=HOST_SAFE_CHARS)
    if len(host) > 255 and not enable_long_host:
        raise ValueError('host too long')
    return host
python
{ "resource": "" }
q276815
discover_modules
test
def discover_modules(directory):
    """
    Attempts to list all of the modules and submodules found within a given
    directory tree. This function searches the top-level of the directory
    tree for potential python modules and returns a list of candidate names.

    **Note:** This function returns a list of strings representing
    discovered module names, not the actual, loaded modules.

    :param directory: the directory to search for modules.
    """
    found = list()
    if os.path.isdir(directory):
        for entry in os.listdir(directory):
            next_dir = os.path.join(directory, entry)
            # Scan only if there's an __init__.py file
            if os.path.isfile(os.path.join(next_dir, MODULE_INIT_FILE)):
                found.append(entry)
    return found
python
{ "resource": "" }
q276816
rdiscover_modules
test
def rdiscover_modules(directory):
    """
    Attempts to list all of the modules and submodules found within a given
    directory tree. This function recursively searches the directory tree
    for potential python modules and returns a list of candidate names.

    **Note:** This function returns a list of strings representing
    discovered module names, not the actual, loaded modules.

    :param directory: the directory to search for modules.
    """
    found = list()
    if os.path.isdir(directory):
        for entry in os.listdir(directory):
            next_dir = os.path.join(directory, entry)
            # Scan only if there's an __init__.py file
            if os.path.isfile(os.path.join(next_dir, MODULE_INIT_FILE)):
                modules = _search_for_modules(next_dir, True, entry)
                found.extend(modules)
    return found
python
{ "resource": "" }
q276817
rlist_modules
test
def rlist_modules(mname):
    """
    Attempts to list the submodules under a module recursively. This
    function works for modules located in the default path as well as
    extended paths via the sys.meta_path hooks.

    This function carries the expectation that the hidden module variable
    '__path__' has been set correctly.

    :param mname: the module name to descend into
    """
    module = import_module(mname)
    if not module:
        raise ImportError('Unable to load module {}'.format(mname))
    found = list()
    if _should_use_module_path(module):
        mpath = module.__path__[0]
    else:
        mpaths = sys.path
        mpath = _scan_paths_for(mname, mpaths)
    if mpath:
        for pmname in _search_for_modules(mpath, recursive=True):
            found_mod = MODULE_PATH_SEP.join((mname, pmname))
            found.append(found_mod)
    return found
python
{ "resource": "" }
q276818
list_classes
test
def list_classes(mname, cls_filter=None):
    """
    Attempts to list all of the classes within a specified module. This
    function works for modules located in the default path as well as
    extended paths via the sys.meta_path hooks.

    If a class filter is set, it will be called with each class as its
    parameter. This filter's return value must be interpretable as a
    boolean. Results that evaluate as True will include the type in the
    list of returned classes. Results that evaluate as False will exclude
    the type in the list of returned classes.

    :param mname: name of the module to descend into
    :param cls_filter: a function to call to determine what classes
        should be included.
    """
    found = list()
    module = import_module(mname)
    if inspect.ismodule(module):
        found.extend(_list_classes(module, cls_filter))
    return found
python
{ "resource": "" }
q276819
rlist_classes
test
def rlist_classes(module, cls_filter=None):
    """
    Attempts to list all of the classes within a given module namespace.
    This method, unlike list_classes, will recurse into discovered
    submodules.

    If a type filter is set, it will be called with each class as its
    parameter. This filter's return value must be interpretable as a
    boolean. Results that evaluate as True will include the type in the
    list of returned classes. Results that evaluate as False will exclude
    the type in the list of returned classes.

    :param module: name of the module namespace to descend into
    :param cls_filter: a function to call to determine what classes
        should be included.
    """
    found = list()
    mnames = rlist_modules(module)
    for mname in mnames:
        found.extend(list_classes(mname, cls_filter))
    return found
python
{ "resource": "" }
q276820
ensure_dir
test
def ensure_dir(path):
    """Ensure that a needed directory exists, creating it if it doesn't"""
    try:
        log.info('Ensuring directory exists: %s' % path)
        os.makedirs(path)
    except OSError:
        if not os.path.isdir(path):
            raise
python
{ "resource": "" }
q276821
AzureStorageBroker.put_text
test
def put_text(self, key, contents):
    """Store the given text contents so that they are later retrievable
    by the given key."""
    self._blobservice.create_blob_from_text(
        self.uuid,
        key,
        contents
    )
python
{ "resource": "" }
q276822
luhn_check
test
def luhn_check(card_number):
    """ checks to make sure that the card passes a luhn mod-10 checksum """
    sum = 0
    num_digits = len(card_number)
    oddeven = num_digits & 1
    for count in range(0, num_digits):
        digit = int(card_number[count])
        if not ((count & 1) ^ oddeven):
            digit *= 2
        if digit > 9:
            digit -= 9
        sum += digit
    return (sum % 10) == 0
python
{ "resource": "" }
q276823
get_git_version
test
def get_git_version():
    """
    Return the git hash as a string.

    Apparently someone got this from numpy's setup.py. It has since been
    modified a few times.
    """
    # Return the git revision as a string
    # copied from numpy setup.py
    def _minimal_ext_cmd(cmd):
        # construct minimal environment
        env = {}
        for k in ['SYSTEMROOT', 'PATH']:
            v = os.environ.get(k)
            if v is not None:
                env[k] = v
        # LANGUAGE is used on win32
        env['LANGUAGE'] = 'C'
        env['LANG'] = 'C'
        env['LC_ALL'] = 'C'
        with open(os.devnull, 'w') as err_out:
            out = subprocess.Popen(cmd,
                                   stdout=subprocess.PIPE,
                                   stderr=err_out,  # maybe debug later?
                                   env=env).communicate()[0]
        return out

    try:
        git_dir = os.path.dirname(os.path.realpath(__file__))
        out = _minimal_ext_cmd(['git', '-C', git_dir, 'rev-parse', 'HEAD'])
        GIT_REVISION = out.strip().decode('ascii')
    except OSError:
        GIT_REVISION = 'Unknown'

    return GIT_REVISION
python
{ "resource": "" }
q276824
ModuleLoader.load_module
test
def load_module(self, module_name):
    """
    Loads a module's code and sets the module's expected hidden
    variables. For more information on these variables and what they are
    for, please see PEP302.

    :param module_name: the full name of the module to load
    """
    if module_name != self.module_name:
        raise LoaderError(
            'Requesting a module that the loader is unaware of.')
    if module_name in sys.modules:
        return sys.modules[module_name]
    module = self.load_module_py_path(module_name, self.load_target)
    if self.is_pkg:
        module.__path__ = [self.module_path]
        module.__package__ = module_name
    else:
        module.__package__ = module_name.rpartition('.')[0]
    sys.modules[module_name] = module
    return module
python
{ "resource": "" }
q276825
ModuleFinder.add_path
test
def add_path(self, path):
    """
    Adds a path to search through when attempting to look up a module.

    :param path: the path to add to the list of searchable paths
    """
    if path not in self.paths:
        self.paths.append(path)
python
{ "resource": "" }
q276826
ModuleFinder.find_module
test
def find_module(self, module_name, path=None):
    """
    Searches the paths for the required module.

    :param module_name: the full name of the module to find
    :param path: set to None when the module being searched for is a
        top-level module - otherwise this is set to package.__path__ for
        submodules and subpackages (unused)
    """
    module_path = os.path.join(*module_name.split(MODULE_PATH_SEP))
    for search_root in self.paths:
        target_path = os.path.join(search_root, module_path)
        is_pkg = False
        # If the target references a directory, try to load it as
        # a module by referencing the __init__.py file, otherwise
        # append .py and attempt to resolve it.
        if os.path.isdir(target_path):
            target_file = os.path.join(target_path, '__init__.py')
            is_pkg = True
        else:
            target_file = '{}.py'.format(target_path)
        if os.path.exists(target_file):
            return ModuleLoader(
                target_path, module_name, target_file, is_pkg)
    return None
python
{ "resource": "" }
q276827
split_line
test
def split_line(line, min_line_length=30, max_line_length=100):
    """
    This is designed to work with prettified output from Beautiful Soup,
    which indents with a single space.

    :param line: The line to split
    :param min_line_length: The minimum desired line length
    :param max_line_length: The maximum desired line length
    :return: A list of lines
    """
    if len(line) <= max_line_length:
        # No need to split!
        return [line]

    # First work out the indentation on the beginning of the line.
    # (Check the bounds before indexing to avoid an IndexError on an
    # all-space line.)
    indent = 0
    while indent < len(line) and line[indent] == ' ':
        indent += 1

    # Try to split the line.
    # Start looking for a space at character max_line_length, working
    # backwards
    i = max_line_length
    split_point = None
    while i > min_line_length:
        if line[i] == ' ':
            split_point = i
            break
        i -= 1

    if split_point is None:
        # We didn't find a split point - search beyond the end of the line
        i = max_line_length + 1
        while i < len(line):
            if line[i] == ' ':
                split_point = i
                break
            i += 1

    if split_point is None:
        # There is nowhere to split the line!
        return [line]
    else:
        # Split it!
        line1 = line[:split_point]
        line2 = ' ' * indent + line[split_point + 1:]
        return [line1] + split_line(line2, min_line_length, max_line_length)
python
{ "resource": "" }
q276828
remove_namespaces
test
def remove_namespaces(root):
    """Call this on an lxml.etree document to remove all namespaces"""
    for elem in root.getiterator():
        if not hasattr(elem.tag, 'find'):
            continue
        i = elem.tag.find('}')
        if i >= 0:
            elem.tag = elem.tag[i + 1:]
    objectify.deannotate(root, cleanup_namespaces=True)
python
{ "resource": "" }
q276829
VersionReleaseChecks.consistency
test
def consistency(self, desired_version=None, include_package=False,
                strictness=None):
    """Checks that the versions are consistent

    Parameters
    ----------
    desired_version: str
        optional; the version that all of these should match
    include_package: bool
        whether to check the special 'package' version for consistency
        (default False)
    strictness: str
    """
    keys_to_check = list(self.versions.keys())
    if not include_package and 'package' in keys_to_check:
        keys_to_check.remove('package')
    if desired_version is None:
        # if we have to guess, we trust setup.py
        try:
            desired_version = self.versions['setup.py']
        except KeyError:
            desired_version = self.versions[keys_to_check[0]]
    if strictness is None:
        strictness = self.strictness
    desired = self._version(desired_version, strictness)
    error_keys = []
    for key in keys_to_check:
        test = self._version(self.versions[key], strictness)
        if test != desired:
            error_keys += [key]
    # make the error message
    msg = ""
    for key in error_keys:
        msg += "Error: desired {d} != {v} ({k})\n".format(
            d=str(desired), v=str(self.versions[key]), k=str(key)
        )
    return msg
python
{ "resource": "" }
q276830
Rule.from_yaml
test
def from_yaml(cls, **kwargs):
    """Creates a new instance of a rule in relation to the config file.

    This updates the dictionary of the class with the added details,
    which allows for flexibility in the configuration file. Only called
    when parsing the default configuration file.
    """
    ret = cls()
    for k, v in kwargs.items():
        ret.__dict__[k] = v
    return ret
python
{ "resource": "" }
q276831
Rule.merge
test
def merge(self, new_dict):
    """Merges a dictionary into the Rule object."""
    actions = new_dict.pop("actions")
    for action in actions:
        self.add_action(action)
    self.__dict__.update(new_dict)
python
{ "resource": "" }
q276832
Rule.execute_actions
test
def execute_actions(self, cwd):
    """Iterates over the actions and executes them in order."""
    self._execute_globals(cwd)
    for action in self.actions:
        logger.info("executing {}".format(action))
        p = subprocess.Popen(action, shell=True, cwd=cwd)
        p.wait()
python
{ "resource": "" }
q276833
CommandSet.from_yaml
test
def from_yaml(cls, defaults, **kwargs):
    """Creates a new instance of a rule by merging two dictionaries.

    This allows for independent configuration files to be merged into
    the defaults."""
    # TODO: I hate myself for this. Fix it later mmkay?
    if "token" not in defaults:
        kwargs["token"] = None
    defaults = copy.deepcopy(defaults)
    return cls(
        defaults=defaults,
        token=kwargs.pop("token"),
        directory=kwargs.pop("directory"),
        **kwargs
    )
python
{ "resource": "" }
q276834
LfsSmtpHandler.add_details
test
def add_details(self, message):
    """
    Add extra details to the message. Separate so that it can be
    overridden
    """
    msg = message

    # Try to append Flask request details
    try:
        from flask import request
        url = request.url
        method = request.method
        endpoint = request.endpoint
        # Obscure password field and prettify a little bit
        form_dict = dict(request.form)
        for key in form_dict:
            if key.lower() in _error_reporting_obscured_fields:
                form_dict[key] = '******'
            elif len(form_dict[key]) == 1:
                form_dict[key] = form_dict[key][0]
        form = pprint.pformat(form_dict).replace('\n', '\n ')
        msg = '%s\nRequest:\n\nurl: %s\nmethod: %s\nendpoint: %s\nform: %s\n' % \
            (msg, url, method, endpoint, form)
    except Exception:
        traceback.print_exc()

    # Try to append the session
    try:
        from flask import session
        from flask.json import JSONEncoder
        session_str = json.dumps(
            dict(**session),
            indent=2,
            cls=JSONEncoder
        )
        msg = '%s\nSession:\n\n%s\n' % (msg, session_str)
    except Exception:
        traceback.print_exc()

    return msg
python
{ "resource": "" }
q276835
LfsSmtpHandler.emit
test
def emit(self, record):
    """
    Emit a record.

    Format the record and send it to the specified addressees.
    """
    try:
        # First, remove all records from the rate limiter list that are
        # over a minute old
        now = timetool.unix_time()
        one_minute_ago = now - 60
        new_rate_limiter = [x for x in self.rate_limiter if x > one_minute_ago]
        log.debug('Rate limiter %s -> %s' % (len(self.rate_limiter),
                                             len(new_rate_limiter)))
        self.rate_limiter = new_rate_limiter

        # Now, get the number of emails sent in the last minute. If it's
        # less than the threshold, add another entry to the rate limiter
        # list
        recent_sends = len(self.rate_limiter)
        send_email = recent_sends < self.max_sends_per_minute
        if send_email:
            self.rate_limiter.append(now)

        msg = self.format(record)
        msg = self.add_details(msg)

        # Finally send the message!
        if send_email:
            if DEBUG_ERROR_EMAIL_SENDING:
                log.info('@@@> ! Sending error email to {} !'.format(
                    self.toaddrs))
            send_text_mail(self.toaddrs, self.subject, msg, self.fromaddr)
        else:
            log.info('!! WARNING: Not sending email as too many emails '
                     'have been sent in the past minute !!')
            log.info(msg)
    except (KeyboardInterrupt, SystemExit):
        raise
    except Exception:
        self.handleError(record)
python
{ "resource": "" }
q276836
RenditionAwareStructBlock.get_context
test
def get_context(self, value):
    """Ensure `image_rendition` is added to the global context."""
    context = super(RenditionAwareStructBlock, self).get_context(value)
    context['image_rendition'] = (
        self.rendition.image_rendition or 'original'
    )
    return context
python
{ "resource": "" }
q276837
AttackProtect.log_attempt
test
def log_attempt(self, key):
    """
    Log an attempt against key, incrementing the number of attempts for
    that key and potentially adding a lock to the lock table
    """
    with self.lock:
        if key not in self.attempts:
            self.attempts[key] = 1
        else:
            self.attempts[key] += 1
        if self.attempts[key] >= self.max_attempts:
            log.info('Account %s locked due to too many login attempts' % key)
            # lock account
            self.locks[key] = datetime.datetime.utcnow() + \
                datetime.timedelta(seconds=self.lock_duration)
python
{ "resource": "" }
q276838
Music2Storage.add_to_queue
test
def add_to_queue(self, url):
    """
    Adds a URL to the download queue.

    :param str url: URL to the music service track
    """
    if self.connection_handler.current_music is None:
        log.error('Music service is not initialized. URL was not added to queue.')
    elif self.connection_handler.current_storage is None:
        log.error('Drive service is not initialized. URL was not added to queue.')
    else:
        self.queues['download'].put(url)
python
{ "resource": "" }
q276839
Music2Storage.start_workers
test
def start_workers(self, workers_per_task=1):
    """
    Creates and starts the workers, as well as attaching a handler to
    terminate them gracefully when a SIGINT signal is received.

    :param int workers_per_task: Number of workers to create for each
        task in the pipeline
    """
    if not self.workers:
        for _ in range(workers_per_task):
            self.workers.append(Worker(self._download, self.queues['download'],
                                       self.queues['convert'], self.stopper))
            self.workers.append(Worker(self._convert, self.queues['convert'],
                                       self.queues['upload'], self.stopper))
            self.workers.append(Worker(self._upload, self.queues['upload'],
                                       self.queues['delete'], self.stopper))
            self.workers.append(Worker(self._delete, self.queues['delete'],
                                       self.queues['done'], self.stopper))
        self.signal_handler = SignalHandler(self.workers, self.stopper)
        signal.signal(signal.SIGINT, self.signal_handler)
        for worker in self.workers:
            worker.start()
python
{ "resource": "" }
q276840
Client.set
test
def set(self, k, v):
    """Add or update a key, value pair to the database"""
    k = k.lstrip('/')
    url = '{}/{}'.format(self.endpoint, k)
    r = requests.put(url, data=str(v))
    if r.status_code != 200 or r.json() is not True:
        raise KVStoreError('PUT returned {}'.format(r.status_code))
python
{ "resource": "" }
q276841
Client.get
test
def get(self, k, wait=False, wait_index=False, timeout='5m'):
    """Get the value of a given key"""
    k = k.lstrip('/')
    url = '{}/{}'.format(self.endpoint, k)
    params = {}
    if wait:
        params['index'] = wait_index
        params['wait'] = timeout
    r = requests.get(url, params=params)
    if r.status_code == 404:
        raise KeyDoesNotExist("Key " + k + " does not exist")
    if r.status_code != 200:
        raise KVStoreError('GET returned {}'.format(r.status_code))
    try:
        return base64.b64decode(r.json()[0]['Value'])
    except TypeError:
        # Value was empty and a wild None appeared
        return ""
python
{ "resource": "" }
q276842
Client.recurse
test
def recurse(self, k, wait=False, wait_index=None, timeout='5m'):
    """Recursively get the tree below the given key"""
    k = k.lstrip('/')
    url = '{}/{}'.format(self.endpoint, k)
    params = {}
    params['recurse'] = 'true'
    if wait:
        params['wait'] = timeout
        if not wait_index:
            params['index'] = self.index(k, recursive=True)
        else:
            params['index'] = wait_index
    r = requests.get(url, params=params)
    if r.status_code == 404:
        raise KeyDoesNotExist("Key " + k + " does not exist")
    if r.status_code != 200:
        raise KVStoreError('GET returned {}'.format(r.status_code))
    entries = {}
    for e in r.json():
        if e['Value']:
            entries[e['Key']] = base64.b64decode(e['Value'])
        else:
            entries[e['Key']] = ''
    return entries
python
{ "resource": "" }
q276843
Client.index
test
def index(self, k, recursive=False):
    """Get the current index of the key or the subtree.

    This is needed for later creating long polling requests
    """
    k = k.lstrip('/')
    url = '{}/{}'.format(self.endpoint, k)
    params = {}
    if recursive:
        params['recurse'] = ''
    r = requests.get(url, params=params)
    return r.headers['X-Consul-Index']
python
{ "resource": "" }
q276844
Client.delete
test
def delete(self, k, recursive=False):
    """Delete a given key or recursively delete the tree below it"""
    k = k.lstrip('/')
    url = '{}/{}'.format(self.endpoint, k)
    params = {}
    if recursive:
        params['recurse'] = ''
    r = requests.delete(url, params=params)
    if r.status_code != 200:
        raise KVStoreError('DELETE returned {}'.format(r.status_code))
python
{ "resource": "" }
q276845
plot_heatmap
test
def plot_heatmap(X, y, top_n=10, metric='correlation', method='complete'):
    '''
    Plot heatmap which shows features with classes.

    :param X: list of dict
    :param y: labels
    :param top_n: most important n features
    :param metric: metric which will be used for clustering
    :param method: method which will be used for clustering
    '''
    sns.set(color_codes=True)
    df = feature_importance_report(X, y)
    df_sns = pd.DataFrame().from_records(X)[df[:top_n].index].T
    df_sns.columns = y
    color_mapping = dict(zip(set(y), sns.mpl_palette("Set2", len(set(y)))))
    return sns.clustermap(df_sns, figsize=(22, 22), z_score=0,
                          metric=metric, method=method,
                          col_colors=[color_mapping[i] for i in y])
python
{ "resource": "" }
q276846
add_months
test
def add_months(months, timestamp=None):
    """Add a number of months to a timestamp (defaults to the current
    UTC time)"""
    # Using None as a sentinel avoids the classic pitfall of a mutable /
    # time-dependent default argument being evaluated once at import time
    if timestamp is None:
        timestamp = datetime.datetime.utcnow()
    month = timestamp.month
    new_month = month + months
    years = 0
    while new_month < 1:
        new_month += 12
        years -= 1
    while new_month > 12:
        new_month -= 12
        years += 1
    year = timestamp.year + years
    try:
        return datetime.datetime(year, new_month, timestamp.day,
                                 timestamp.hour, timestamp.minute,
                                 timestamp.second)
    except ValueError:
        # This means that the day exceeds the last day of the month, i.e.
        # it is 30th March, and we are finding the day 1 month ago, and it
        # is trying to return 30th February
        if months > 0:
            # We are adding, so use the first day of the next month
            new_month += 1
            if new_month > 12:
                new_month -= 12
                year += 1
            return datetime.datetime(year, new_month, 1, timestamp.hour,
                                     timestamp.minute, timestamp.second)
        else:
            # We are subtracting - use the last day of the same month
            new_day = calendar.monthrange(year, new_month)[1]
            return datetime.datetime(year, new_month, new_day,
                                     timestamp.hour, timestamp.minute,
                                     timestamp.second)
python
{ "resource": "" }
q276847
add_months_to_date
test
def add_months_to_date(months, date):
    """Add a number of months to a date"""
    month = date.month
    new_month = month + months
    years = 0
    while new_month < 1:
        new_month += 12
        years -= 1
    while new_month > 12:
        new_month -= 12
        years += 1
    year = date.year + years
    try:
        return datetime.date(year, new_month, date.day)
    except ValueError:
        # This means that the day exceeds the last day of the month, i.e.
        # it is 30th March, and we are finding the day 1 month ago, and it
        # is trying to return 30th February
        if months > 0:
            # We are adding, so use the first day of the next month
            new_month += 1
            if new_month > 12:
                new_month -= 12
                year += 1
            # return a date (not a datetime) to match the happy path above
            return datetime.date(year, new_month, 1)
        else:
            # We are subtracting - use the last day of the same month
            new_day = calendar.monthrange(year, new_month)[1]
            return datetime.date(year, new_month, new_day)
python
{ "resource": "" }
q276848
is_christmas_period
test
def is_christmas_period():
    """Is this the Christmas period?"""
    now = datetime.date.today()
    if now.month != 12:
        return False
    if now.day < 15:
        return False
    if now.day > 27:
        return False
    return True
python
{ "resource": "" }
q276849
ConnectionHandler.use_music_service
test
def use_music_service(self, service_name, api_key):
    """
    Sets the current music service to service_name.

    :param str service_name: Name of the music service
    :param str api_key: Optional API key if necessary
    """
    try:
        self.current_music = self.music_services[service_name]
    except KeyError:
        if service_name == 'youtube':
            self.music_services['youtube'] = Youtube()
            self.current_music = self.music_services['youtube']
        elif service_name == 'soundcloud':
            self.music_services['soundcloud'] = Soundcloud(api_key=api_key)
            self.current_music = self.music_services['soundcloud']
        else:
            log.error('Music service name is not recognized.')
python
{ "resource": "" }
q276850
ConnectionHandler.use_storage_service
test
def use_storage_service(self, service_name, custom_path):
    """
    Sets the current storage service to service_name and runs the connect
    method on the service.

    :param str service_name: Name of the storage service
    :param str custom_path: Custom path where to download tracks for
        local storage (optional, and must already exist; use absolute
        paths only)
    """
    try:
        self.current_storage = self.storage_services[service_name]
    except KeyError:
        if service_name == 'google drive':
            self.storage_services['google drive'] = GoogleDrive()
            self.current_storage = self.storage_services['google drive']
            self.current_storage.connect()
        elif service_name == 'dropbox':
            log.error('Dropbox is not supported yet.')
        elif service_name == 'local':
            self.storage_services['local'] = LocalStorage(custom_path=custom_path)
            self.current_storage = self.storage_services['local']
            self.current_storage.connect()
        else:
            log.error('Storage service name is not recognized.')
python
{ "resource": "" }
q276851
SkUtilsIO.from_csv
test
def from_csv(self, label_column='labels'):
    '''
    Read dataset from csv.
    '''
    df = pd.read_csv(self.path, header=0)
    X = df.loc[:, df.columns != label_column].to_dict('records')
    X = map_dict_list(X, if_func=lambda k, v: v and math.isfinite(v))
    y = list(df[label_column].values)
    return X, y
python
{ "resource": "" }
q276852
SkUtilsIO.from_json
test
def from_json(self):
    '''
    Reads dataset from json.
    '''
    with gzip.open('%s.gz' % self.path, 'rt') if self.gz \
            else open(self.path) as file:
        return list(map(list, zip(*json.load(file))))[::-1]
python
{ "resource": "" }
q276853
SkUtilsIO.to_json
test
def to_json(self, X, y):
    '''
    Writes dataset to json.

    :param X: dataset as list of dict.
    :param y: labels.
    '''
    with gzip.open('%s.gz' % self.path, 'wt') if self.gz else open(
            self.path, 'w') as file:
        json.dump(list(zip(y, X)), file)
python
{ "resource": "" }
q276854
filter_by_label
test
def filter_by_label(X, y, ref_label, reverse=False):
    '''
    Select items with the given label from a dataset.

    :param X: dataset
    :param y: labels
    :param ref_label: reference label
    :param bool reverse: if False, selects items matching ref_label;
        otherwise eliminates them
    '''
    check_reference_label(y, ref_label)
    return list(zip(*filter(lambda t: (not reverse) == (t[1] == ref_label),
                            zip(X, y))))
python
{ "resource": "" }
q276855
average_by_label
test
def average_by_label(X, y, ref_label):
    '''
    Calculates an average dictionary from a list of dictionaries for the
    given label.

    :param List[Dict] X: dataset
    :param list y: labels
    :param ref_label: reference label
    '''
    # TODO: consider to delete defaultdict
    return defaultdict(float, pd.DataFrame.from_records(
        filter_by_label(X, y, ref_label)[0]
    ).mean().to_dict())
python
{ "resource": "" }
q276856
feature_importance_report
test
def feature_importance_report(X, y, threshold=0.001,
                              correcting_multiple_hypotesis=True,
                              method='fdr_bh', alpha=0.1, sort_by='pval'):
    '''
    Provide significance for features in the dataset with ANOVA, using
    multiple hypothesis testing.

    :param X: list of dict with keys as feature names and values as
        features
    :param y: labels
    :param threshold: low-variance threshold to eliminate low-variance
        features
    :param correcting_multiple_hypotesis: corrects p-values with multiple
        hypothesis testing
    :param method: method of multiple hypothesis testing
    :param alpha: alpha of multiple hypothesis testing
    :param sort_by: sorts the output dataframe by pval or F
    :return: DataFrame with F and pval for each feature with their
        average values
    '''
    df = variance_threshold_on_df(
        pd.DataFrame.from_records(X), threshold=threshold)
    F, pvals = f_classif(df.values, y)
    if correcting_multiple_hypotesis:
        _, pvals, _, _ = multipletests(pvals, alpha=alpha, method=method)
    df['labels'] = y
    df_mean = df.groupby('labels').mean().T
    df_mean['F'] = F
    df_mean['pval'] = pvals
    return df_mean.sort_values(sort_by, ascending=True)
python
{ "resource": "" }
q276857
SessionData.restore_data
test
def restore_data(self, data_dict):
    """
    Restore the data dict - update the flask session and this object
    """
    session[self._base_key] = data_dict
    self._data_dict = session[self._base_key]
python
{ "resource": "" }
q276858
_mergedict
test
def _mergedict(a, b):
    """Recursively merge the 2 dicts. Destructive on argument 'a'."""
    for p, d1 in b.items():
        if p in a:
            if not isinstance(d1, dict):
                continue
            _mergedict(a[p], d1)
        else:
            a[p] = d1
    return a
python
{ "resource": "" }
q276859
multi
test
def multi(dispatch_fn, default=None):
    """A decorator for a function to dispatch on.

    The value returned by the dispatch function is used to look up the
    implementation function based on its dispatch key. The dispatch
    function is available using the `dispatch_fn` function.
    """
    def _inner(*args, **kwargs):
        dispatch_value = dispatch_fn(*args, **kwargs)
        f = _inner.__multi__.get(dispatch_value, _inner.__multi_default__)
        if f is None:
            raise Exception(
                f"No implementation of {dispatch_fn.__name__} "
                f"for dispatch value {dispatch_value}"
            )
        return f(*args, **kwargs)

    _inner.__multi__ = {}
    _inner.__multi_default__ = default
    _inner.__dispatch_fn__ = dispatch_fn
    return _inner
python
{ "resource": "" }
q276860
method
test
def method(dispatch_fn, dispatch_key=None):
    """A decorator for a function implementing dispatch_fn for
    dispatch_key.

    If no dispatch_key is specified, the function is used as the default
    implementation.
    """
    def apply_decorator(fn):
        if dispatch_key is None:
            # Default case
            dispatch_fn.__multi_default__ = fn
        else:
            dispatch_fn.__multi__[dispatch_key] = fn
        return fn
    return apply_decorator
python
{ "resource": "" }
q276861
find_blocks
test
def find_blocks():
    """
    Auto-discover INSTALLED_APPS registered_blocks.py modules and fail
    silently when not present. This forces an import on them thereby
    registering their blocks. This is a near 1-to-1 copy of how django's
    admin application registers models.
    """
    for app in settings.INSTALLED_APPS:
        mod = import_module(app)
        # Attempt to import the app's registered_blocks module.
        try:
            before_import_block_registry = copy.copy(
                block_registry._registry
            )
            import_module('{}.registered_blocks'.format(app))
        except:
            # Reset the block_registry to the state before the last
            # import as this import will have to reoccur on the next
            # request and this could raise NotRegistered and
            # AlreadyRegistered exceptions (see django ticket #8245).
            block_registry._registry = before_import_block_registry

            # Decide whether to bubble up this error. If the app just
            # doesn't have a registered_blocks module, we can ignore the
            # error attempting to import it, otherwise we want it to
            # bubble up.
            if module_has_submodule(mod, 'registered_blocks'):
                raise
python
{ "resource": "" }
q276862
RegisteredBlockStreamFieldRegistry._verify_block
test
def _verify_block(self, block_type, block):
    """
    Verifies a block prior to registration.
    """
    if block_type in self._registry:
        raise AlreadyRegistered(
            "A block has already been registered to the {} `block_type` "
            "in the registry. Either unregister that block before trying "
            "to register this block under a different `block_type`".format(
                block_type
            )
        )
    if not isinstance(block, Block):
        raise InvalidBlock(
            "The block you tried to register to {} is invalid. Only "
            "instances of `wagtail.wagtailcore.blocks.Block` may be "
            "registered with the block_registry.".format(block_type)
        )
python
{ "resource": "" }
q276863
RegisteredBlockStreamFieldRegistry.register_block
test
def register_block(self, block_type, block):
    """
    Registers `block` to `block_type` in the registry.
    """
    self._verify_block(block_type, block)
    self._registry[block_type] = block
python
{ "resource": "" }
q276864
RegisteredBlockStreamFieldRegistry.unregister_block
test
def unregister_block(self, block_type):
    """
    Unregisters the block associated with `block_type` from the registry.

    If no block is registered to `block_type`, NotRegistered will be
    raised.
    """
    if block_type not in self._registry:
        raise NotRegistered(
            'There is no block registered as "{}" with the '
            'RegisteredBlockStreamFieldRegistry registry.'.format(
                block_type
            )
        )
    else:
        del self._registry[block_type]
python
{ "resource": "" }
q276865
convert_to_mp3
test
def convert_to_mp3(file_name, delete_queue):
    """
    Converts the file associated with the file_name passed into a MP3 file.

    :param str file_name: Filename of the original file in local storage
    :param Queue delete_queue: Delete queue to add the original file to
        after conversion is done
    :return str: Filename of the new file in local storage
    """
    file = os.path.splitext(file_name)
    if file[1] == '.mp3':
        log.info(f"{file_name} is already a MP3 file, no conversion needed.")
        return file_name
    new_file_name = file[0] + '.mp3'
    ff = FFmpeg(
        inputs={file_name: None},
        outputs={new_file_name: None}
    )
    log.info(f"Conversion for {file_name} has started")
    start_time = time()
    try:
        ff.run(stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
    except FFRuntimeError:
        # likely a stale output file from a previous run: remove it and
        # retry once
        os.remove(new_file_name)
        ff.run(stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
    end_time = time()
    log.info(f"Conversion for {file_name} has finished in "
             f"{end_time - start_time} seconds")
    delete_queue.put(file_name)
    return new_file_name
python
{ "resource": "" }
q276866
GitReleaseChecks.reasonable_desired_version
test
def reasonable_desired_version(self, desired_version, allow_equal=False,
                               allow_patch_skip=False):
    """
    Determine whether the desired version is a reasonable next version.

    Parameters
    ----------
    desired_version: str
        the proposed next version name
    """
    try:
        desired_version = desired_version.base_version
    except AttributeError:
        pass
    (new_major, new_minor, new_patch) = map(int, desired_version.split('.'))
    tag_versions = self._versions_from_tags()
    if not tag_versions:
        # no tags yet, so any legal version is legal!
        return ""
    max_version = max(tag_versions).base_version
    (old_major, old_minor, old_patch) = map(int, str(max_version).split('.'))
    update_str = str(max_version) + " -> " + str(desired_version)
    v_desired = vers.Version(desired_version)
    v_max = vers.Version(max_version)
    if allow_equal and v_desired == v_max:
        return ""
    if v_desired < v_max:
        return ("Bad update: New version doesn't increase on last tag: "
                + update_str + "\n")
    bad_update = skipped_version((old_major, old_minor, old_patch),
                                 (new_major, new_minor, new_patch),
                                 allow_patch_skip)
    msg = ""
    if bad_update:
        msg = ("Bad update: Did you skip a version from "
               + update_str + "?\n")
    return msg
python
{ "resource": "" }
q276867
handle_ssl_redirect
test
def handle_ssl_redirect():
    """
    Check if a route needs ssl, and redirect it if not. Also redirects
    back to http for non-ssl routes.

    Static routes are served as both http and https

    :return: A response to be returned or None
    """
    if request.endpoint and request.endpoint not in ['static', 'filemanager.static']:
        needs_ssl = False
        ssl_enabled = False
        view_function = current_app.view_functions[request.endpoint]
        if request.endpoint.startswith('admin.') or \
                (hasattr(view_function, 'ssl_required') and
                 view_function.ssl_required):
            needs_ssl = True
            ssl_enabled = True
        if hasattr(view_function, 'ssl_allowed') and view_function.ssl_allowed:
            ssl_enabled = True
        if hasattr(view_function, 'ssl_disabled') and view_function.ssl_disabled:
            needs_ssl = False
            ssl_enabled = False
        if current_app.config['SSL_ENABLED']:
            if needs_ssl and not request.is_secure:
                log.debug('Redirecting to https: %s' % request.endpoint)
                return redirect(request.url.replace("http://", "https://"))
            elif not ssl_enabled and request.is_secure:
                log.debug('Redirecting to http: %s' % request.endpoint)
                return redirect(request.url.replace("https://", "http://"))
        elif needs_ssl:
            log.info('Not redirecting to HTTPS for endpoint %s as '
                     'SSL_ENABLED is set to False' % request.endpoint)
python
{ "resource": "" }
q276868
init_celery
test
def init_celery(app, celery):
    """
    Initialise Celery and set up logging

    :param app: Flask app
    :param celery: Celery instance
    """
    celery.conf.update(app.config)
    TaskBase = celery.Task

    class ContextTask(TaskBase):
        abstract = True

        def __call__(self, *args, **kwargs):
            with app.app_context():
                return TaskBase.__call__(self, *args, **kwargs)

    celery.Task = ContextTask
    return celery
python
{ "resource": "" }
q276869
queue_email
test
def queue_email(to_addresses, from_address, subject, body, commit=True,
                html=True, session=None):
    """
    Add a mail to the queue to be sent. WARNING: Commits by default!

    :param to_addresses: The names and addresses to send the email to,
        i.e. "Steve<[email protected]>, [email protected]"
    :param from_address: Who the email is from,
        i.e. "Stephen Brown <[email protected]>"
    :param subject: The email subject
    :param body: The html / text body of the email
    :param commit: Whether to commit to the database
    :param html: Is this a html email?
    :param session: The sqlalchemy session, or None to use db.session
    """
    from models import QueuedEmail
    if session is None:
        session = _db.session
    log.info('Queuing mail to %s: %s' % (to_addresses, subject))
    queued_email = QueuedEmail(html, to_addresses, from_address, subject,
                               body, STATUS_QUEUED)
    session.add(queued_email)
    # honour the `commit` flag documented above
    if commit:
        session.commit()
    return queued_email
python
{ "resource": "" }
q276870
parse_accept
test
def parse_accept(header_value):
    """Parse an HTTP accept-like header.

    :param str header_value: the header value to parse
    :return: a :class:`list` of :class:`.ContentType` instances in
        decreasing quality order.  Each instance is augmented with the
        associated quality as a ``float`` property named ``quality``.

    ``Accept`` is a class of headers that contain a list of values and
    an associated preference value.  The ever present `Accept`_ header
    is a perfect example.  It is a list of content types and an optional
    parameter named ``q`` that indicates the relative weight of a
    particular type.  The most basic example is::

        Accept: audio/*;q=0.2, audio/basic

    Which states that I prefer the ``audio/basic`` content type but will
    accept other ``audio`` sub-types with an 80% mark down.

    .. _Accept: http://tools.ietf.org/html/rfc7231#section-5.3.2

    """
    next_explicit_q = decimal.ExtendedContext.next_plus(decimal.Decimal('5.0'))
    headers = [parse_content_type(header)
               for header in parse_list(header_value)]
    for header in headers:
        q = header.parameters.pop('q', None)
        if q is None:
            q = '1.0'
        elif float(q) == 1.0:
            q = float(next_explicit_q)
            next_explicit_q = next_explicit_q.next_minus()
        header.quality = float(q)

    def ordering(left, right):
        """
        Method for sorting the header values

        :param mixed left:
        :param mixed right:
        :rtype: mixed
        """
        if left.quality != right.quality:
            return right.quality - left.quality
        if left == right:
            return 0
        if left > right:
            return -1
        return 1

    return sorted(headers, key=functools.cmp_to_key(ordering))
python
{ "resource": "" }
q276871
parse_cache_control
test
def parse_cache_control(header_value):
    """
    Parse a `Cache-Control`_ header, returning a dictionary of key-value
    pairs.

    Any of the ``Cache-Control`` parameters that do not have directives,
    such as ``public`` or ``no-cache``, will be returned with a value of
    ``True`` if they are set in the header.

    :param str header_value: ``Cache-Control`` header value to parse
    :return: the parsed ``Cache-Control`` header values
    :rtype: dict

    .. _Cache-Control: https://tools.ietf.org/html/rfc7234#section-5.2

    """
    directives = {}

    for segment in parse_list(header_value):
        name, sep, value = segment.partition('=')
        if sep != '=':
            directives[name] = None
        elif sep and value:
            value = _dequote(value.strip())
            try:
                directives[name] = int(value)
            except ValueError:
                directives[name] = value
        # NB: ``name=`` (an empty value) is never valid and is ignored!

    # convert parameterless boolean directives
    for name in _CACHE_CONTROL_BOOL_DIRECTIVES:
        if directives.get(name, '') is None:
            directives[name] = True

    return directives
python
{ "resource": "" }
q276872
parse_content_type
test
def parse_content_type(content_type, normalize_parameter_values=True):
    """Parse a content type like header.

    :param str content_type: the string to parse as a content type
    :param bool normalize_parameter_values: setting this to ``False``
        will enable strict RFC2045 compliance in which content parameter
        values are case preserving.
    :return: a :class:`~ietfparse.datastructures.ContentType` instance

    """
    parts = _remove_comments(content_type).split(';')
    content_type, content_subtype = parts.pop(0).split('/')
    if '+' in content_subtype:
        content_subtype, content_suffix = content_subtype.split('+')
    else:
        content_suffix = None

    parameters = _parse_parameter_list(
        parts, normalize_parameter_values=normalize_parameter_values)

    return datastructures.ContentType(content_type, content_subtype,
                                      dict(parameters), content_suffix)
python
{ "resource": "" }
q276873
parse_forwarded
test
def parse_forwarded(header_value, only_standard_parameters=False):
    """
    Parse an RFC7239 Forwarded header.

    :param str header_value: value to parse
    :keyword bool only_standard_parameters: if this keyword is specified
        and given a *truthy* value, then a non-standard parameter name
        will result in
        :exc:`~ietfparse.errors.StrictHeaderParsingFailure`
    :return: an ordered :class:`list` of :class:`dict` instances
    :raises: :exc:`ietfparse.errors.StrictHeaderParsingFailure` is
        raised if `only_standard_parameters` is enabled and a
        non-standard parameter name is encountered

    This function parses a :rfc:`7239` HTTP header into a :class:`list`
    of :class:`dict` instances with each instance containing the param
    values.  The list is ordered as received from left to right and the
    parameter names are folded to lower case strings.

    """
    result = []
    for entry in parse_list(header_value):
        param_tuples = _parse_parameter_list(
            entry.split(';'), normalize_parameter_names=True,
            normalize_parameter_values=False)
        if only_standard_parameters:
            for name, _ in param_tuples:
                if name not in ('for', 'proto', 'by', 'host'):
                    raise errors.StrictHeaderParsingFailure('Forwarded',
                                                            header_value)
        result.append(dict(param_tuples))
    return result
python
{ "resource": "" }
q276874
parse_list
test
def parse_list(value):
    """Parse a comma-separated list header.

    :param str value: header value to split into elements
    :return: list of header elements as strings

    """
    segments = _QUOTED_SEGMENT_RE.findall(value)
    for segment in segments:
        left, match, right = value.partition(segment)
        # temporarily hide commas inside quoted segments so the split
        # below does not break them apart
        value = ''.join([left, match.replace(',', '\000'), right])
    return [_dequote(x.strip()).replace('\000', ',')
            for x in value.split(',')]
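# Illustrative usage (added): quoted segments keep their embedded commas.
# Assumes the function is exposed as ``ietfparse.headers.parse_list``.
from ietfparse import headers

print(headers.parse_list('max-age=5, no-transform'))
# -> ['max-age=5', 'no-transform']
print(headers.parse_list('text/html, "a, quoted, value"'))
# -> ['text/html', 'a, quoted, value']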
python
{ "resource": "" }
q276875
_parse_parameter_list
test
def _parse_parameter_list(parameter_list,
                          normalized_parameter_values=_DEF_PARAM_VALUE,
                          normalize_parameter_names=False,
                          normalize_parameter_values=True):
    """Parse a named parameter list in the "common" format.

    :param parameter_list: sequence of string values to parse
    :keyword bool normalize_parameter_names: if specified and *truthy*
        then parameter names will be case-folded to lower case
    :keyword bool normalize_parameter_values: if omitted or specified
        as *truthy*, then parameter values are case-folded to lower case
    :keyword bool normalized_parameter_values: alternate way to spell
        ``normalize_parameter_values`` -- this one is deprecated
    :return: a sequence containing the name to value pairs

    The parsed values are normalized according to the keyword parameters
    and returned as :class:`tuple` of name to value pairs preserving the
    ordering from `parameter_list`.  The values will have quotes removed
    if they were present.

    """
    if normalized_parameter_values is not _DEF_PARAM_VALUE:  # pragma: no cover
        warnings.warn('normalized_parameter_values keyword to '
                      '_parse_parameter_list is deprecated, use '
                      'normalize_parameter_values instead',
                      DeprecationWarning)
        normalize_parameter_values = normalized_parameter_values

    parameters = []
    for param in parameter_list:
        param = param.strip()
        if param:
            name, value = param.split('=')
            if normalize_parameter_names:
                name = name.lower()
            if normalize_parameter_values:
                value = value.lower()
            parameters.append((name, _dequote(value.strip())))
    return parameters
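# Behaviour sketch (added; calls the private helper directly, so treat
# this as illustration only): names keep their case unless requested,
# values are dequoted and case-folded by default.
pairs = _parse_parameter_list(['charset="UTF-8"', ' Level=1'])
# -> [('charset', 'utf-8'), ('Level', '1')]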
python
{ "resource": "" }
q276876
resize_image_to_fit_width
test
def resize_image_to_fit_width(image, dest_w):
    """
    Resize an image to fit the passed in width, keeping the aspect
    ratio the same

    :param image: PIL.Image
    :param dest_w: The desired width
    """
    scale_factor = dest_w / image.size[0]
    dest_h = image.size[1] * scale_factor
    scaled_image = image.resize((int(dest_w), int(dest_h)),
                                PIL.Image.ANTIALIAS)
    return scaled_image
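# Illustrative usage (added; the file name is made up).  Note that on
# Pillow >= 10 the ANTIALIAS constant used above was removed in favour
# of PIL.Image.LANCZOS.
import PIL.Image

img = PIL.Image.open('cover.jpg')            # e.g. 1200x800 pixels
thumb = resize_image_to_fit_width(img, 300)  # -> 300x200, ratio kept
thumb.save('cover_300.jpg')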
python
{ "resource": "" }
q276877
ParameterParser.add_value
test
def add_value(self, name, value):
    """
    Add a new value to the list.

    :param str name: name of the value that is being parsed
    :param str value: value that is being parsed
    :raises ietfparse.errors.MalformedLinkValue:
        if *strict mode* is enabled and a validation error is detected

    This method implements most of the validation mentioned in
    sections 5.3 and 5.4 of :rfc:`5988`.  The ``_rfc_values``
    dictionary contains the appropriate values for the attributes
    that get special handling.  If *strict mode* is enabled, then
    only values that are acceptable will be added to ``_values``.

    """
    try:
        if self._rfc_values[name] is None:
            self._rfc_values[name] = value
        elif self.strict:
            if name in ('media', 'type'):
                raise errors.MalformedLinkValue(
                    'More than one {} parameter present'.format(name))
            return
    except KeyError:
        pass

    if self.strict and name in ('title', 'title*'):
        return

    self._values.append((name, value))
python
{ "resource": "" }
q276878
Youtube.download
test
def download(self, url):
    """
    Downloads an MP4 or WebM file that is associated with the video
    at the URL passed.

    :param str url: URL of the video to be downloaded
    :return str: Filename of the file in local storage
    """
    try:
        yt = YouTube(url)
    except RegexMatchError:
        log.error(f"Cannot download file at {url}")
    else:
        stream = yt.streams.first()
        log.info(f"Download for {stream.default_filename} has started")
        start_time = time()
        stream.download()
        end_time = time()
        log.info(f"Download for {stream.default_filename} has finished "
                 f"in {end_time - start_time} seconds")
        return stream.default_filename
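# Illustrative usage (added; the URL is a placeholder and a no-argument
# constructor for the surrounding class is assumed).
downloader = Youtube()
filename = downloader.download('https://www.youtube.com/watch?v=XXXXXXXXXXX')
if filename is not None:  # download() returns None for an unparseable URL
    print(f"saved as {filename}")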
python
{ "resource": "" }
q276879
GoogleDrive.connect
test
def connect(self): """Creates connection to the Google Drive API, sets the connection attribute to make requests, and creates the Music folder if it doesn't exist.""" SCOPES = 'https://www.googleapis.com/auth/drive' store = file.Storage('drive_credentials.json') creds = store.get() if not creds or creds.invalid: try: flow = client.flow_from_clientsecrets('client_secret.json', SCOPES) except InvalidClientSecretsError: log.error('ERROR: Could not find client_secret.json in current directory, please obtain it from the API console.') return creds = tools.run_flow(flow, store) self.connection = build('drive', 'v3', http=creds.authorize(Http())) response = self.connection.files().list(q="name='Music' and mimeType='application/vnd.google-apps.folder' and trashed=false").execute() try: folder_id = response.get('files', [])[0]['id'] except IndexError: log.warning('Music folder is missing. Creating it.') folder_metadata = {'name': 'Music', 'mimeType': 'application/vnd.google-apps.folder'} folder = self.connection.files().create(body=folder_metadata, fields='id').execute()
python
{ "resource": "" }
q276880
GoogleDrive.upload
test
def upload(self, file_name):
    """
    Uploads the file associated with the file_name passed to Google
    Drive in the Music folder.

    :param str file_name: Filename of the file to be uploaded
    :return str: Original filename passed as an argument (in order for
        the worker to send it to the delete queue)
    """
    response = self.connection.files().list(
        q="name='Music' and mimeType='application/vnd.google-apps.folder' "
          "and trashed=false").execute()
    folder_id = response.get('files', [])[0]['id']
    file_metadata = {'name': file_name, 'parents': [folder_id]}
    media = MediaFileUpload(file_name, mimetype='audio/mpeg')
    log.info(f"Upload for {file_name} has started")
    start_time = time()
    self.connection.files().create(body=file_metadata, media_body=media,
                                   fields='id').execute()
    end_time = time()
    log.info(f"Upload for {file_name} has finished in "
             f"{end_time - start_time} seconds")
    return file_name
python
{ "resource": "" }
q276881
LocalStorage.connect
test
def connect(self):
    """Initializes the connection attribute with the path to the user
    home folder's Music folder, and creates it if it doesn't exist."""
    if self.music_folder is None:
        music_folder = os.path.join(os.path.expanduser('~'), 'Music')
        if not os.path.exists(music_folder):
            os.makedirs(music_folder)
        self.music_folder = music_folder
python
{ "resource": "" }
q276882
RunParameters.write_sky_params_to_file
test
def write_sky_params_to_file(self):
    """Writes the params to file that skytool_Free needs to generate
    the sky radiance distribution."""
    inp_file = self.sky_file + '_params.txt'
    lg.info('Writing Inputs to file : ' + inp_file)
    f = open(inp_file, 'w')
    f.write('verbose= ' + str(self.verbose) + '\n')
    f.write('band_count= ' + str(self.num_bands) + '\n')
    f.write('band_centres_data= ')
    f.write(",".join([str(wave) for wave in self.wavelengths]) + '\n')
    f.write('partition= ' + self.partition + '\n')
    f.write('vn= ' + str(self.vn) + '\n')
    f.write('hn= ' + str(self.hn) + '\n')
    f.write('rdif= ' + str(self.sky_r_dif) + '\n')
    f.write('theta_points= ')
    f.write(",".join([str(theta) for theta in self.theta_points]) + '\n')
    f.write('type= ' + self.sky_type + '\n')
    f.write('azimuth= ' + str(self.sky_azimuth) + '\n')
    f.write('zenith= ' + str(self.sky_zenith) + '\n')
    # NB str.strip removes a *set* of characters, not a suffix, so the
    # original ``inp_file.strip('_params.txt')`` could eat into the
    # filename itself; slice the suffix off instead.
    f.write('sky_save_fp= ' + inp_file[:-len('_params.txt')] + '\n')
    f.write('sky_image_save_fp= ' + self.sky_file + '.ppm' + '\n')
    f.write('sky_image_size= 256' + '\n')
    if self.sky_type == 'hlideal':
        f.write('C= ' + str(self.sky_c) + '\n')
        f.write('rdif= ' + str(self.sky_r_dif) + '\n')
    f.flush()
    f.close()
python
{ "resource": "" }
q276883
RunParameters.update_filenames
test
def update_filenames(self):
    """Rebuilds self.sky_file from the current sky state, viewing
    geometry and band count."""
    self.sky_file = os.path.abspath(
        os.path.join(os.path.join(self.input_path, 'sky_files'),
                     'sky_' + self.sky_state +
                     '_z' + str(self.sky_zenith) +
                     '_a' + str(self.sky_azimuth) +
                     '_' + str(self.num_bands) +
                     '_' + self.ds_code))
python
{ "resource": "" }
q276884
BioOpticalParameters.read_aphi_from_file
test
def read_aphi_from_file(self, file_name):
    """Read the phytoplankton absorption file from a csv formatted file

    :param file_name: filename and path of the csv file
    """
    lg.info('Reading aphi absorption')
    try:
        self.a_phi = self._read_iop_from_file(file_name)
    except:
        lg.exception('Problem reading file :: ' + file_name)
        self.a_phi = -1
python
{ "resource": "" }
q276885
BioOpticalParameters.scale_aphi
test
def scale_aphi(self, scale_parameter):
    """Scale the spectra by multiplying by a linear scaling factor

    :param scale_parameter: Linear scaling factor
    """
    lg.info('Scaling a_phi by :: ' + str(scale_parameter))
    try:
        self.a_phi = self.a_phi * scale_parameter
    except:
        lg.exception("Can't scale a_phi, check that it has been defined ")
python
{ "resource": "" }
q276886
BioOpticalParameters.read_pure_water_absorption_from_file
test
def read_pure_water_absorption_from_file(self, file_name):
    """Read the pure water absorption from a csv formatted file

    :param file_name: filename and path of the csv file
    """
    lg.info('Reading water absorption from file')
    try:
        self.a_water = self._read_iop_from_file(file_name)
    except:
        lg.exception('Problem reading file :: ' + file_name)
python
{ "resource": "" }
q276887
BioOpticalParameters.read_pure_water_scattering_from_file
test
def read_pure_water_scattering_from_file(self, file_name):
    """Read the pure water scattering from a csv formatted file

    :param file_name: filename and path of the csv file
    """
    lg.info('Reading water scattering from file')
    try:
        self.b_water = self._read_iop_from_file(file_name)
    except:
        lg.exception('Problem reading file :: ' + file_name)
python
{ "resource": "" }
q276888
BioOpticalParameters._read_iop_from_file
test
def _read_iop_from_file(self, file_name):
    """Generic IOP reader that interpolates the iop to the common
    wavelengths defined in the constructor

    :param file_name: filename and path of the csv file
    :returns: interpolated iop
    """
    lg.info('Reading :: ' + file_name + ' :: and interpolating to ' +
            str(self.wavelengths))

    if os.path.isfile(file_name):
        iop_reader = csv.reader(open(file_name), delimiter=',', quotechar='"')
        # Python 3: reader objects expose __next__, not .next()
        wave = next(iop_reader)
        iop = next(iop_reader)
    else:
        lg.exception('Problem reading file :: ' + file_name)
        raise IOError

    try:
        # list comprehensions instead of map(): scipy.interp needs
        # sequences, not the lazy iterators map() returns on Python 3
        wave = [float(w) for w in wave]
        iop = [float(i) for i in iop]
        return scipy.interp(self.wavelengths, wave, iop)
    except (IOError, ValueError):  # float() failures raise ValueError
        lg.exception('Error interpolating IOP to common wavelength')
        return -1
python
{ "resource": "" }
q276889
BioOpticalParameters._write_iop_to_file
test
def _write_iop_to_file(self, iop, file_name):
    """Generic iop file writer

    :param iop: numpy array to write to file
    :param file_name: the file and path to write the IOP to
    """
    lg.info('Writing :: ' + file_name)
    # the original left the handle open; a context manager guarantees
    # the file is flushed and closed
    with open(file_name, 'w') as f:
        for i in scipy.nditer(iop):
            f.write(str(i) + '\n')
python
{ "resource": "" }
q276890
BioOpticalParameters.build_b
test
def build_b(self, scattering_fraction=0.01833):
    """Calculates the total scattering from back-scattering

    :param scattering_fraction: the fraction of back-scattering to
        total scattering, default = 0.01833

    b = (bb[p] + bb[sea water]) / 0.01833, where bb[sea water] is taken
    as half of the pure water scattering coefficient.
    """
    lg.info('Building b with scattering fraction of :: ' +
            str(scattering_fraction))
    self.b = (self.b_b + self.b_water / 2.0) / scattering_fraction
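# Numeric sketch (added; synthetic single-band values, illustration only):
import numpy

b_b = numpy.array([0.01])       # total backscatter, m^-1
b_water = numpy.array([0.002])  # pure water scattering, m^-1
b = (b_b + b_water / 2.0) / 0.01833
# -> b ~= 0.60 m^-1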
python
{ "resource": "" }
q276891
BioOpticalParameters.build_a
test
def build_a(self):
    """Calculates the total absorption from water, phytoplankton and CDOM

    a = awater + acdom + aphi
    """
    lg.info('Building total absorption')
    self.a = self.a_water + self.a_cdom + self.a_phi
python
{ "resource": "" }
q276892
BioOpticalParameters.build_c
test
def build_c(self):
    """Calculates the total attenuation from the total absorption and
    total scattering

    c = a + b
    """
    lg.info('Building total attenuation C')
    self.c = self.a + self.b
python
{ "resource": "" }
q276893
BioOpticalParameters.build_all_iop
test
def build_all_iop(self):
    """Meta method that calls all of the build methods in the correct
    order:

    self.build_a()
    self.build_bb()
    self.build_b()
    self.build_c()
    """
    lg.info('Building all b and c from IOPs')
    self.build_a()
    self.build_bb()
    self.build_b()
    self.build_c()
python
{ "resource": "" }
q276894
BatchRun.batch_parameters
test
def batch_parameters(self, saa, sza, p, x, y, g, s, z):
    """Takes lists for parameters and saves them as class properties

    :param saa: <list> Sun Azimuth Angle (deg)
    :param sza: <list> Sun Zenith Angle (deg)
    :param p: <list> Phytoplankton linear scaling factor
    :param x: <list> Scattering scaling factor
    :param y: <list> Scattering slope factor
    :param g: <list> CDOM absorption scaling factor
    :param s: <list> CDOM absorption slope factor
    :param z: <list> depth (m)
    """
    self.saa_list = saa
    self.sza_list = sza
    self.p_list = p
    self.x_list = x
    self.y_list = y
    self.g_list = g
    self.s_list = s
    self.z_list = z
python
{ "resource": "" }
q276895
FileTools.read_param_file_to_dict
test
def read_param_file_to_dict(file_name):
    """Loads a text file to a python dictionary using '=' as the delimiter

    :param file_name: the name and path of the text file
    """
    data = loadtxt(file_name, delimiter='=', dtype=scipy.string0)
    data_dict = dict(data)
    # iterate over a snapshot of the keys: mutating a dict while
    # iterating its live key view raises RuntimeError on Python 3, and
    # deleting after re-inserting would drop keys that need no stripping
    for key in list(data_dict.keys()):
        value = data_dict[key].strip()
        del data_dict[key]
        data_dict[key.strip()] = value
    return data_dict
python
{ "resource": "" }
q276896
HelperMethods.string_to_float_list
test
def string_to_float_list(string_var):
    """Pull comma separated string values out of a text file and convert
    them to a float list"""
    try:
        return [float(s) for s in string_var.strip('[').strip(']').split(', ')]
    except ValueError:
        # fall back for lists written without a space after the comma
        return [float(s) for s in string_var.strip('[').strip(']').split(',')]
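# Behaviour sketch (added): both spacing styles parse to the same list.
print(string_to_float_list('[440.0, 510.0, 550.0]'))  # [440.0, 510.0, 550.0]
print(string_to_float_list('[440.0,510.0,550.0]'))    # [440.0, 510.0, 550.0]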
python
{ "resource": "" }
q276897
ReportTools.read_pr_report
test
def read_pr_report(self, filename):
    """Reads in a PlanarRad generated report

    Saves the single line reported parameters as a python dictionary

    :param filename: The name and path of the PlanarRad generated file
    :returns self.data_dictionary: python dictionary with the key and
        values from the report
    """
    f = open(filename)
    while f:
        line = f.readline()
        if not line:
            break

        if "# Quad solid angle mean point theta table (rows are horizontal, columns are vertical):" in line.strip():
            # read in the bunch of lines.
            tmp = []
            for i_iter in range(0, len(self.data_dictionary['theta_points_deg']) - 2):
                tmp.append(f.readline())
            self.data_dictionary['Quad_solid_angle_mean_point_theta'] = tmp
        elif '#' not in line or not line.strip():
            element = line.split(',')
            self.data_dictionary[element[0]] = element[1:]

        if "# Quad solid angle mean point phi table (rows are horizontal, columns are vertical):" in line.strip():
            # read in the bunch of lines.
            tmp = []
            for i_iter in range(0, len(self.data_dictionary['theta_points_deg']) - 2):
                tmp.append(f.readline())
            self.data_dictionary['Quad_solid_angle_mean_point_phi'] = tmp
        elif '#' not in line or not line.strip():
            element = line.split(',')
            self.data_dictionary[element[0]] = element[1:]

        if "L_w band" in line.strip():
            for i_iter in range(0, int(self.data_dictionary['band_count'][1])):
                tmp = []
                for j_iter in range(0, len(self.data_dictionary['theta_points_deg']) - 2):
                    tmp.append(f.readline())
                self.data_dictionary['L_w_band_' + str(i_iter + 1)] = tmp
                f.readline()
                f.readline()  # skip the next 2 lines

        if "L_it band" in line.strip():
            for i_iter in range(0, int(self.data_dictionary['band_count'][1])):
                tmp = []
                for j_iter in range(0, len(self.data_dictionary['theta_points_deg']) - 2):
                    tmp.append(f.readline())
                self.data_dictionary['L_it_band_' + str(i_iter + 1)] = tmp
                f.readline()
                f.readline()  # skip the next 2 lines

    return self.data_dictionary
python
{ "resource": "" }
q276898
SignalHandler.set_handler
test
def set_handler(self, signals, handler=signal.SIG_DFL):
    """
    Takes a list of signals and sets a handler for them
    """
    for sig in signals:
        self.log.debug("Creating handler for signal: {0}".format(sig))
        signal.signal(sig, handler)
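# Illustrative usage (added; assumes a no-argument constructor and the
# instance ``log`` attribute used above).
import signal

def my_shutdown_func(signum, frame):  # hypothetical handler
    print('shutting down on signal', signum)

handler = SignalHandler()
handler.set_handler([signal.SIGTERM, signal.SIGINT], my_shutdown_func)
handler.set_handler([signal.SIGTERM])  # restore the default disposition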
python
{ "resource": "" }
q276899
SignalHandler.pseudo_handler
test
def pseudo_handler(self, signum, frame):
    """
    Placeholder handler used while a signal is being processed
    """
    self.log.warning("Received signal {0} but system is already busy "
                     "processing a previous signal, current frame: {1}"
                     .format(signum, str(frame)))
python
{ "resource": "" }