<SYSTEM_TASK:> Read the line content and return the converted value <END_TASK> <USER_TASK:> Description: def convert(self, line=None, is_end=True): """Read the line content and return the converted value :param line: the line to feed to converter :param is_end: if set to True, will raise an error if the line has something remaining. """
if line is not None:
    self.line = line
if not self.line:
    raise TomlDecodeError(self.parser.lineno, 'EOF is hit!')
token = None
self.line = self.line.lstrip()
for key, pattern in self.patterns:
    m = pattern.match(self.line)
    if m:
        self.line = self.line[m.end():]
        handler = getattr(self, 'convert_%s' % key)
        token = handler(m)
        break
else:
    raise TomlDecodeError(self.parser.lineno,
                          'Parsing error: %r' % self.line)
if is_end and not BLANK_RE.match(self.line):
    raise TomlDecodeError(self.parser.lineno,
                          'Something remains: %r' % self.line)
return token
<SYSTEM_TASK:> Parse the lines from index i <END_TASK> <USER_TASK:> Description: def parse(self, data=None, table_name=None): """Parse the lines from index i :param data: optional, store the parsed result to it when specified :param table_name: when inside a table array, it is the table name """
temp = self.dict_()
sub_table = None
is_array = False
line = ''
while True:
    line = self._readline()
    if not line:
        self._store_table(sub_table, temp, is_array, data=data)
        break  # EOF
    if BLANK_RE.match(line):
        continue
    if TABLE_RE.match(line):
        next_table = self.split_string(
            TABLE_RE.match(line).group(1), '.', False)
        if table_name and not contains_list(next_table, table_name):
            self._store_table(sub_table, temp, is_array, data=data)
            break
        table = cut_list(next_table, table_name)
        if sub_table == table:
            raise TomlDecodeError(self.lineno, 'Duplicate table name '
                                  'in origin: %r' % sub_table)
        else:  # different table name
            self._store_table(sub_table, temp, is_array, data=data)
            sub_table = table
            is_array = False
    elif TABLE_ARRAY_RE.match(line):
        next_table = self.split_string(
            TABLE_ARRAY_RE.match(line).group(1), '.', False)
        if table_name and not contains_list(next_table, table_name):
            # Out of current loop
            # write current data dict to table dict
            self._store_table(sub_table, temp, is_array, data=data)
            break
        table = cut_list(next_table, table_name)
        if sub_table == table and not is_array:
            raise TomlDecodeError(self.lineno, 'Duplicate name of '
                                  'table and array of table: %r' % sub_table)
        else:  # Begin a nested loop
            # Write any temp data to table dict
            self._store_table(sub_table, temp, is_array, data=data)
            sub_table = table
            is_array = True
            self.parse(temp, next_table)
    elif KEY_RE.match(line):
        m = KEY_RE.match(line)
        keys = self.split_string(m.group(1), '.')
        value = self.converter.convert(line[m.end():])
        if value is None:
            raise TomlDecodeError(self.lineno, 'Value is missing')
        self._store_table(keys[:-1], {keys[-1]: value}, data=temp)
    else:
        raise TomlDecodeError(self.lineno,
                              'Pattern is not recognized: %r' % line)
# Rollback to the last line for next parse
# This will do nothing if EOF is hit
self.instream.seek(self.instream.tell() - len(line))
self.lineno -= 1
<SYSTEM_TASK:> Return whether the page is visible in navigation. <END_TASK> <USER_TASK:> Description: def is_visible(self): """ Return whether the page is visible in navigation. Pages must have 'show in navigation' set. Regular pages must also be published (published and have a current version - checked with `is_published`); pages with a glitter app associated don't need any page versions. """
if self.glitter_app_name:
    visible = self.show_in_navigation
else:
    visible = self.show_in_navigation and self.is_published
return visible
<SYSTEM_TASK:> Add this method because Django doesn't validate correctly when required fields are <END_TASK> <USER_TASK:> Description: def validate_unique(self): """ Add this method because Django doesn't validate correctly when required fields are excluded. """
unique_checks, date_checks = self.instance._get_unique_checks(exclude=[])
errors = self.instance._perform_unique_checks(unique_checks)
if errors:
    self.add_error(None, errors)
<SYSTEM_TASK:> Create Image from raw dictionary data. <END_TASK> <USER_TASK:> Description: def from_dict(raw_data): """Create Image from raw dictionary data."""
url = None
width = None
height = None
try:
    url = raw_data['url']
    width = raw_data['width']
    height = raw_data['height']
except KeyError:
    raise ValueError('Unexpected image json structure')
except TypeError:
    # Happens when raw_data is None, i.e. when a term has no image:
    pass
return Image(url, width, height)
<SYSTEM_TASK:> Return set of common words between two word sets. <END_TASK> <USER_TASK:> Description: def has_common(self, other): """Return set of common words between two word sets."""
if not isinstance(other, WordSet):
    raise ValueError('Can compare only WordSets')
return self.term_set & other.term_set
<SYSTEM_TASK:> Create a new release in github <END_TASK> <USER_TASK:> Description: def release(ctx, yes, latest): """Create a new release in github """
m = RepoManager(ctx.obj['agile'])
api = m.github_repo()
if latest:
    latest = api.releases.latest()
    if latest:
        click.echo(latest['tag_name'])
elif m.can_release('sandbox'):
    branch = m.info['branch']
    version = m.validate_version()
    name = 'v%s' % version
    body = ['Release %s from agiletoolkit' % name]
    data = dict(
        tag_name=name,
        target_commitish=branch,
        name=name,
        body='\n\n'.join(body),
        draft=False,
        prerelease=False
    )
    if yes:
        data = api.releases.create(data=data)
        m.message('Successfully created a new Github release')
        click.echo(niceJson(data))
    else:
        click.echo('skipped')
<SYSTEM_TASK:> Create XenaManager object. <END_TASK> <USER_TASK:> Description: def init_xena(api, logger, owner, ip=None, port=57911): """ Create XenaManager object. :param api: cli/rest :param logger: python logger :param owner: owner of the scripting session :param ip: rest server IP :param port: rest server TCP port :return: Xena object :rtype: XenaApp """
if api == ApiType.socket:
    api_wrapper = XenaCliWrapper(logger)
elif api == ApiType.rest:
    api_wrapper = XenaRestWrapper(logger, ip, port)
else:
    # Avoid a NameError further down when an unknown API type is passed in.
    raise ValueError('Unsupported API type: %s' % api)
return XenaApp(logger, owner, api_wrapper)
<SYSTEM_TASK:> Add chassis. <END_TASK> <USER_TASK:> Description: def add_chassis(self, chassis, port=22611, password='xena'): """ Add chassis. XenaManager-2G -> Add Chassis. :param chassis: chassis IP address :param port: chassis port number :param password: chassis password :return: newly created chassis :rtype: xenamanager.xena_app.XenaChassis """
if chassis not in self.chassis_list:
    try:
        XenaChassis(self, chassis, port, password)
    except Exception as error:
        self.objects.pop('{}/{}'.format(self.owner, chassis))
        raise error
return self.chassis_list[chassis]
<SYSTEM_TASK:> Stop traffic on list of ports. <END_TASK> <USER_TASK:> Description: def stop_traffic(self, *ports): """ Stop traffic on list of ports. :param ports: list of ports to stop traffic on. Default - all session ports. """
for chassis, chassis_ports in self._per_chassis_ports(*self._get_operation_ports(*ports)).items():
    chassis.stop_traffic(*chassis_ports)
<SYSTEM_TASK:> Decorator for registering your Admin class. <END_TASK> <USER_TASK:> Description: def register(model, admin=None, category=None): """ Decorator for registering your Admin class. """
def _model_admin_wrapper(admin_class):
    site.register(model, admin_class=admin_class)
    if category:
        site.register_block(model, category)
    return admin_class
return _model_admin_wrapper
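A minimal usage sketch for the decorator above, assuming a configured glitter `site`; the model and admin class names are hypothetical:

from django.contrib import admin

@register(MyPage, category='Pages')  # MyPage is a placeholder model
class MyPageAdmin(admin.ModelAdmin):
    list_display = ('title',)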
<SYSTEM_TASK:> Determine the HttpResponse for the change_view stage. <END_TASK> <USER_TASK:> Description: def response_change(self, request, obj): """Determine the HttpResponse for the change_view stage."""
opts = self.opts.app_label, self.opts.model_name
pk_value = obj._get_pk_val()
if '_continue' in request.POST:
    msg = _(
        'The %(name)s block was changed successfully. You may edit it again below.'
    ) % {'name': force_text(self.opts.verbose_name)}
    self.message_user(request, msg, messages.SUCCESS)
    # We redirect to the save and continue page, which updates the
    # parent window in javascript and redirects back to the edit page
    # in javascript.
    return HttpResponseRedirect(reverse(
        'admin:%s_%s_continue' % opts,
        args=(pk_value,),
        current_app=self.admin_site.name
    ))
# Update column and close popup - don't bother with a message as they won't see it
return self.response_rerender(request, obj, 'admin/glitter/update_column.html')
<SYSTEM_TASK:> A field could be found for this term, try to get filter string for it. <END_TASK> <USER_TASK:> Description: def get_filter_item(name: str, operation: Optional[str], value: bytes) -> bytes: """ A field could be found for this term, try to get a filter string for it. """
assert isinstance(name, str)
assert isinstance(value, bytes)
if operation is None:
    return filter_format(b"(%s=%s)", [name, value])
elif operation == "contains":
    assert value != b""
    return filter_format(b"(%s=*%s*)", [name, value])
else:
    raise ValueError("Unknown search operation %s" % operation)
<SYSTEM_TASK:> Translate the Q tree into a filter string to search for, or None <END_TASK> <USER_TASK:> Description: def get_filter(q: tldap.Q, fields: Dict[str, tldap.fields.Field], pk: str): """ Translate the Q tree into a filter string to search for, or None if no results possible. """
# check the details are valid
if q.negated and len(q.children) == 1:
    op = b"!"
elif q.connector == tldap.Q.AND:
    op = b"&"
elif q.connector == tldap.Q.OR:
    op = b"|"
else:
    raise ValueError("Invalid value of op found")

# scan through every child
search = []
for child in q.children:
    # if this child is a node, then descend into it
    if isinstance(child, tldap.Q):
        search.append(get_filter(child, fields, pk))
    else:
        # otherwise get the values in this node
        name, value = child

        # split the name if possible
        name, _, operation = name.rpartition("__")
        if name == "":
            name, operation = operation, None

        # replace pk with the real attribute
        if name == "pk":
            name = pk

        # DN is a special case
        if name == "dn":
            dn_name = "entryDN:"
            if isinstance(value, list):
                s = []
                for v in value:
                    assert isinstance(v, str)
                    v = v.encode('utf_8')
                    s.append(get_filter_item(dn_name, operation, v))
                # AND the per-value filters together
                search.append(b"(&" + b"".join(s) + b")")
            # or process just the single value
            else:
                assert isinstance(value, str)
                v = value.encode('utf_8')
                search.append(get_filter_item(dn_name, operation, v))
            continue

        # try to find field associated with name
        field = fields[name]
        if isinstance(value, list) and len(value) == 1:
            value = value[0]
            assert isinstance(value, str)

        # process as list
        if isinstance(value, list):
            s = []
            for v in value:
                v = field.value_to_filter(v)
                s.append(get_filter_item(name, operation, v))
            # AND the per-value filters together
            search.append(b"(&" + b"".join(s) + b")")
        # or process just the single value
        else:
            value = field.value_to_filter(value)
            search.append(get_filter_item(name, operation, value))

# output the results
if len(search) == 1 and not q.negated:
    # just one non-negative term, return it
    return search[0]
else:
    # multiple terms
    return b"(" + op + b"".join(search) + b")"
<SYSTEM_TASK:> r"""The name of the script, callable from the command line. <END_TASK> <USER_TASK:> Description: def program_name(self): r"""The name of the script, callable from the command line. """
name = "-".join( word.lower() for word in uqbar.strings.delimit_words(type(self).__name__) ) return name
<SYSTEM_TASK:> Function does nothing - is just ``pass`` or docstring. <END_TASK> <USER_TASK:> Description: def function_is_noop(function_node: ast.FunctionDef) -> bool: """ Function does nothing - is just ``pass`` or docstring. """
return all(node_is_noop(n) for n in function_node.body)
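A quick check of the helper, assuming `node_is_noop` (defined alongside it) treats docstrings and `pass` as no-ops:

import ast

tree = ast.parse("def stub():\n    '''TODO'''\n    pass\n")
func = tree.body[0]  # the ast.FunctionDef node
print(function_is_noop(func))  # True: body is only a docstring and a pass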
<SYSTEM_TASK:> Adds "parent" attribute to all child nodes of passed node. <END_TASK> <USER_TASK:> Description: def add_node_parents(root: ast.AST) -> None: """ Adds "parent" attribute to all child nodes of passed node. Code taken from https://stackoverflow.com/a/43311383/1286705 """
for node in ast.walk(root):
    for child in ast.iter_child_nodes(node):
        child.parent = node
<SYSTEM_TASK:> Generates the set of line numbers that the passed node covers, relative to the <END_TASK> <USER_TASK:> Description: def build_footprint(node: ast.AST, first_line_no: int) -> Set[int]: """ Generates the set of line numbers that the passed node covers, relative to the marked lines list - i.e. the start of the function is line 0. """
return set(
    range(
        get_first_token(node).start[0] - first_line_no,
        get_last_token(node).end[0] - first_line_no + 1,
    )
)
<SYSTEM_TASK:> Finds all nodes that are before the ``max_line_number`` and are not <END_TASK> <USER_TASK:> Description: def filter_arrange_nodes(nodes: List[ast.stmt], max_line_number: int) -> List[ast.stmt]: """ Finds all nodes that are before the ``max_line_number`` and are not docstrings or ``pass``. """
return [
    node for node in nodes
    if node.lineno < max_line_number
    and not isinstance(node, ast.Pass)
    and not (isinstance(node, ast.Expr) and isinstance(node.value, ast.Str))
]
<SYSTEM_TASK:> Finds all nodes that are after the ``min_line_number`` <END_TASK> <USER_TASK:> Description: def filter_assert_nodes(nodes: List[ast.stmt], min_line_number: int) -> List[ast.stmt]: """ Finds all nodes that are after the ``min_line_number`` """
return [node for node in nodes if node.lineno > min_line_number]
<SYSTEM_TASK:> Finds all lines that contain a string in a tree, usually a function. These <END_TASK> <USER_TASK:> Description: def find_stringy_lines(tree: ast.AST, first_line_no: int) -> Set[int]: """ Finds all lines that contain a string in a tree, usually a function. These lines will be ignored when searching for blank lines. """
str_footprints = set()
for node in ast.walk(tree):
    if isinstance(node, ast.Str):
        str_footprints.update(build_footprint(node, first_line_no))
return str_footprints
<SYSTEM_TASK:> Run everything required for checking this function. <END_TASK> <USER_TASK:> Description: def check_all(self) -> Generator[AAAError, None, None]: """ Run everything required for checking this function. Returns: A generator of errors. Raises: ValidationError: A non-recoverable linting error is found. """
# Function def
if function_is_noop(self.node):
    return

self.mark_bl()
self.mark_def()

# ACT
# Load act block and kick out when none is found
self.act_node = self.load_act_node()
self.act_block = Block.build_act(self.act_node.node, self.node)
act_block_first_line_no, act_block_last_line_no = self.act_block.get_span(0)

# ARRANGE
self.arrange_block = Block.build_arrange(self.node.body, act_block_first_line_no)

# ASSERT
assert self.act_node
self.assert_block = Block.build_assert(self.node.body, act_block_last_line_no)

# SPACING
for block in ['arrange', 'act', 'assert']:
    self_block = getattr(self, '{}_block'.format(block))
    try:
        span = self_block.get_span(self.first_line_no)
    except EmptyBlock:
        continue
    self.line_markers.update(span, self_block.line_type)

yield from self.line_markers.check_arrange_act_spacing()
yield from self.line_markers.check_act_assert_spacing()
yield from self.line_markers.check_blank_lines()
<SYSTEM_TASK:> Mark unprocessed lines that have no content and no string nodes <END_TASK> <USER_TASK:> Description: def mark_bl(self) -> int: """ Mark unprocessed lines that have no content and no string nodes covering them as blank line BL. Returns: Number of blank lines found with no stringy parent node. """
counter = 0
stringy_lines = find_stringy_lines(self.node, self.first_line_no)
for relative_line_number, line in enumerate(self.lines):
    if relative_line_number not in stringy_lines and line.strip() == '':
        counter += 1
        self.line_markers[relative_line_number] = LineType.blank_line
return counter
<SYSTEM_TASK:> Function getParamFromEnv <END_TASK> <USER_TASK:> Description: def getParamFromEnv(self, var, default=''): """ Function getParamFromEnv Search a parameter in the host environment, falling back to the hostgroup and then the domain @param var: the var name @param default: default value @return RETURN: the value """
if self.getParam(var):
    return self.getParam(var)
if self.hostgroup:
    if self.hostgroup.getParam(var):
        return self.hostgroup.getParam(var)
# Fall back to the domain-level parameter of the same name
if self.domain.getParam(var):
    return self.domain.getParam(var)
else:
    return default
<SYSTEM_TASK:> Function getUserData <END_TASK> <USER_TASK:> Description: def getUserData(self, hostgroup, domain, defaultPwd='', defaultSshKey='', proxyHostname='', tplFolder='metadata/templates/'): """ Function getUserData Generate a userdata script for metadata server from Foreman API @param domain: the domain item linked to this host @param hostgroup: the hostgroup item linked to this host @param defaultPwd: the default password if no password is specified in the host>hostgroup>domain params @param defaultSshKey: the default ssh key if no password is specified in the host>hostgroup>domain params @param proxyHostname: hostname of the smartproxy @param tplFolder: the templates folder @return RETURN: the user data """
if 'user-data' in self.keys():
    return self['user-data']
else:
    self.hostgroup = hostgroup
    self.domain = domain
    if proxyHostname == '':
        proxyHostname = 'foreman.' + domain['name']
    password = self.getParamFromEnv('password', defaultPwd)
    sshauthkeys = self.getParamFromEnv('global_sshkey', defaultSshKey)
    with open(tplFolder + 'puppet.conf', 'r') as puppet_file:
        p = MyTemplate(puppet_file.read())
        content = p.substitute(foremanHostname=proxyHostname)
        enc_puppet_file = base64.b64encode(bytes(content, 'utf-8'))
    with open(tplFolder + 'cloud-init.tpl', 'r') as content_file:
        s = MyTemplate(content_file.read())
    if sshauthkeys:
        sshauthkeys = ' - ' + sshauthkeys
    self.userdata = s.substitute(
        password=password,
        fqdn=self['name'],
        sshauthkeys=sshauthkeys,
        foremanurlbuilt="http://{}/unattended/built".format(proxyHostname),
        puppet_conf_content=enc_puppet_file.decode('utf-8'))
    return self.userdata
<SYSTEM_TASK:> Queue one or more payload for execution after its runner is started <END_TASK> <USER_TASK:> Description: def register_payload(self, *payloads, flavour: ModuleType): """Queue one or more payload for execution after its runner is started"""
for payload in payloads:
    self._logger.debug('registering payload %s (%s)', NameRepr(payload), NameRepr(flavour))
    self.runners[flavour].register_payload(payload)
<SYSTEM_TASK:> Execute one payload after its runner is started and return its output <END_TASK> <USER_TASK:> Description: def run_payload(self, payload, *, flavour: ModuleType): """Execute one payload after its runner is started and return its output"""
return self.runners[flavour].run_payload(payload)
<SYSTEM_TASK:> Run all runners, blocking until completion or error <END_TASK> <USER_TASK:> Description: def run(self): """Run all runners, blocking until completion or error"""
self._logger.info('starting all runners')
try:
    with self._lock:
        assert not self.running.is_set(), 'cannot re-run: %s' % self
        self.running.set()
    thread_runner = self.runners[threading]
    for runner in self.runners.values():
        if runner is not thread_runner:
            thread_runner.register_payload(runner.run)
    if threading.current_thread() == threading.main_thread():
        asyncio_main_run(root_runner=thread_runner)
    else:
        thread_runner.run()
except Exception as err:
    self._logger.exception('runner terminated: %s', err)
    raise RuntimeError from err
finally:
    self._stop_runners()
    self._logger.info('stopped all runners')
    self.running.clear()
<SYSTEM_TASK:> Hook for specifying the form Field instance for a given database Field <END_TASK> <USER_TASK:> Description: def formfield_for_dbfield(self, db_field, **kwargs): """ Hook for specifying the form Field instance for a given database Field instance. If kwargs are given, they're passed to the form Field's constructor. """
formfield = super().formfield_for_dbfield(db_field, **kwargs)
if db_field.name == 'image':
    formfield.widget = ImageRelatedFieldWidgetWrapper(
        ImageSelect(),
        db_field.rel,
        self.admin_site,
        can_add_related=True,
        can_change_related=True,
    )
return formfield
<SYSTEM_TASK:> Compare two structures that represent JSON schemas. <END_TASK> <USER_TASK:> Description: def compare_schemas(one, two): """Compare two structures that represent JSON schemas. For comparison you can't use normal comparison, because in JSON schema lists DO NOT keep order (and Python lists do), so this must be taken into account during comparison. Note this won't check all configurations, only the first one that seems to match, which can lead to wrong results. :param one: First schema to compare. :param two: Second schema to compare. :rtype: `bool` """
one = _normalize_string_type(one)
two = _normalize_string_type(two)
_assert_same_types(one, two)
if isinstance(one, list):
    return _compare_lists(one, two)
elif isinstance(one, dict):
    return _compare_dicts(one, two)
elif isinstance(one, SCALAR_TYPES):
    return one == two
elif one is None:
    return one is two
else:
    raise RuntimeError('Not allowed type "{type}"'.format(
        type=type(one).__name__))
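For example, list order is ignored while plain equality would differ; a sketch assuming the helpers above are in scope:

one = {'type': ['string', 'null']}
two = {'type': ['null', 'string']}
print(one == two)                 # False - Python lists are ordered
print(compare_schemas(one, two))  # True - JSON schema lists are not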
<SYSTEM_TASK:> Check if given regex is of type ECMA 262 or not. <END_TASK> <USER_TASK:> Description: def is_ecma_regex(regex): """Check if given regex is of type ECMA 262 or not. :rtype: bool """
parts = regex.split('/')
if len(parts) == 1:
    return False
if len(parts) < 3:
    raise ValueError('Given regex isn\'t ECMA regex nor Python regex.')
parts.pop()
parts.append('')
raw_regex = '/'.join(parts)
if raw_regex.startswith('/') and raw_regex.endswith('/'):
    return True
return False
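Tracing the function on a few inputs:

print(is_ecma_regex('foo'))     # False - no slashes, treated as a Python regex
print(is_ecma_regex('/foo/i'))  # True - '/pattern/flags' form
print(is_ecma_regex('/foo/'))   # True - empty flags are allowed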
<SYSTEM_TASK:> Convert ECMA 262 regex to Python tuple with regex and flags. <END_TASK> <USER_TASK:> Description: def convert_ecma_regex_to_python(value): """Convert ECMA 262 regex to Python tuple with regex and flags. If given value is already Python regex it will be returned unchanged. :param string value: ECMA regex. :return: 2-tuple with `regex` and `flags` :rtype: namedtuple """
if not is_ecma_regex(value):
    return PythonRegex(value, [])
parts = value.split('/')
flags = parts.pop()
try:
    result_flags = [ECMA_TO_PYTHON_FLAGS[f] for f in flags]
except KeyError:
    raise ValueError('Wrong flags "{}".'.format(flags))
return PythonRegex('/'.join(parts[1:]), result_flags)
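A worked conversion, assuming `ECMA_TO_PYTHON_FLAGS` maps 'i' to `re.I` and 'm' to `re.M`:

result = convert_ecma_regex_to_python('/[a-z]+/mi')
print(result.regex)  # '[a-z]+'
print(result.flags)  # [re.M, re.I]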
<SYSTEM_TASK:> Convert Python regex to ECMA 262 regex. <END_TASK> <USER_TASK:> Description: def convert_python_regex_to_ecma(value, flags=[]): """Convert Python regex to ECMA 262 regex. If given value is already ECMA regex it will be returned unchanged. :param string value: Python regex. :param list flags: List of flags (allowed flags: `re.I`, `re.M`) :return: ECMA 262 regex :rtype: str """
if is_ecma_regex(value):
    return value
result_flags = [PYTHON_TO_ECMA_FLAGS[f] for f in flags]
result_flags = ''.join(result_flags)
return '/{value}/{flags}'.format(value=value, flags=result_flags)
<SYSTEM_TASK:> Get field associated with given attribute. <END_TASK> <USER_TASK:> Description: def get_field(self, field_name): """Get field associated with given attribute."""
for attr_name, field in self:
    if field_name == attr_name:
        return field
raise errors.FieldNotFound('Field not found', field_name)
<SYSTEM_TASK:> Explicitly validate all the fields. <END_TASK> <USER_TASK:> Description: def validate(self): """Explicitly validate all the fields."""
for name, field in self:
    try:
        field.validate_for_object(self)
    except ValidationError as error:
        raise ValidationError(
            "Error for field '{name}'.".format(name=name),
            error,
        )
<SYSTEM_TASK:> Iterate over fields, but also give `structure_name`. <END_TASK> <USER_TASK:> Description: def iterate_with_name(cls): """Iterate over fields, but also give `structure_name`. Format is `(attribute_name, structue_name, field_instance)`. Structure name is name under which value is seen in structure and schema (in primitives) and only there. """
for attr_name, field in cls.iterate_over_fields():
    structure_name = field.structue_name(attr_name)
    yield attr_name, structure_name, field
<SYSTEM_TASK:> Parse value to proper model type. <END_TASK> <USER_TASK:> Description: def parse_value(self, value): """Parse value to proper model type."""
if not isinstance(value, dict):
    return value
embed_type = self._get_embed_type()
return embed_type(**value)
<SYSTEM_TASK:> Parse string into instance of `time`. <END_TASK> <USER_TASK:> Description: def parse_value(self, value): """Parse string into instance of `time`."""
if value is None:
    return value
if isinstance(value, datetime.time):
    return value
return parse(value).timetz()
<SYSTEM_TASK:> Parse string into instance of `datetime`. <END_TASK> <USER_TASK:> Description: def parse_value(self, value): """Parse string into instance of `datetime`."""
if isinstance(value, datetime.datetime):
    return value
if value:
    return parse(value)
else:
    return None
<SYSTEM_TASK:> Cast instance of model to python structure. <END_TASK> <USER_TASK:> Description: def to_struct(model): """Cast instance of model to python structure. :param model: Model to be casted. :rtype: ``dict`` """
model.validate()
resp = {}
for _, name, field in model.iterate_with_name():
    value = field.__get__(model)
    if value is None:
        continue
    value = field.to_struct(value)
    resp[name] = value
return resp
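A minimal round trip, assuming the jsonmodels-style `models.Base`/`fields` API these snippets come from:

from jsonmodels import models, fields

class Person(models.Base):
    name = fields.StringField(required=True)
    age = fields.IntField()

print(to_struct(Person(name='Ada', age=36)))  # {'name': 'Ada', 'age': 36}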
<SYSTEM_TASK:> Prepare bloom for existing checks <END_TASK> <USER_TASK:> Description: def __prepare_bloom(self): """Prepare bloom for existing checks """
self.__bloom = pybloom_live.ScalableBloomFilter()
columns = [getattr(self.__table.c, key) for key in self.__update_keys]
keys = select(columns).execution_options(stream_results=True).execute()
for key in keys:
    self.__bloom.add(tuple(key))
<SYSTEM_TASK:> Check if row exists in table <END_TASK> <USER_TASK:> Description: def __check_existing(self, row): """Check if row exists in table """
if self.__update_keys is not None:
    key = tuple(row[key] for key in self.__update_keys)
    if key in self.__bloom:
        return True
    self.__bloom.add(key)
    return False
return False
<SYSTEM_TASK:> Create SQL comment from field's title and description <END_TASK> <USER_TASK:> Description: def _get_field_comment(field, separator=' - '): """ Create SQL comment from field's title and description :param field: tableschema-py Field, with optional 'title' and 'description' values :param separator: :return: >>> _get_field_comment(tableschema.Field({'title': 'my_title', 'description': 'my_desc'})) 'my_title - my_desc' >>> _get_field_comment(tableschema.Field({'title': 'my_title', 'description': None})) 'my_title' >>> _get_field_comment(tableschema.Field({'title': '', 'description': 'my_description'})) 'my_description' >>> _get_field_comment(tableschema.Field({})) '' """
title = field.descriptor.get('title') or ''
description = field.descriptor.get('description') or ''
return _get_comment(description, title, separator)
<SYSTEM_TASK:> Restore bucket from SQL <END_TASK> <USER_TASK:> Description: def restore_bucket(self, table_name): """Restore bucket from SQL """
if table_name.startswith(self.__prefix):
    return table_name.replace(self.__prefix, '', 1)
return None
<SYSTEM_TASK:> Restore descriptor from SQL <END_TASK> <USER_TASK:> Description: def restore_descriptor(self, table_name, columns, constraints, autoincrement_column=None): """Restore descriptor from SQL """
# Fields
fields = []
for column in columns:
    if column.name == autoincrement_column:
        continue
    field_type = self.restore_type(column.type)
    field = {'name': column.name, 'type': field_type}
    if not column.nullable:
        field['constraints'] = {'required': True}
    fields.append(field)

# Primary key
pk = []
for constraint in constraints:
    if isinstance(constraint, sa.PrimaryKeyConstraint):
        for column in constraint.columns:
            if column.name == autoincrement_column:
                continue
            pk.append(column.name)

# Foreign keys
fks = []
if self.__dialect == 'postgresql':
    for constraint in constraints:
        if isinstance(constraint, sa.ForeignKeyConstraint):
            resource = ''
            own_fields = []
            foreign_fields = []
            for element in constraint.elements:
                own_fields.append(element.parent.name)
                if element.column.table.name != table_name:
                    resource = self.restore_bucket(element.column.table.name)
                foreign_fields.append(element.column.name)
            if len(own_fields) == len(foreign_fields) == 1:
                own_fields = own_fields.pop()
                foreign_fields = foreign_fields.pop()
            fks.append({
                'fields': own_fields,
                'reference': {'resource': resource, 'fields': foreign_fields},
            })

# Descriptor
descriptor = {}
descriptor['fields'] = fields
if len(pk) > 0:
    if len(pk) == 1:
        pk = pk.pop()
    descriptor['primaryKey'] = pk
if len(fks) > 0:
    descriptor['foreignKeys'] = fks
return descriptor
<SYSTEM_TASK:> Restore row from SQL <END_TASK> <USER_TASK:> Description: def restore_row(self, row, schema): """Restore row from SQL """
row = list(row)
for index, field in enumerate(schema.fields):
    if self.__dialect == 'postgresql':
        if field.type in ['array', 'object']:
            continue
    row[index] = field.cast_value(row[index])
return row
<SYSTEM_TASK:> Restore type from SQL <END_TASK> <USER_TASK:> Description: def restore_type(self, type): """Restore type from SQL """
# All dialects
mapping = {
    ARRAY: 'array',
    sa.Boolean: 'boolean',
    sa.Date: 'date',
    sa.DateTime: 'datetime',
    sa.Float: 'number',
    sa.Integer: 'integer',
    JSONB: 'object',
    JSON: 'object',
    sa.Numeric: 'number',
    sa.Text: 'string',
    sa.Time: 'time',
    sa.VARCHAR: 'string',
    UUID: 'string',
}

# Get field type
field_type = None
for key, value in mapping.items():
    if isinstance(type, key):
        field_type = value

# Not supported
if field_type is None:
    message = 'Type "%s" is not supported'
    raise tableschema.exceptions.StorageError(message % type)

return field_type
<SYSTEM_TASK:> CreateFileType <END_TASK> <USER_TASK:> Description: def open_hierarchy(self, path, relative_to_object_id, object_id, create_file_type=0): """ CreateFileType 0 - Creates no new object. 1 - Creates a notebook with the specified name at the specified location. 2 - Creates a section group with the specified name at the specified location. 3 - Creates a section with the specified name at the specified location. """
try:
    return self.process.OpenHierarchy(path, relative_to_object_id, "", create_file_type)
except Exception as e:
    print(e)
    print("Could not Open Hierarchy")
<SYSTEM_TASK:> NewPageStyle <END_TASK> <USER_TASK:> Description: def create_new_page(self, section_id, new_page_style=0): """ NewPageStyle 0 - Create a page that has the default page style 1 - Create a blank page with a title 2 - Create a blank page that has no title """
try:
    self.process.CreateNewPage(section_id, "", new_page_style)
except Exception as e:
    print(e)
    print("Unable to create the page")
<SYSTEM_TASK:> PageInfo <END_TASK> <USER_TASK:> Description: def get_page_content(self, page_id, page_info=0): """ PageInfo 0 - Returns only basic page content, without selection markup and binary data objects. This is the standard value to pass. 1 - Returns page content with no selection markup, but with all binary data. 2 - Returns page content with selection markup, but no binary data. 3 - Returns page content with selection markup and all binary data. """
try:
    return self.process.GetPageContent(page_id, "", page_info)
except Exception as e:
    print(e)
    print("Could not get Page Content")
<SYSTEM_TASK:> SpecialLocation <END_TASK> <USER_TASK:> Description: def get_special_location(self, special_location=0): """ SpecialLocation 0 - Gets the path to the Backup Folders folder location. 1 - Gets the path to the Unfiled Notes folder location. 2 - Gets the path to the Default Notebook folder location. """
try:
    return self.process.GetSpecialLocation(special_location)
except Exception as e:
    print(e)
    print("Could not retrieve special location")
<SYSTEM_TASK:> Determine memory specifications of the machine. <END_TASK> <USER_TASK:> Description: def memory(): """Determine memory specifications of the machine. Returns ------- mem_info : dictionary Holds the current values for the total, free and used memory of the system. """
mem_info = dict()
for k, v in psutil.virtual_memory()._asdict().items():
    mem_info[k] = int(v)
return mem_info
<SYSTEM_TASK:> Given a two-dimensional array with a dimension of size 'N', <END_TASK> <USER_TASK:> Description: def get_chunk_size(N, n): """Given a two-dimensional array with a dimension of size 'N', determine the number of rows or columns that can fit into memory. Parameters ---------- N : int The size of one of the dimensions of a two-dimensional array. n : int The number of arrays of size 'N' times 'chunk_size' that can fit in memory. Returns ------- chunk_size : int The size of the dimension orthogonal to the one of size 'N'. """
mem_free = memory()['free']
if mem_free > 60000000:
    chunk_size = int(((mem_free - 10000000) * 1000) / (4 * n * N))
    return chunk_size
elif mem_free > 40000000:
    chunk_size = int(((mem_free - 7000000) * 1000) / (4 * n * N))
    return chunk_size
elif mem_free > 14000000:
    chunk_size = int(((mem_free - 2000000) * 1000) / (4 * n * N))
    return chunk_size
elif mem_free > 8000000:
    chunk_size = int(((mem_free - 1400000) * 1000) / (4 * n * N))
    return chunk_size
elif mem_free > 2000000:
    chunk_size = int(((mem_free - 900000) * 1000) / (4 * n * N))
    return chunk_size
elif mem_free > 1000000:
    chunk_size = int(((mem_free - 400000) * 1000) / (4 * n * N))
    return chunk_size
else:
    print("\nERROR: Cluster_Ensembles: get_chunk_size: "
          "this machine does not have enough free memory resources "
          "to perform ensemble clustering.\n")
    sys.exit(1)
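Typical use is to walk a large square array in blocks that fit in memory; the array and the factor n=2 (two working copies) are illustrative:

import numpy as np

N = 10000
data = np.random.rand(N, N)
chunk_size = get_chunk_size(N, 2)
for start in range(0, N, chunk_size):
    block = data[:, start:start + chunk_size]
    # operate on each (N x chunk_size) block here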
<SYSTEM_TASK:> Compute a weighted average of the mutual information with the known labels, <END_TASK> <USER_TASK:> Description: def ceEvalMutual(cluster_runs, cluster_ensemble = None, verbose = False): """Compute a weighted average of the mutual information with the known labels, the weights being proportional to the fraction of known labels. Parameters ---------- cluster_runs : array of shape (n_partitions, n_samples) Each row of this matrix is such that the i-th entry corresponds to the cluster ID to which the i-th sample of the data-set has been classified by this particular clustering. Samples not selected for clustering in a given round are tagged by NaN. cluster_ensemble : array of shape (n_samples,), optional (default = None) The identity of the cluster to which each sample of the whole data-set belongs according to consensus clustering. verbose : Boolean, optional (default = False) Specifies if status messages will be displayed on the standard output. Returns ------- unnamed variable : float The weighted average of the mutual information between the consensus clustering and the many runs from the ensemble of independent clusterings on subsamples of the data-set. """
if cluster_ensemble is None:
    return 0.0
if reduce(operator.mul, cluster_runs.shape, 1) == max(cluster_runs.shape):
    cluster_runs = cluster_runs.reshape(1, -1)
weighted_average_mutual_information = 0
N_labelled_indices = 0
for i in range(cluster_runs.shape[0]):
    labelled_indices = np.where(np.isfinite(cluster_runs[i]))[0]
    N = labelled_indices.size
    x = np.reshape(checkcl(cluster_ensemble[labelled_indices], verbose), newshape=N)
    y = np.reshape(checkcl(np.rint(cluster_runs[i, labelled_indices]), verbose), newshape=N)
    q = normalized_mutual_info_score(x, y)
    weighted_average_mutual_information += q * N
    N_labelled_indices += N
return float(weighted_average_mutual_information) / N_labelled_indices
<SYSTEM_TASK:> Ensure that a cluster labelling is in a valid format. <END_TASK> <USER_TASK:> Description: def checkcl(cluster_run, verbose = False): """Ensure that a cluster labelling is in a valid format. Parameters ---------- cluster_run : array of shape (n_samples,) A vector of cluster IDs for each of the samples selected for a given round of clustering. The samples not selected are labelled with NaN. verbose : Boolean, optional (default = False) Specifies if status messages will be displayed on the standard output. Returns ------- cluster_run : array of shape (n_samples,) The input vector is modified in place, such that invalid values are either rejected or altered. In particular, the labelling of cluster IDs starts at zero and increases by 1 without any gap left. """
cluster_run = np.asanyarray(cluster_run)
if cluster_run.size == 0:
    raise ValueError("\nERROR: Cluster_Ensembles: checkcl: "
                     "empty vector provided as input.\n")
elif reduce(operator.mul, cluster_run.shape, 1) != max(cluster_run.shape):
    raise ValueError("\nERROR: Cluster_Ensembles: checkcl: "
                     "problem in dimensions of the cluster label vector "
                     "under consideration.\n")
elif np.where(np.isnan(cluster_run))[0].size != 0:
    raise ValueError("\nERROR: Cluster_Ensembles: checkcl: vector of cluster "
                     "labellings provided as input contains at least one 'NaN'.\n")
else:
    min_label = np.amin(cluster_run)
    if min_label < 0:
        if verbose:
            print("\nINFO: Cluster_Ensembles: checkcl: detected negative values "
                  "as cluster labellings.")
        cluster_run -= min_label
        if verbose:
            print("\nINFO: Cluster_Ensembles: checkcl: "
                  "offset to a minimum value of '0'.")
    x = one_to_max(cluster_run)
    if np.amax(cluster_run) != np.amax(x):
        if verbose:
            print("\nINFO: Cluster_Ensembles: checkcl: the vector cluster "
                  "labellings provided is not a dense integer mapping.")
        cluster_run = x
        if verbose:
            print("INFO: Cluster_Ensembles: checkcl: brought modification "
                  "to this vector so that its labels range "
                  "from 0 to {0}, included.\n".format(np.amax(cluster_run)))
return cluster_run
<SYSTEM_TASK:> Alter a vector of cluster labels to a dense mapping. <END_TASK> <USER_TASK:> Description: def one_to_max(array_in): """Alter a vector of cluster labels to a dense mapping. Given that this function is herein always called after passing a vector to the function checkcl, one_to_max relies on the assumption that cluster_run does not contain any NaN entries. Parameters ---------- array_in : a list or one-dimensional array The list of cluster IDs to be processed. Returns ------- result : one-dimensional array A massaged version of the input vector of cluster identities. """
x = np.asanyarray(array_in)
N_in = x.size
array_in = x.reshape(N_in)
sorted_array = np.sort(array_in)
sorting_indices = np.argsort(array_in)
last = np.nan
current_index = -1
for i in range(N_in):
    if last != sorted_array[i] or np.isnan(last):
        last = sorted_array[i]
        current_index += 1
    sorted_array[i] = current_index
result = np.empty(N_in, dtype=int)
result[sorting_indices] = sorted_array
return result
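A small trace of the relabelling - arbitrary IDs are compressed to 0..k-1 while keeping their sort order:

print(one_to_max([4, 4, 7, 2]))  # [1 1 2 0], i.e. 2 -> 0, 4 -> 1, 7 -> 2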
<SYSTEM_TASK:> Check that a matrix is a proper similarity matrix and bring <END_TASK> <USER_TASK:> Description: def checks(similarities, verbose = False): """Check that a matrix is a proper similarity matrix and bring appropriate changes if applicable. Parameters ---------- similarities : array of shape (n_samples, n_samples) A matrix of pairwise similarities between (sub)-samples of the data-set. verbose : Boolean, optional (default = False) Alerts of any issue with the similarities matrix provided and of any step possibly taken to remediate such problem. """
if similarities.size == 0:
    raise ValueError("\nERROR: Cluster_Ensembles: checks: the similarities "
                     "matrix provided as input happens to be empty.\n")
elif np.where(np.isnan(similarities))[0].size != 0:
    raise ValueError("\nERROR: Cluster_Ensembles: checks: input similarities "
                     "matrix contains at least one 'NaN'.\n")
elif np.where(np.isinf(similarities))[0].size != 0:
    raise ValueError("\nERROR: Cluster_Ensembles: checks: at least one infinite entry "
                     "detected in input similarities matrix.\n")
else:
    if np.where(np.logical_not(np.isreal(similarities)))[0].size != 0:
        if verbose:
            print("\nINFO: Cluster_Ensembles: checks: complex entries found "
                  "in the similarities matrix.")
        similarities = similarities.real
        if verbose:
            print("\nINFO: Cluster_Ensembles: checks: "
                  "truncated to their real components.")
    if similarities.shape[0] != similarities.shape[1]:
        if verbose:
            print("\nINFO: Cluster_Ensembles: checks: non-square matrix provided.")
        N_square = min(similarities.shape)
        similarities = similarities[:N_square, :N_square]
        if verbose:
            print("\nINFO: Cluster_Ensembles: checks: using largest square sub-matrix.")
    max_sim = np.amax(similarities)
    min_sim = np.amin(similarities)
    if max_sim > 1 or min_sim < 0:
        if verbose:
            print("\nINFO: Cluster_Ensembles: checks: strictly negative "
                  "or bigger than unity entries spotted in input similarities matrix.")
        indices_too_big = np.where(similarities > 1)
        indices_negative = np.where(similarities < 0)
        similarities[indices_too_big] = 1.0
        similarities[indices_negative] = 0.0
        if verbose:
            print("\nINFO: Cluster_Ensembles: checks: done setting them to "
                  "the lower or upper accepted values.")
    if not np.allclose(similarities, np.transpose(similarities)):
        if verbose:
            print("\nINFO: Cluster_Ensembles: checks: non-symmetric input "
                  "similarities matrix.")
        similarities = np.divide(similarities + np.transpose(similarities), 2.0)
        if verbose:
            print("\nINFO: Cluster_Ensembles: checks: now symmetrized.")
    if not np.allclose(np.diag(similarities), np.ones(similarities.shape[0])):
        if verbose:
            print("\nINFO: Cluster_Ensembles: checks: the self-similarities "
                  "provided as input are not all of unit value.")
        similarities[np.diag_indices(similarities.shape[0])] = 1
        if verbose:
            print("\nINFO: Cluster_Ensembles: checks: issue corrected.")
<SYSTEM_TASK:> METIS algorithm by Karypis and Kumar. Partitions the induced similarity graph <END_TASK> <USER_TASK:> Description: def metis(hdf5_file_name, N_clusters_max): """METIS algorithm by Karypis and Kumar. Partitions the induced similarity graph passed by CSPA. Parameters ---------- hdf5_file_name : string or file handle N_clusters_max : int Returns ------- labels : array of shape (n_samples,) A vector of labels denoting the cluster to which each sample has been assigned as a result of the CSPA heuristics for consensus clustering. Reference --------- G. Karypis and V. Kumar, "A Fast and High Quality Multilevel Scheme for Partitioning Irregular Graphs" In: SIAM Journal on Scientific Computing, Vol. 20, No. 1, pp. 359-392, 1999. """
file_name = wgraph(hdf5_file_name)
labels = sgraph(N_clusters_max, file_name)
subprocess.call(['rm', file_name])
return labels
<SYSTEM_TASK:> Gives cluster labels ranging from 1 to N_clusters_max for <END_TASK> <USER_TASK:> Description: def hmetis(hdf5_file_name, N_clusters_max, w = None): """Gives cluster labels ranging from 1 to N_clusters_max for hypergraph partitioning required for HGPA. Parameters ---------- hdf5_file_name : file handle or string N_clusters_max : int w : array, optional (default = None) Returns ------- labels : array of shape (n_samples,) A vector of labels denoting the cluster to which each sample has been assigned as a result of the HGPA approximation algorithm for consensus clustering. Reference --------- G. Karypis, R. Aggarwal, V. Kumar and S. Shekhar, "Multilevel hypergraph partitioning: applications in VLSI domain" In: IEEE Transactions on Very Large Scale Integration (VLSI) Systems, Vol. 7, No. 1, pp. 69-79, 1999. """
if w is None:
    file_name = wgraph(hdf5_file_name, None, 2)
else:
    file_name = wgraph(hdf5_file_name, w, 3)
labels = sgraph(N_clusters_max, file_name)
labels = one_to_max(labels)
subprocess.call(['rm', file_name])
return labels
<SYSTEM_TASK:> Obfuscate the auth details to avoid easy snatching. <END_TASK> <USER_TASK:> Description: def obfuscate(p, action): """Obfuscate the auth details to avoid easy snatching. It's best to use a throw away account for these alerts to avoid having your authentication put at risk by storing it locally. """
key = "ru7sll3uQrGtDPcIW3okutpFLo6YYtd5bWSpbZJIopYQ0Du0a1WlhvJOaZEH" s = list() if action == 'store': if PY2: for i in range(len(p)): kc = key[i % len(key)] ec = chr((ord(p[i]) + ord(kc)) % 256) s.append(ec) return base64.urlsafe_b64encode("".join(s)) else: return base64.urlsafe_b64encode(p.encode()).decode() else: if PY2: e = base64.urlsafe_b64decode(p) for i in range(len(e)): kc = key[i % len(key)] dc = chr((256 + ord(e[i]) - ord(kc)) % 256) s.append(dc) return "".join(s) else: e = base64.urlsafe_b64decode(p) return e.decode()
<SYSTEM_TASK:> Go through and establish the defaults on the file system. <END_TASK> <USER_TASK:> Description: def _config_bootstrap(self): """Go through and establish the defaults on the file system. The approach here was stolen from the CLI tool provided with the module. Idea being that the user should not always need to provide a username and password in order to run the script. If the configuration file is already present with valid data, then lets use it. """
if not os.path.exists(CONFIG_PATH):
    os.makedirs(CONFIG_PATH)
if not os.path.exists(CONFIG_FILE):
    json.dump(CONFIG_DEFAULTS, open(CONFIG_FILE, 'w'),
              indent=4, separators=(',', ': '))
config = CONFIG_DEFAULTS
if self._email and self._password:
    # Save the configuration locally to pull later on
    config['email'] = self._email
    config['password'] = str(obfuscate(self._password, 'store'))
    self._log.debug("Caching authentication in config file")
    json.dump(config, open(CONFIG_FILE, 'w'),
              indent=4, separators=(',', ': '))
else:
    # Load the config file and override the class
    config = json.load(open(CONFIG_FILE))
    if config.get('py2', PY2) != PY2:
        raise Exception("Python versions have changed. Please run `setup` again to reconfigure the client.")
    if config['email'] and config['password']:
        self._email = config['email']
        self._password = obfuscate(str(config['password']), 'fetch')
        self._log.debug("Loaded authentication from config file")
<SYSTEM_TASK:> Attempt to authenticate the user through a session file. <END_TASK> <USER_TASK:> Description: def _session_check(self): """Attempt to authenticate the user through a session file. This process is done to avoid having to authenticate the user every single time. It uses a session file that is saved when a valid session is captured and then reused. Because sessions can expire, we need to test the session prior to calling the user authenticated. Right now that is done with a test string found in an unauthenticated session. This approach is not an ideal method, but it works. """
if not os.path.exists(SESSION_FILE):
    self._log.debug("Session file does not exist")
    return False
with open(SESSION_FILE, 'rb') as f:
    cookies = requests.utils.cookiejar_from_dict(pickle.load(f))
    self._session.cookies = cookies
    self._log.debug("Loaded cookies from session file")
response = self._session.get(url=self.TEST_URL, headers=self.HEADERS)
if self.TEST_KEY in str(response.content):
    self._log.debug("Session file appears invalid")
    return False
self._is_authenticated = True
self._process_state()
return True
<SYSTEM_TASK:> Override the default log level of the class <END_TASK> <USER_TASK:> Description: def set_log_level(self, level): """Override the default log level of the class"""
if level == 'info':
    level = logging.INFO
if level == 'debug':
    level = logging.DEBUG
if level == 'error':
    level = logging.ERROR
self._log.setLevel(level)
<SYSTEM_TASK:> Process the application state configuration. <END_TASK> <USER_TASK:> Description: def _process_state(self): """Process the application state configuration. Google Alerts manages the account information and alert data through some custom state configuration. Not all values have been completely enumerated. """
self._log.debug("Capturing state from the request") response = self._session.get(url=self.ALERTS_URL, headers=self.HEADERS) soup = BeautifulSoup(response.content, "html.parser") for i in soup.findAll('script'): if i.text.find('window.STATE') == -1: continue state = json.loads(i.text[15:-1]) if state != "": self._state = state self._log.debug("State value set: %s" % self._state) return self._state
<SYSTEM_TASK:> Authenticate the user and setup our state. <END_TASK> <USER_TASK:> Description: def authenticate(self): """Authenticate the user and setup our state."""
valid = self._session_check()
if self._is_authenticated and valid:
    self._log.debug("[!] User has already authenticated")
    return
init = self._session.get(url=self.LOGIN_URL, headers=self.HEADERS)
soup = BeautifulSoup(init.content, "html.parser")
soup_login = soup.find('form').find_all('input')
post_data = dict()
for u in soup_login:
    if u.has_attr('name') and u.has_attr('value'):
        post_data[u['name']] = u['value']
post_data['Email'] = self._email
post_data['Passwd'] = self._password
response = self._session.post(url=self.AUTH_URL, data=post_data,
                              headers=self.HEADERS)
if self.CAPTCHA_KEY in str(response.content):
    raise AccountCaptcha('Google is forcing a CAPTCHA. To get around this issue, run the google-alerts with the seed option to open an interactive authentication session. Once authenticated, this module will cache your session and load that in the future')
cookies = [x.name for x in response.cookies]
if 'SIDCC' not in cookies:
    raise InvalidCredentials("Email or password was incorrect.")
with open(SESSION_FILE, 'wb') as f:
    cookies = requests.utils.dict_from_cookiejar(self._session.cookies)
    pickle.dump(cookies, f, protocol=2)
    self._log.debug("Saved session to disk for future reference")
self._log.debug("User successfully authenticated")
self._is_authenticated = True
self._process_state()
return
<SYSTEM_TASK:> List alerts configured for the account. <END_TASK> <USER_TASK:> Description: def list(self, term=None): """List alerts configured for the account."""
if not self._state:
    raise InvalidState("State was not properly obtained from the app")
self._process_state()
if not self._state[1]:
    self._log.info("No monitors have been created yet.")
    return list()
monitors = list()
for monitor in self._state[1][1]:
    obj = dict()
    obj['monitor_id'] = monitor[1]
    obj['user_id'] = monitor[-1]
    obj['term'] = monitor[2][3][1]
    if term and obj['term'] != term:
        continue
    obj['language'] = monitor[2][3][3][1]
    obj['region'] = monitor[2][3][3][2]
    obj['delivery'] = self.DELIVERY[monitor[2][6][0][1]]
    obj['match_type'] = self.MONITOR_MATCH_TYPE[monitor[2][5]]
    if obj['delivery'] == 'MAIL':
        obj['alert_frequency'] = self.ALERT_FREQ[monitor[2][6][0][4]]
        obj['email_address'] = monitor[2][6][0][2]
    else:
        rss_id = monitor[2][6][0][11]
        url = "https://google.com/alerts/feeds/{uid}/{fid}"
        obj['rss_link'] = url.format(uid=obj['user_id'], fid=rss_id)
    monitors.append(obj)
return monitors
<SYSTEM_TASK:> Let's handle old-style response processing here, as usual. <END_TASK> <USER_TASK:> Description: def process_response(self, request, response): """Let's handle old-style response processing here, as usual."""
# For debug only.
if not settings.DEBUG:
    return response

# Check for responses where the data can't be inserted.
content_encoding = response.get('Content-Encoding', '')
content_type = response.get('Content-Type', '').split(';')[0]
if any((getattr(response, 'streaming', False),
        'gzip' in content_encoding,
        content_type not in _HTML_TYPES)):
    return response

content = force_text(response.content, encoding=settings.DEFAULT_CHARSET)
pattern = re.escape('</body>')
bits = re.split(pattern, content, flags=re.IGNORECASE)
if len(bits) > 1:
    bits[-2] += debug_payload(request, response, self.view_data)
    response.content = "</body>".join(bits)
    if response.get('Content-Length', None):
        response['Content-Length'] = len(response.content)
return response
<SYSTEM_TASK:> Mark a cached item invalid and trigger an asynchronous <END_TASK> <USER_TASK:> Description: def invalidate(self, *raw_args, **raw_kwargs): """ Mark a cached item invalid and trigger an asynchronous job to refresh the cache """
args = self.prepare_args(*raw_args)
kwargs = self.prepare_kwargs(**raw_kwargs)
key = self.key(*args, **kwargs)
item = self.cache.get(key)
if item is not None:
    expiry, data = item
    self.store(key, self.timeout(*args, **kwargs), data)
    self.async_refresh(*args, **kwargs)
<SYSTEM_TASK:> Remove an item from the cache <END_TASK> <USER_TASK:> Description: def delete(self, *raw_args, **raw_kwargs): """ Remove an item from the cache """
args = self.prepare_args(*raw_args)
kwargs = self.prepare_kwargs(**raw_kwargs)
key = self.key(*args, **kwargs)
item = self.cache.get(key)
if item is not None:
    self.cache.delete(key)
<SYSTEM_TASK:> Manually set the cache value with its appropriate expiry. <END_TASK> <USER_TASK:> Description: def set(self, *raw_args, **raw_kwargs): """ Manually set the cache value with its appropriate expiry. """
if self.set_data_kwarg in raw_kwargs:
    data = raw_kwargs.pop(self.set_data_kwarg)
else:
    raw_args = list(raw_args)
    data = raw_args.pop()
args = self.prepare_args(*raw_args)
kwargs = self.prepare_kwargs(**raw_kwargs)
key = self.key(*args, **kwargs)
expiry = self.expiry(*args, **kwargs)
logger.debug("Setting %s cache with key '%s', args '%r', kwargs '%r', expiry '%r'",
             self.class_path, key, args, kwargs, expiry)
self.store(key, expiry, data)
<SYSTEM_TASK:> Add a result to the cache <END_TASK> <USER_TASK:> Description: def store(self, key, expiry, data): """ Add a result to the cache :key: Cache key to use :expiry: The expiry timestamp after which the result is stale :data: The data to cache """
self.cache.set(key, (expiry, data), self.cache_ttl)
if getattr(settings, 'CACHEBACK_VERIFY_CACHE_WRITE', True):
    # We verify that the item was cached correctly. This is to avoid a
    # Memcache problem where some values aren't cached correctly
    # without warning.
    __, cached_data = self.cache.get(key, (None, None))
    if data is not None and cached_data is None:
        raise RuntimeError(
            "Unable to save data of type %s to cache" % (type(data)))
<SYSTEM_TASK:> Fetch the result SYNCHRONOUSLY and populate the cache <END_TASK> <USER_TASK:> Description: def refresh(self, *args, **kwargs): """ Fetch the result SYNCHRONOUSLY and populate the cache """
result = self.fetch(*args, **kwargs)
self.store(self.key(*args, **kwargs), self.expiry(*args, **kwargs), result)
return result
<SYSTEM_TASK:> Trigger an asynchronous job to refresh the cache <END_TASK> <USER_TASK:> Description: def async_refresh(self, *args, **kwargs): """ Trigger an asynchronous job to refresh the cache """
# We trigger the task with the class path to import as well as the
# (a) args and kwargs for instantiating the class
# (b) args and kwargs for calling the 'refresh' method
try:
    enqueue_task(
        dict(
            klass_str=self.class_path,
            obj_args=self.get_init_args(),
            obj_kwargs=self.get_init_kwargs(),
            call_args=args,
            call_kwargs=kwargs
        ),
        task_options=self.task_options
    )
except Exception:
    # Handle exceptions from talking to RabbitMQ - eg connection
    # refused. When this happens, we try to run the task
    # synchronously.
    logger.error("Unable to trigger task asynchronously - failing "
                 "over to synchronous refresh", exc_info=True)
    try:
        return self.refresh(*args, **kwargs)
    except Exception as e:
        # Something went wrong while running the task
        logger.error("Unable to refresh data synchronously: %s", e,
                     exc_info=True)
    else:
        logger.debug("Failover synchronous refresh completed successfully")
<SYSTEM_TASK:> Return whether to refresh an item synchronously when it is found in the <END_TASK> <USER_TASK:> Description: def should_stale_item_be_fetched_synchronously(self, delta, *args, **kwargs): """ Return whether to refresh an item synchronously when it is found in the cache but stale """
if self.fetch_on_stale_threshold is None:
    return False
return delta > (self.fetch_on_stale_threshold - self.lifetime)
<SYSTEM_TASK:> Return the cache key to use. <END_TASK> <USER_TASK:> Description: def key(self, *args, **kwargs): """ Return the cache key to use. If you're passing anything but primitive types to the ``get`` method, it's likely that you'll need to override this method. """
if not args and not kwargs:
    return self.class_path
try:
    if args and not kwargs:
        return "%s:%s" % (self.class_path, self.hash(args))
    # The line might break if your passed values are un-hashable. If
    # it does, you need to override this method and implement your own
    # key algorithm.
    return "%s:%s:%s:%s" % (self.class_path,
                            self.hash(args),
                            self.hash([k for k in sorted(kwargs)]),
                            self.hash([kwargs[k] for k in sorted(kwargs)]))
except TypeError:
    raise RuntimeError(
        "Unable to generate cache key due to unhashable "
        "args or kwargs - you need to implement your own "
        "key generation method to avoid this problem")
<SYSTEM_TASK:> Generate a hash of the given iterable. <END_TASK> <USER_TASK:> Description: def hash(self, value): """ Generate a hash of the given iterable. This is for use in a cache key. """
if is_iterable(value):
    value = tuple(to_bytestring(v) for v in value)
return hashlib.md5(six.b(':').join(value)).hexdigest()
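Since the digest is md5 over the colon-joined byte strings, keys are deterministic across processes; assuming `to_bytestring` UTF-8 encodes, job.hash(('alpha', 'beta')) is equivalent to:

import hashlib

print(hashlib.md5(b'alpha:beta').hexdigest())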
<SYSTEM_TASK:> Re-populate cache using the given job class. <END_TASK> <USER_TASK:> Description: def perform_async_refresh(cls, klass_str, obj_args, obj_kwargs, call_args, call_kwargs): """ Re-populate cache using the given job class. The job class is instantiated with the passed constructor args and the refresh method is called with the passed call args. That is:: data = klass(*obj_args, **obj_kwargs).refresh( *call_args, **call_kwargs) :klass_str: String repr of class (eg 'apps.twitter.jobs.FetchTweetsJob') :obj_args: Constructor args :obj_kwargs: Constructor kwargs :call_args: Refresh args :call_kwargs: Refresh kwargs """
klass = get_job_class(klass_str)
if klass is None:
    logger.error("Unable to construct %s with args %r and kwargs %r",
                 klass_str, obj_args, obj_kwargs)
    return
logger.info("Using %s with constructor args %r and kwargs %r",
            klass_str, obj_args, obj_kwargs)
logger.info("Calling refresh with args %r and kwargs %r",
            call_args, call_kwargs)
start = time.time()
try:
    klass(*obj_args, **obj_kwargs).refresh(*call_args, **call_kwargs)
except Exception as e:
    logger.exception("Error running job: '%s'", e)
else:
    duration = time.time() - start
    logger.info("Refreshed cache in %.6f seconds", duration)
<SYSTEM_TASK:> Decorate function to cache its return value. <END_TASK> <USER_TASK:> Description: def cacheback(lifetime=None, fetch_on_miss=None, cache_alias=None, job_class=None, task_options=None, **job_class_kwargs): """ Decorate function to cache its return value. :lifetime: How long to cache items for :fetch_on_miss: Whether to perform a synchronous fetch when no cached result is found :cache_alias: The Django cache alias to store the result into. :job_class: The class to use for running the cache refresh job. Defaults to using FunctionJob. :task_options: Options passed through to the job's task queue when an async refresh is triggered. :job_class_kwargs: Any extra kwargs to pass to job_class constructor. Useful with custom job_class implementations. """
if job_class is None: job_class = FunctionJob job = job_class(lifetime=lifetime, fetch_on_miss=fetch_on_miss, cache_alias=cache_alias, task_options=task_options, **job_class_kwargs) def _wrapper(fn): # using available_attrs to work around http://bugs.python.org/issue3445 @wraps(fn, assigned=available_attrs(fn)) def __wrapper(*args, **kwargs): return job.get(fn, *args, **kwargs) # Assign reference to unwrapped function so that we can access it # later without descending into infinite regress. __wrapper.fn = fn # Assign reference to job so we can use the full Job API __wrapper.job = job return __wrapper return _wrapper
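Typical usage, sketched with a stub in place of a real data source; the .fn and .job attributes come straight from the wrapper above:

def get_timeline(username):
    # stand-in for a real (slow) API call
    return ['tweet about %s' % username]

@cacheback(lifetime=300, fetch_on_miss=True)
def fetch_tweets(username):
    return get_timeline(username)

tweets = fetch_tweets('nasa')   # routed through job.get(fn, 'nasa')
raw = fetch_tweets.fn('nasa')   # unwrapped function, bypasses the cache
job = fetch_tweets.job          # full Job API for this function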
<SYSTEM_TASK:> Return the angle in radians between vectors 'v1' and 'v2'. <END_TASK> <USER_TASK:> Description: def angle(v1, v2): """Return the angle in radians between vectors 'v1' and 'v2'."""
v1_u = unit_vector(v1) v2_u = unit_vector(v2) return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))
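A self-contained worked example; unit_vector is assumed to be the usual normalisation helper:

import numpy as np

def unit_vector(v):
    # assumed definition of the helper used by angle()
    return v / np.linalg.norm(v)

def angle(v1, v2):
    v1_u, v2_u = unit_vector(v1), unit_vector(v2)
    return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))

assert np.isclose(angle(np.array([1, 0]), np.array([0, 1])), np.pi / 2)
assert np.isclose(angle(np.array([1, 0]), np.array([-1, 0])), np.pi)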
<SYSTEM_TASK:> Keep vertices with angles higher than the given minimum. <END_TASK> <USER_TASK:> Description: def keep_high_angle(vertices, min_angle_deg): """Keep vertices with angles higher than the given minimum."""
accepted = [] v = vertices v1 = v[1] - v[0] accepted.append((v[0][0], v[0][1])) for i in range(1, len(v) - 2): v2 = v[i + 1] - v[i - 1] diff_angle = np.fabs(angle(v1, v2) * 180.0 / np.pi) if diff_angle > min_angle_deg: accepted.append((v[i][0], v[i][1])) v1 = v[i] - v[i - 1] accepted.append((v[-1][0], v[-1][1])) return np.array(accepted, dtype=vertices.dtype)
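A quick worked example using the functions above: the collinear midpoint on the straight segment is dropped, while both endpoints always survive (note that the loop never tests the second-to-last vertex):

import numpy as np

path = np.array([[0, 0], [1, 0], [2, 0], [2, 1], [2, 2]], dtype=float)
print(keep_high_angle(path, min_angle_deg=10))
# [[0. 0.]
#  [2. 0.]
#  [2. 2.]]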
<SYSTEM_TASK:> Transform matplotlib.contourf to geojson with overlapping filled contours. <END_TASK> <USER_TASK:> Description: def contourf_to_geojson_overlap(contourf, geojson_filepath=None, min_angle_deg=None, ndigits=5, unit='', stroke_width=1, fill_opacity=.9, geojson_properties=None, strdump=False, serialize=True): """Transform matplotlib.contourf to geojson with overlapping filled contours."""
polygon_features = [] contourf_idx = 0 for collection in contourf.collections: color = collection.get_facecolor() for path in collection.get_paths(): for coord in path.to_polygons(): if min_angle_deg: coord = keep_high_angle(coord, min_angle_deg) coord = np.around(coord, ndigits) if ndigits else coord polygon = Polygon(coordinates=[coord.tolist()]) fcolor = rgb2hex(color[0]) properties = set_contourf_properties(stroke_width, fcolor, fill_opacity, contourf.levels, contourf_idx, unit) if geojson_properties: properties.update(geojson_properties) feature = Feature(geometry=polygon, properties=properties) polygon_features.append(feature) contourf_idx += 1 feature_collection = FeatureCollection(polygon_features) return _render_feature_collection(feature_collection, geojson_filepath, strdump, serialize)
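An end-to-end sketch: build a filled contour set with matplotlib and convert it; the grid, level count, and file name are arbitrary:

import numpy as np
import matplotlib.pyplot as plt

x, y = np.meshgrid(np.linspace(-5, 5, 50), np.linspace(-5, 5, 50))
z = np.sin(np.sqrt(x ** 2 + y ** 2))
contourf = plt.contourf(x, y, z, levels=8)

geojson = contourf_to_geojson_overlap(
    contourf,
    geojson_filepath='contours.geojson',  # also written to disk when given
    min_angle_deg=2,                      # prune near-collinear vertices
    unit='m',
)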
<SYSTEM_TASK:> Get a qualified URL for the provider to return to upon authorization <END_TASK> <USER_TASK:> Description: def get_authorize_callback(endpoint, provider_id): """Get a qualified URL for the provider to return to upon authorization :param endpoint: Absolute path to append to the application's host :param provider_id: The provider ID to include in the callback URL """
endpoint_prefix = config_value('BLUEPRINT_NAME') url = url_for(endpoint_prefix + '.' + endpoint, provider_id=provider_id) return request.url_root[:-1] + url
<SYSTEM_TASK:> Starts the provider login OAuth flow <END_TASK> <USER_TASK:> Description: def login(provider_id): """Starts the provider login OAuth flow"""
provider = get_provider_or_404(provider_id) callback_url = get_authorize_callback('login', provider_id) post_login = request.form.get('next', get_post_login_redirect()) session[config_value('POST_OAUTH_LOGIN_SESSION_KEY')] = post_login return provider.authorize(callback_url)
<SYSTEM_TASK:> Starts the provider connection OAuth flow <END_TASK> <USER_TASK:> Description: def connect(provider_id): """Starts the provider connection OAuth flow"""
provider = get_provider_or_404(provider_id) callback_url = get_authorize_callback('connect', provider_id) allow_view = get_url(config_value('CONNECT_ALLOW_VIEW')) pc = request.form.get('next', allow_view) session[config_value('POST_OAUTH_CONNECT_SESSION_KEY')] = pc return provider.authorize(callback_url)
<SYSTEM_TASK:> Remove all connections for the authenticated user to the specified provider <END_TASK> <USER_TASK:> Description: def remove_all_connections(provider_id): """Remove all connections for the authenticated user to the specified provider """
provider = get_provider_or_404(provider_id) ctx = dict(provider=provider.name, user=current_user) deleted = _datastore.delete_connections(user_id=current_user.get_id(), provider_id=provider_id) if deleted: after_this_request(_commit) msg = ('All connections to %s removed' % provider.name, 'info') connection_removed.send(current_app._get_current_object(), user=current_user._get_current_object(), provider_id=provider_id) else: msg = ('Unable to remove connection to %(provider)s' % ctx, 'error') do_flash(*msg) return redirect(request.referrer)
<SYSTEM_TASK:> Remove a specific connection for the authenticated user to the specified provider <END_TASK> <USER_TASK:> Description: def remove_connection(provider_id, provider_user_id): """Remove a specific connection for the authenticated user to the specified provider """
provider = get_provider_or_404(provider_id)
ctx = dict(provider=provider.name,
           user=current_user,
           provider_user_id=provider_user_id)
deleted = _datastore.delete_connection(user_id=current_user.get_id(),
                                       provider_id=provider_id,
                                       provider_user_id=provider_user_id)
if deleted:
    after_this_request(_commit)
    msg = ('Connection to %(provider)s removed' % ctx, 'info')
    connection_removed.send(current_app._get_current_object(),
                            user=current_user._get_current_object(),
                            provider_id=provider_id)
else:
    msg = ('Unable to remove connection to %(provider)s' % ctx, 'error')
do_flash(*msg)
return redirect(request.referrer or get_post_login_redirect())
<SYSTEM_TASK:> Shared method to handle the connection process <END_TASK> <USER_TASK:> Description: def connect_handler(cv, provider): """Shared method to handle the connection process :param cv: A dictionary containing the connection values :param provider: The provider the connection should be made to """
cv.setdefault('user_id', current_user.get_id()) connection = _datastore.find_connection( provider_id=cv['provider_id'], provider_user_id=cv['provider_user_id']) if connection is None: after_this_request(_commit) connection = _datastore.create_connection(**cv) msg = ('Connection established to %s' % provider.name, 'success') connection_created.send(current_app._get_current_object(), user=current_user._get_current_object(), connection=connection) else: msg = ('A connection is already established with %s ' 'to your account' % provider.name, 'notice') connection_failed.send(current_app._get_current_object(), user=current_user._get_current_object()) redirect_url = session.pop(config_value('POST_OAUTH_CONNECT_SESSION_KEY'), get_url(config_value('CONNECT_ALLOW_VIEW'))) do_flash(*msg) return redirect(redirect_url)
<SYSTEM_TASK:> Shared method to handle the signin process <END_TASK> <USER_TASK:> Description: def login_handler(response, provider, query): """Shared method to handle the signin process"""
connection = _datastore.find_connection(**query)
if connection:
    after_this_request(_commit)
    token_pair = get_token_pair_from_oauth_response(provider, response)
    if (token_pair['access_token'] != connection.access_token
            or token_pair['secret'] != connection.secret):
        connection.access_token = token_pair['access_token']
        connection.secret = token_pair['secret']
        _datastore.put(connection)
    user = connection.user
    login_user(user)
    key = _social.post_oauth_login_session_key
    redirect_url = session.pop(key, get_post_login_redirect())
    login_completed.send(current_app._get_current_object(),
                         provider=provider, user=user)
    return redirect(redirect_url)

login_failed.send(current_app._get_current_object(),
                  provider=provider, oauth_response=response)
next_url = get_url(_security.login_manager.login_view)
msg = '%s account not associated with an existing user' % provider.name
do_flash(msg, 'error')
return redirect(next_url)
<SYSTEM_TASK:> Initialize the application with the Social extension <END_TASK> <USER_TASK:> Description: def init_app(self, app, datastore=None): """Initialize the application with the Social extension :param app: The Flask application :param datastore: Connection datastore instance """
datastore = datastore or self.datastore for key, value in default_config.items(): app.config.setdefault(key, value) providers = dict() for key, config in app.config.items(): if not key.startswith('SOCIAL_') or config is None or key in default_config: continue suffix = key.lower().replace('social_', '') default_module_name = 'flask_social.providers.%s' % suffix module_name = config.get('module', default_module_name) module = import_module(module_name) config = update_recursive(module.config, config) providers[config['id']] = OAuthRemoteApp(**config) providers[config['id']].tokengetter(_get_token) state = _get_state(app, datastore, providers) app.register_blueprint(create_blueprint(state, __name__)) app.extensions['social'] = state return state
<SYSTEM_TASK:> Creates a Postman object with TLS and Auth <END_TASK> <USER_TASK:> Description: def postman(host, port=587, auth=(None, None), force_tls=False, options=None): """ Creates a Postman object with TLS and Auth middleware. TLS is placed before authentication because usually authentication happens and is accepted only after TLS is enabled. :param auth: Tuple of (username, password) to be used to ``login`` to the server. :param force_tls: Whether TLS should be forced. :param options: Dictionary of keyword arguments to be used when the SMTP class is called. """
return Postman(
    host=host,
    port=port,
    middlewares=[
        middleware.tls(force=force_tls),
        middleware.auth(*auth),
    ],
    **(options or {})  # guard against the default options=None
)
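A usage sketch with placeholder host and credentials; the commented send call assumes the library's envelope helper:

p = postman(
    host='smtp.example.com',
    auth=('user@example.com', 'app-password'),
    force_tls=True,
    options={'timeout': 10},  # forwarded to the underlying SMTP class
)
# response = p.send(email(sender='Me <me@example.com>',
#                         receivers=['you@example.com'],
#                         subject='Hello', content='<p>Hi!</p>'))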
<SYSTEM_TASK:> Returns the finalised mime object, after applying the internal headers <END_TASK> <USER_TASK:> Description: def mime(self): """ Returns the finalised mime object, after applying the internal headers. Usually this is not to be overridden. """
mime = self.mime_object() self.headers.prepare(mime) return mime
<SYSTEM_TASK:> Try to find existing model class named `model_name`. <END_TASK> <USER_TASK:> Description: def get_existing_model(model_name): """ Try to find existing model class named `model_name`. :param model_name: String name of the model class. """
try: model_cls = engine.get_document_cls(model_name) log.debug('Model `{}` already exists. Using existing one'.format( model_name)) return model_cls except ValueError: log.debug('Model `{}` does not exist'.format(model_name))
<SYSTEM_TASK:> Create referenced model if it doesn't exist. <END_TASK> <USER_TASK:> Description: def prepare_relationship(config, model_name, raml_resource): """ Create referenced model if it doesn't exist. When preparing a relationship, we check to see if the model that will be referenced already exists. If not, it is created so that it will be possible to use it in a relationship. Thus the first usage of this model in the RAML file must provide its schema in the POST method resource body schema. :param model_name: Name of the model which should be generated. :param raml_resource: Instance of ramlfications.raml.ResourceNode for which :model_name: will be defined. """
if get_existing_model(model_name) is None: plural_route = '/' + pluralize(model_name.lower()) route = '/' + model_name.lower() for res in raml_resource.root.resources: if res.method.upper() != 'POST': continue if res.path.endswith(plural_route) or res.path.endswith(route): break else: raise ValueError('Model `{}` used in relationship is not ' 'defined'.format(model_name)) setup_data_model(config, res, model_name)
<SYSTEM_TASK:> Generate model class. <END_TASK> <USER_TASK:> Description: def generate_model_cls(config, schema, model_name, raml_resource, es_based=True): """ Generate model class. Engine DB field types are determined using `type_fields` and only those types may be used. :param schema: Model schema dict parsed from RAML. :param model_name: String that is used as new model's name. :param raml_resource: Instance of ramlfications.raml.ResourceNode. :param es_based: Boolean indicating if generated model should be a subclass of Elasticsearch-based document class or not. If True, ESBaseDocument is used; otherwise BaseDocument is used. Defaults to True. """
from nefertari.authentication.models import AuthModelMethodsMixin base_cls = engine.ESBaseDocument if es_based else engine.BaseDocument model_name = str(model_name) metaclass = type(base_cls) auth_model = schema.get('_auth_model', False) bases = [] if config.registry.database_acls: from nefertari_guards import engine as guards_engine bases.append(guards_engine.DocumentACLMixin) if auth_model: bases.append(AuthModelMethodsMixin) bases.append(base_cls) attrs = { '__tablename__': model_name.lower(), '_public_fields': schema.get('_public_fields') or [], '_auth_fields': schema.get('_auth_fields') or [], '_hidden_fields': schema.get('_hidden_fields') or [], '_nested_relationships': schema.get('_nested_relationships') or [], } if '_nesting_depth' in schema: attrs['_nesting_depth'] = schema.get('_nesting_depth') # Generate fields from properties properties = schema.get('properties', {}) for field_name, props in properties.items(): if field_name in attrs: continue db_settings = props.get('_db_settings') if db_settings is None: continue field_kwargs = db_settings.copy() field_kwargs['required'] = bool(field_kwargs.get('required')) for default_attr_key in ('default', 'onupdate'): value = field_kwargs.get(default_attr_key) if is_callable_tag(value): field_kwargs[default_attr_key] = resolve_to_callable(value) type_name = ( field_kwargs.pop('type', 'string') or 'string').lower() if type_name not in type_fields: raise ValueError('Unknown type: {}'.format(type_name)) field_cls = type_fields[type_name] if field_cls is engine.Relationship: prepare_relationship( config, field_kwargs['document'], raml_resource) if field_cls is engine.ForeignKeyField: key = 'ref_column_type' field_kwargs[key] = type_fields[field_kwargs[key]] if field_cls is engine.ListField: key = 'item_type' field_kwargs[key] = type_fields[field_kwargs[key]] attrs[field_name] = field_cls(**field_kwargs) # Update model definition with methods and variables defined in registry attrs.update(registry.mget(model_name)) # Generate new model class model_cls = metaclass(model_name, tuple(bases), attrs) setup_model_event_subscribers(config, model_cls, schema) setup_fields_processors(config, model_cls, schema) return model_cls, auth_model
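A hypothetical schema fragment of the shape this function consumes, and how it might be invoked (config and raml_resource come from the surrounding setup):

schema = {
    '_public_fields': ['name'],
    '_auth_fields': ['id', 'name'],
    'properties': {
        'name': {'_db_settings': {'type': 'string', 'required': True}},
        'score': {'_db_settings': {'type': 'integer', 'default': 0}},
    },
}
model_cls, is_auth = generate_model_cls(
    config, schema, 'Story', raml_resource, es_based=True)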
<SYSTEM_TASK:> Generates model name and runs `setup_data_model` to get or generate the actual model class <END_TASK> <USER_TASK:> Description: def handle_model_generation(config, raml_resource): """ Generates model name and runs `setup_data_model` to get or generate the actual model class. :param config: Pyramid Configurator instance. :param raml_resource: Instance of ramlfications.raml.ResourceNode. """
model_name = generate_model_name(raml_resource) try: return setup_data_model(config, raml_resource, model_name) except ValueError as ex: raise ValueError('{}: {}'.format(model_name, str(ex)))
<SYSTEM_TASK:> Set up model event subscribers. <END_TASK> <USER_TASK:> Description: def setup_model_event_subscribers(config, model_cls, schema): """ Set up model event subscribers. :param config: Pyramid Configurator instance. :param model_cls: Model class for which handlers should be connected. :param schema: Dict of model JSON schema. """
events_map = get_events_map() model_events = schema.get('_event_handlers', {}) event_kwargs = {'model': model_cls} for event_tag, subscribers in model_events.items(): type_, action = event_tag.split('_') event_objects = events_map[type_][action] if not isinstance(event_objects, list): event_objects = [event_objects] for sub_name in subscribers: sub_func = resolve_to_callable(sub_name) config.subscribe_to_events( sub_func, event_objects, **event_kwargs)
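A hypothetical _event_handlers section of the kind this loop consumes; each tag splits into a '<type>_<action>' pair and each dotted path must resolve to a callable:

schema = {
    '_event_handlers': {
        'after_create': ['mypackage.handlers.notify_moderators'],
        'before_update': ['mypackage.handlers.stamp_editor'],
    },
}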
<SYSTEM_TASK:> Set up model fields' processors. <END_TASK> <USER_TASK:> Description: def setup_fields_processors(config, model_cls, schema): """ Set up model fields' processors. :param config: Pyramid Configurator instance. :param model_cls: Model class for field of which processors should be set up. :param schema: Dict of model JSON schema. """
properties = schema.get('properties', {}) for field_name, props in properties.items(): if not props: continue processors = props.get('_processors') backref_processors = props.get('_backref_processors') if processors: processors = [resolve_to_callable(val) for val in processors] setup_kwargs = {'model': model_cls, 'field': field_name} config.add_field_processors(processors, **setup_kwargs) if backref_processors: db_settings = props.get('_db_settings', {}) is_relationship = db_settings.get('type') == 'relationship' document = db_settings.get('document') backref_name = db_settings.get('backref_name') if not (is_relationship and document and backref_name): continue backref_processors = [ resolve_to_callable(val) for val in backref_processors] setup_kwargs = { 'model': engine.get_document_cls(document), 'field': backref_name } config.add_field_processors( backref_processors, **setup_kwargs)
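A hypothetical property definition exercising both _processors and _backref_processors as consumed by the loop above:

properties = {
    'title': {
        '_db_settings': {'type': 'string'},
        '_processors': ['mypackage.processors.strip_whitespace'],
    },
    'owner': {
        '_db_settings': {
            'type': 'relationship',
            'document': 'User',
            'backref_name': 'stories',
        },
        '_backref_processors': ['mypackage.processors.index_owner'],
    },
}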
<SYSTEM_TASK:> Setup Pyramid AuthTktAuthenticationPolicy. <END_TASK> <USER_TASK:> Description: def _setup_ticket_policy(config, params): """ Setup Pyramid AuthTktAuthenticationPolicy. Notes: * Initial `secret` params value is considered to be a name of config param that represents a cookie name. * `auth_model.get_groups_by_userid` is used as a `callback`. * Also connects basic routes to perform authentication actions. :param config: Pyramid Configurator instance. :param params: Nefertari dictset which contains security scheme `settings`. """
from nefertari.authentication.views import ( TicketAuthRegisterView, TicketAuthLoginView, TicketAuthLogoutView) log.info('Configuring Pyramid Ticket Authn policy') if 'secret' not in params: raise ValueError( 'Missing required security scheme settings: secret') params['secret'] = config.registry.settings[params['secret']] auth_model = config.registry.auth_model params['callback'] = auth_model.get_groups_by_userid config.add_request_method( auth_model.get_authuser_by_userid, 'user', reify=True) policy = AuthTktAuthenticationPolicy(**params) RegisterViewBase = TicketAuthRegisterView if config.registry.database_acls: class RegisterViewBase(ACLAssignRegisterMixin, TicketAuthRegisterView): pass class RamsesTicketAuthRegisterView(RegisterViewBase): Model = config.registry.auth_model class RamsesTicketAuthLoginView(TicketAuthLoginView): Model = config.registry.auth_model class RamsesTicketAuthLogoutView(TicketAuthLogoutView): Model = config.registry.auth_model common_kw = { 'prefix': 'auth', 'factory': 'nefertari.acl.AuthenticationACL', } root = config.get_root_resource() root.add('register', view=RamsesTicketAuthRegisterView, **common_kw) root.add('login', view=RamsesTicketAuthLoginView, **common_kw) root.add('logout', view=RamsesTicketAuthLogoutView, **common_kw) return policy
<SYSTEM_TASK:> Setup `nefertari.ApiKeyAuthenticationPolicy`. <END_TASK> <USER_TASK:> Description: def _setup_apikey_policy(config, params): """ Setup `nefertari.ApiKeyAuthenticationPolicy`. Notes: * User may provide model name in :params['user_model']: to define the name of the user model. * `auth_model.get_groups_by_token` is used to perform the username and token check * `auth_model.get_token_credentials` is used to get username and token from userid * Also connects basic routes to perform authentication actions. Arguments: :config: Pyramid Configurator instance. :params: Nefertari dictset which contains security scheme `settings`. """
from nefertari.authentication.views import ( TokenAuthRegisterView, TokenAuthClaimView, TokenAuthResetView) log.info('Configuring ApiKey Authn policy') auth_model = config.registry.auth_model params['check'] = auth_model.get_groups_by_token params['credentials_callback'] = auth_model.get_token_credentials params['user_model'] = auth_model config.add_request_method( auth_model.get_authuser_by_name, 'user', reify=True) policy = ApiKeyAuthenticationPolicy(**params) RegisterViewBase = TokenAuthRegisterView if config.registry.database_acls: class RegisterViewBase(ACLAssignRegisterMixin, TokenAuthRegisterView): pass class RamsesTokenAuthRegisterView(RegisterViewBase): Model = auth_model class RamsesTokenAuthClaimView(TokenAuthClaimView): Model = auth_model class RamsesTokenAuthResetView(TokenAuthResetView): Model = auth_model common_kw = { 'prefix': 'auth', 'factory': 'nefertari.acl.AuthenticationACL', } root = config.get_root_resource() root.add('register', view=RamsesTokenAuthRegisterView, **common_kw) root.add('token', view=RamsesTokenAuthClaimView, **common_kw) root.add('reset_token', view=RamsesTokenAuthResetView, **common_kw) return policy
<SYSTEM_TASK:> Setup authentication, authorization policies. <END_TASK> <USER_TASK:> Description: def setup_auth_policies(config, raml_root): """ Setup authentication, authorization policies. Performs basic validation to check all the required values are present and performs authentication, authorization policies generation using generator functions from `AUTHENTICATION_POLICIES`. :param config: Pyramid Configurator instance. :param raml_root: Instance of ramlfications.raml.RootNode. """
log.info('Configuring auth policies') secured_by_all = raml_root.secured_by or [] secured_by = [item for item in secured_by_all if item] if not secured_by: log.info('API is not secured. `secured_by` attribute ' 'value missing.') return secured_by = secured_by[0] schemes = {scheme.name: scheme for scheme in raml_root.security_schemes} if secured_by not in schemes: raise ValueError( 'Undefined security scheme used in `secured_by`: {}'.format( secured_by)) scheme = schemes[secured_by] if scheme.type not in AUTHENTICATION_POLICIES: raise ValueError('Unsupported security scheme type: {}'.format( scheme.type)) # Setup Authentication policy policy_generator = AUTHENTICATION_POLICIES[scheme.type] params = dictset(scheme.settings or {}) authn_policy = policy_generator(config, params) config.set_authentication_policy(authn_policy) # Setup Authorization policy authz_policy = ACLAuthorizationPolicy() config.set_authorization_policy(authz_policy)