<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_value(self, value):
"""Parse string into instance of `time`.""" |
if value is None:
return value
if isinstance(value, datetime.time):
return value
return parse(value).timetz() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def to_struct(self, value):
"""Cast `date` object to string.""" |
if self.str_format:
return value.strftime(self.str_format)
return value.strftime(self.default_format) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_value(self, value):
"""Parse string into instance of `datetime`.""" |
if isinstance(value, datetime.datetime):
return value
if value:
return parse(value)
else:
return None |
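A quick round-trip sketch of the parser above, assuming `parse` is `dateutil.parser.parse` (an assumption, since the import is not shown in these snippets):

# Sketch only: `parse` is assumed to be dateutil.parser.parse.
from dateutil.parser import parse

print(parse('2019-05-01 10:30:00'))       # datetime.datetime(2019, 5, 1, 10, 30)
print(parse('10:30:00+02:00').timetz())   # time with tzinfo, as parse_value returns for `time` fields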
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def to_struct(model):
"""Cast instance of model to python structure. :param model: Model to be casted. :rtype: ``dict`` """ |
model.validate()
resp = {}
for _, name, field in model.iterate_with_name():
value = field.__get__(model)
if value is None:
continue
value = field.to_struct(value)
resp[name] = value
return resp |
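A minimal usage sketch, assuming the jsonmodels-style field API that `iterate_with_name` and `validate` suggest (the Person model below is hypothetical):

# Hypothetical model; assumes the jsonmodels package.
from jsonmodels import models, fields

class Person(models.Model):
    name = fields.StringField(required=True)
    age = fields.IntField()

person = Person(name='Ada', age=36)
print(to_struct(person))   # {'name': 'Ada', 'age': 36}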
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def javascript(filename, type='text/javascript'):
'''A simple shortcut to render a ``script`` tag to a static javascript file'''
if '?' in filename and len(filename.split('?')) == 2:
filename, params = filename.split('?')
return '<script type="%s" src="%s?%s"></script>' % (type, staticfiles_storage.url(filename), params)
else:
return '<script type="%s" src="%s"></script>' % (type, staticfiles_storage.url(filename)) |
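Illustrative output of the tag, assuming the staticfiles storage maps to a /static/ prefix (actual URLs depend on project settings):

# Output shapes only; the exact src depends on staticfiles_storage.
javascript('js/app.js')
# -> '<script type="text/javascript" src="/static/js/app.js"></script>'
javascript('js/app.js?v=2')
# -> '<script type="text/javascript" src="/static/js/app.js?v=2"></script>'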
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def jquery_js(version=None, migrate=False):
'''A shortcut to render a ``script`` tag for the packaged jQuery'''
version = version or settings.JQUERY_VERSION
suffix = '.min' if not settings.DEBUG else ''
libs = [js_lib('jquery-%s%s.js' % (version, suffix))]
if _boolean(migrate):
libs.append(js_lib('jquery-migrate-%s%s.js' % (JQUERY_MIGRATE_VERSION, suffix)))
return '\n'.join(libs) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def django_js(context, jquery=True, i18n=True, csrf=True, init=True):
'''Include Django.js javascript library in the page'''
return {
'js': {
'minified': not settings.DEBUG,
'jquery': _boolean(jquery),
'i18n': _boolean(i18n),
'csrf': _boolean(csrf),
'init': _boolean(init),
}
} |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def django_js_init(context, jquery=False, i18n=True, csrf=True, init=True):
'''Include Django.js javascript library initialization in the page'''
return {
'js': {
'jquery': _boolean(jquery),
'i18n': _boolean(i18n),
'csrf': _boolean(csrf),
'init': _boolean(init),
}
} |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def as_dict(self):
'''
Serialize the context as a dictionary from a given request.
'''
data = {}
if settings.JS_CONTEXT_ENABLED:
for context in RequestContext(self.request):
for key, value in six.iteritems(context):
if settings.JS_CONTEXT and key not in settings.JS_CONTEXT:
continue
if settings.JS_CONTEXT_EXCLUDE and key in settings.JS_CONTEXT_EXCLUDE:
continue
handler_name = 'process_%s' % key
if hasattr(self, handler_name):
handler = getattr(self, handler_name)
data[key] = handler(value, data)
elif isinstance(value, SERIALIZABLE_TYPES):
data[key] = value
if settings.JS_USER_ENABLED:
self.handle_user(data)
return data |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def process_LANGUAGE_CODE(self, language_code, data):
'''
Fix the language code when set to the non-included default `en`
and add the extra variables ``LANGUAGE_NAME`` and ``LANGUAGE_NAME_LOCAL``.
'''
# Dirty hack to fix non included default
language_code = 'en-us' if language_code == 'en' else language_code
language = translation.get_language_info('en' if language_code == 'en-us' else language_code)
if not settings.JS_CONTEXT or 'LANGUAGE_NAME' in settings.JS_CONTEXT \
or (settings.JS_CONTEXT_EXCLUDE and 'LANGUAGE_NAME' in settings.JS_CONTEXT_EXCLUDE):
data['LANGUAGE_NAME'] = language['name']
if not settings.JS_CONTEXT or 'LANGUAGE_NAME_LOCAL' in settings.JS_CONTEXT \
or (settings.JS_CONTEXT_EXCLUDE and 'LANGUAGE_NAME_LOCAL' in settings.JS_CONTEXT_EXCLUDE):
data['LANGUAGE_NAME_LOCAL'] = language['name_local']
return language_code |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def handle_user(self, data):
'''
Insert user information in data.
Override it to add extra user attributes.
'''
# Default to unauthenticated anonymous user
data['user'] = {
'username': '',
'is_authenticated': False,
'is_staff': False,
'is_superuser': False,
'permissions': tuple(),
}
if 'django.contrib.sessions.middleware.SessionMiddleware' in settings.MIDDLEWARE_CLASSES:
user = self.request.user
data['user']['is_authenticated'] = user.is_authenticated()
if hasattr(user, 'username'):
data['user']['username'] = user.username
elif hasattr(user, 'get_username'):
data['user']['username'] = user.get_username()
if hasattr(user, 'is_staff'):
data['user']['is_staff'] = user.is_staff
if hasattr(user, 'is_superuser'):
data['user']['is_superuser'] = user.is_superuser
if hasattr(user, 'get_all_permissions'):
data['user']['permissions'] = tuple(user.get_all_permissions()) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def class_from_string(name):
'''
Get a python class object from its name
'''
module_name, class_name = name.rsplit('.', 1)
__import__(module_name)
module = sys.modules[module_name]
return getattr(module, class_name) |
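A quick usage sketch with a standard-library class (note the function relies on `sys` being imported in its module):

import collections

cls = class_from_string('collections.OrderedDict')
assert cls is collections.OrderedDict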
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def execute(self, command):
'''
Execute a subprocess yielding output lines
'''
process = Popen(command, stdout=PIPE, stderr=STDOUT, universal_newlines=True)
while True:
if process.poll() is not None:
self.returncode = process.returncode # pylint: disable=W0201
break
yield process.stdout.readline() |
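A standalone sketch of the same poll-and-yield pattern, assuming a POSIX `echo` binary. Note that, as in the original, any output still buffered when `poll()` first reports an exit code is dropped:

from subprocess import Popen, PIPE, STDOUT

def run_lines(command):
    process = Popen(command, stdout=PIPE, stderr=STDOUT, universal_newlines=True)
    while True:
        if process.poll() is not None:
            break   # like the original, buffered trailing output is discarded here
        yield process.stdout.readline()

for line in run_lines(['echo', 'hello']):
    print(line, end='')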
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def phantomjs(self, *args, **kwargs):
'''
Execute PhantomJS by giving ``args`` as command line arguments.
If tests are run in verbose mode (``-v/--verbosity`` = 2), it outputs:
- the title as header (with separators before and after)
- modules and test names
- assertions results (with ``django.utils.termcolors`` support)
In case of error, a JsTestException is raised to give details about javascript errors.
'''
separator = '=' * LINE_SIZE
title = kwargs['title'] if 'title' in kwargs else 'phantomjs output'
nb_spaces = (LINE_SIZE - len(title)) // 2
if VERBOSE:
print('')
print(separator)
print(' ' * nb_spaces + title)
print(separator)
sys.stdout.flush()
with NamedTemporaryFile(delete=True) as cookies_file:
cmd = ('phantomjs', '--cookies-file=%s' % cookies_file.name) + args
if self.timeout:
cmd += (str(self.timeout),)
parser = TapParser(debug=VERBOSITY > 2)
output = self.execute(cmd)
for item in parser.parse(output):
if VERBOSE:
print(item.display())
sys.stdout.flush()
if VERBOSE:
print(separator)
sys.stdout.flush()
failures = parser.suites.get_all_failures()
if failures:
raise JsTestException('Failed javascript assertions', failures)
if self.returncode > 0:
raise JsTestException('PhantomJS return with non-zero exit code (%s)' % self.returncode) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def run_suite(self):
'''
Run a phantomjs test suite.
- ``phantomjs_runner`` is mandatory.
- Either ``url`` or ``url_name`` needs to be defined.
'''
if not self.phantomjs_runner:
raise JsTestException('phantomjs_runner needs to be defined')
url = self.get_url()
self.phantomjs(self.phantomjs_runner, url, title=self.title)
self.cleanup() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __prepare_bloom(self):
"""Prepare bloom for existing checks """ |
self.__bloom = pybloom_live.ScalableBloomFilter()
columns = [getattr(self.__table.c, key) for key in self.__update_keys]
keys = select(columns).execution_options(stream_results=True).execute()
for key in keys:
self.__bloom.add(tuple(key)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __insert(self):
"""Insert rows to table """ |
if len(self.__buffer) > 0:
# Insert data
statement = self.__table.insert()
if self.__autoincrement:
statement = statement.returning(
getattr(self.__table.c, self.__autoincrement))
statement = statement.values(self.__buffer)
res = statement.execute()
for id, in res:
row = self.__buffer.pop(0)
yield WrittenRow(row, False, id)
else:
statement.execute(self.__buffer)
for row in self.__buffer:
yield WrittenRow(row, False, None)
# Clean memory
self.__buffer = [] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __update(self, row):
"""Update rows in table """ |
expr = self.__table.update().values(row)
for key in self.__update_keys:
expr = expr.where(getattr(self.__table.c, key) == row[key])
if self.__autoincrement:
expr = expr.returning(getattr(self.__table.c, self.__autoincrement))
res = expr.execute()
if res.rowcount > 0:
if self.__autoincrement:
first = next(iter(res))
last_row_id = first[0]
return last_row_id
return 0
return None |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __check_existing(self, row):
"""Check if row exists in table """ |
if self.__update_keys is not None:
key = tuple(row[key] for key in self.__update_keys)
if key in self.__bloom:
return True
self.__bloom.add(key)
return False
return False |
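The membership pattern in isolation, as a runnable sketch (requires pybloom_live). A bloom filter never yields false negatives but has a small false-positive rate, so a True here can occasionally be wrong:

import pybloom_live

bloom = pybloom_live.ScalableBloomFilter()
key = ('alice', 1)
print(key in bloom)   # False: definitely not seen yet
bloom.add(key)
print(key in bloom)   # True (subject to a small false-positive rate)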
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __get_table(self, bucket):
"""Get table by bucket """ |
table_name = self.__mapper.convert_bucket(bucket)
if self.__dbschema:
table_name = '.'.join((self.__dbschema, table_name))
return self.__metadata.tables[table_name] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_field_comment(field, separator=' - '):
""" Create SQL comment from field's title and description :param field: tableschema-py Field, with optional 'title' and 'description' values :param separator: :return: 'my_title - my_desc' 'my_title' 'my_description' '' """ |
title = field.descriptor.get('title') or ''
description = field.descriptor.get('description') or ''
return _get_comment(description, title, separator) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def convert_descriptor(self, bucket, descriptor, index_fields=[], autoincrement=None):
"""Convert descriptor to SQL """ |
# Prepare
columns = []
indexes = []
fallbacks = []
constraints = []
column_mapping = {}
table_name = self.convert_bucket(bucket)
table_comment = _get_comment(descriptor.get('title', ''), descriptor.get('description', ''))
schema = tableschema.Schema(descriptor)
# Autoincrement
if autoincrement is not None:
columns.append(sa.Column(
autoincrement, sa.Integer, autoincrement=True, nullable=False))
# Fields
for field in schema.fields:
column_type = self.convert_type(field.type)
if not column_type:
column_type = sa.Text
fallbacks.append(field.name)
nullable = not field.required
column_comment = _get_field_comment(field)
unique = field.constraints.get('unique', False)
column = sa.Column(field.name, column_type, nullable=nullable, comment=column_comment,
unique=unique)
columns.append(column)
column_mapping[field.name] = column
# Primary key
pk = descriptor.get('primaryKey', None)
if pk is not None:
if isinstance(pk, six.string_types):
pk = [pk]
if autoincrement is not None:
if pk is not None:
pk = [autoincrement] + pk
else:
pk = [autoincrement]
if pk is not None:
constraint = sa.PrimaryKeyConstraint(*pk)
constraints.append(constraint)
# Foreign keys
if self.__dialect == 'postgresql':
fks = descriptor.get('foreignKeys', [])
for fk in fks:
fields = fk['fields']
resource = fk['reference']['resource']
foreign_fields = fk['reference']['fields']
if isinstance(fields, six.string_types):
fields = [fields]
if resource != '':
table_name = self.convert_bucket(resource)
if isinstance(foreign_fields, six.string_types):
foreign_fields = [foreign_fields]
composer = lambda field: '.'.join([table_name, field])
foreign_fields = list(map(composer, foreign_fields))
constraint = sa.ForeignKeyConstraint(fields, foreign_fields)
constraints.append(constraint)
# Indexes
if self.__dialect == 'postgresql':
for index, index_definition in enumerate(index_fields):
name = table_name + '_ix%03d' % index
index_columns = [column_mapping[field] for field in index_definition]
indexes.append(sa.Index(name, *index_columns))
return columns, constraints, indexes, fallbacks, table_comment |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def convert_row(self, keyed_row, schema, fallbacks):
"""Convert row to SQL """ |
for key, value in list(keyed_row.items()):
field = schema.get_field(key)
if not field:
del keyed_row[key]
continue
if key in fallbacks:
value = _uncast_value(value, field=field)
else:
value = field.cast_value(value)
keyed_row[key] = value
return keyed_row |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def convert_type(self, type):
"""Convert type to SQL """ |
# Default dialect
mapping = {
'any': sa.Text,
'array': None,
'boolean': sa.Boolean,
'date': sa.Date,
'datetime': sa.DateTime,
'duration': None,
'geojson': None,
'geopoint': None,
'integer': sa.Integer,
'number': sa.Float,
'object': None,
'string': sa.Text,
'time': sa.Time,
'year': sa.Integer,
'yearmonth': None,
}
# Postgresql dialect
if self.__dialect == 'postgresql':
mapping.update({
'array': JSONB,
'geojson': JSONB,
'number': sa.Numeric,
'object': JSONB,
})
# Not supported type
if type not in mapping:
message = 'Field type "%s" is not supported'
raise tableschema.exceptions.StorageError(message % type)
return mapping[type] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def restore_bucket(self, table_name):
"""Restore bucket from SQL """ |
if table_name.startswith(self.__prefix):
return table_name.replace(self.__prefix, '', 1)
return None |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def restore_descriptor(self, table_name, columns, constraints, autoincrement_column=None):
"""Restore descriptor from SQL """ |
# Fields
fields = []
for column in columns:
if column.name == autoincrement_column:
continue
field_type = self.restore_type(column.type)
field = {'name': column.name, 'type': field_type}
if not column.nullable:
field['constraints'] = {'required': True}
fields.append(field)
# Primary key
pk = []
for constraint in constraints:
if isinstance(constraint, sa.PrimaryKeyConstraint):
for column in constraint.columns:
if column.name == autoincrement_column:
continue
pk.append(column.name)
# Foreign keys
fks = []
if self.__dialect == 'postgresql':
for constraint in constraints:
if isinstance(constraint, sa.ForeignKeyConstraint):
resource = ''
own_fields = []
foreign_fields = []
for element in constraint.elements:
own_fields.append(element.parent.name)
if element.column.table.name != table_name:
resource = self.restore_bucket(element.column.table.name)
foreign_fields.append(element.column.name)
if len(own_fields) == len(foreign_fields) == 1:
own_fields = own_fields.pop()
foreign_fields = foreign_fields.pop()
fks.append({
'fields': own_fields,
'reference': {'resource': resource, 'fields': foreign_fields},
})
# Descriptor
descriptor = {}
descriptor['fields'] = fields
if len(pk) > 0:
if len(pk) == 1:
pk = pk.pop()
descriptor['primaryKey'] = pk
if len(fks) > 0:
descriptor['foreignKeys'] = fks
return descriptor |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def restore_row(self, row, schema):
"""Restore row from SQL """ |
row = list(row)
for index, field in enumerate(schema.fields):
if self.__dialect == 'postgresql':
if field.type in ['array', 'object']:
continue
row[index] = field.cast_value(row[index])
return row |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def restore_type(self, type):
"""Restore type from SQL """ |
# All dialects
mapping = {
ARRAY: 'array',
sa.Boolean: 'boolean',
sa.Date: 'date',
sa.DateTime: 'datetime',
sa.Float: 'number',
sa.Integer: 'integer',
JSONB: 'object',
JSON: 'object',
sa.Numeric: 'number',
sa.Text: 'string',
sa.Time: 'time',
sa.VARCHAR: 'string',
UUID: 'string',
}
# Get field type
field_type = None
for key, value in mapping.items():
if isinstance(type, key):
field_type = value
# Not supported
if field_type is None:
message = 'Type "%s" is not supported'
raise tableschema.exceptions.StorageError(message % type)
return field_type |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def open_hierarchy(self, path, relative_to_object_id, object_id, create_file_type=0):
""" CreateFileType 0 - Creates no new object. 1 - Creates a notebook with the specified name at the specified location. 2 - Creates a section group with the specified name at the specified location. 3 - Creates a section with the specified name at the specified location. """ |
try:
return(self.process.OpenHierarchy(path, relative_to_object_id, "", create_file_type))
except Exception as e:
print(e)
print("Could not Open Hierarchy") |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_new_page (self, section_id, new_page_style=0):
""" NewPageStyle 0 - Create a Page that has Default Page Style 1 - Create a blank page with no title 2 - Createa blank page that has no title """ |
try:
self.process.CreateNewPage(section_id, "", new_page_style)
except Exception as e:
print(e)
print("Unable to create the page") |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_page_content(self, page_id, page_info=0):
""" PageInfo 0 - Returns only basic page content, without selection markup and binary data objects. This is the standard value to pass. 1 - Returns page content with no selection markup, but with all binary data. 2 - Returns page content with selection markup, but no binary data. 3 - Returns page content with selection markup and all binary data. """ |
try:
return(self.process.GetPageContent(page_id, "", page_info))
except Exception as e:
print(e)
print("Could not get Page Content") |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_special_location(self, special_location=0):
""" SpecialLocation 0 - Gets the path to the Backup Folders folder location. 1 - Gets the path to the Unfiled Notes folder location. 2 - Gets the path to the Default Notebook folder location. """ |
try:
return(self.process.GetSpecialLocation(special_location))
except Exception as e:
print(e)
print("Could not retreive special location") |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def memory():
"""Determine memory specifications of the machine. Returns ------- mem_info : dictonary Holds the current values for the total, free and used memory of the system. """ |
mem_info = dict()
for k, v in psutil.virtual_memory()._asdict().items():
mem_info[k] = int(v)
return mem_info |
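Usage sketch, reusing memory() as defined above (requires psutil):

mem = memory()
print(mem['total'], mem['free'], mem['used'])   # byte counts reported by psutil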
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_chunk_size(N, n):
"""Given a two-dimensional array with a dimension of size 'N', determine the number of rows or columns that can fit into memory. Parameters N : int The size of one of the dimensions of a two-dimensional array. n : int The number of arrays of size 'N' times 'chunk_size' that can fit in memory. Returns ------- chunk_size : int The size of the dimension orthogonal to the one of size 'N'. """ |
mem_free = memory()['free']
if mem_free > 60000000:
chunk_size = int(((mem_free - 10000000) * 1000) / (4 * n * N))
return chunk_size
elif mem_free > 40000000:
chunk_size = int(((mem_free - 7000000) * 1000) / (4 * n * N))
return chunk_size
elif mem_free > 14000000:
chunk_size = int(((mem_free - 2000000) * 1000) / (4 * n * N))
return chunk_size
elif mem_free > 8000000:
chunk_size = int(((mem_free - 1400000) * 1000) / (4 * n * N))
return chunk_size
elif mem_free > 2000000:
chunk_size = int(((mem_free - 900000) * 1000) / (4 * n * N))
return chunk_size
elif mem_free > 1000000:
chunk_size = int(((mem_free - 400000) * 1000) / (4 * n * N))
return chunk_size
else:
print("\nERROR: Cluster_Ensembles: get_chunk_size: "
"this machine does not have enough free memory resources "
"to perform ensemble clustering.\n")
sys.exit(1) |
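A worked example of the first branch. The thresholds read as if memory()['free'] were expressed in kB (as in a /proc/meminfo-based implementation), so that unit is assumed here; with psutil the values are bytes, which is worth verifying before relying on these sizes:

# Worked example under the kB assumption.
mem_free = 80_000_000                                    # ~80 GB free (hypothetical)
n, N = 2, 1_000_000
chunk_size = int(((mem_free - 10_000_000) * 1000) / (4 * n * N))
print(chunk_size)                                        # 8750 rows of 4-byte floats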
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ceEvalMutual(cluster_runs, cluster_ensemble = None, verbose = False):
"""Compute a weighted average of the mutual information with the known labels, the weights being proportional to the fraction of known labels. Parameters cluster_runs : array of shape (n_partitions, n_samples) Each row of this matrix is such that the i-th entry corresponds to the cluster ID to which the i-th sample of the data-set has been classified by this particular clustering. Samples not selected for clustering in a given round are are tagged by an NaN. cluster_ensemble : array of shape (n_samples,), optional (default = None) The identity of the cluster to which each sample of the whole data-set belong to according to consensus clustering. verbose : Boolean, optional (default = False) Specifies if status messages will be displayed on the standard output. Returns ------- unnamed variable : float The weighted average of the mutual information between the consensus clustering and the many runs from the ensemble of independent clusterings on subsamples of the data-set. """ |
if cluster_ensemble is None:
return 0.0
if reduce(operator.mul, cluster_runs.shape, 1) == max(cluster_runs.shape):
cluster_runs = cluster_runs.reshape(1, -1)
weighted_average_mutual_information = 0
N_labelled_indices = 0
for i in range(cluster_runs.shape[0]):
labelled_indices = np.where(np.isfinite(cluster_runs[i]))[0]
N = labelled_indices.size
x = np.reshape(checkcl(cluster_ensemble[labelled_indices], verbose), newshape = N)
y = np.reshape(checkcl(np.rint(cluster_runs[i, labelled_indices]), verbose), newshape = N)
q = normalized_mutual_info_score(x, y)
weighted_average_mutual_information += q * N
N_labelled_indices += N
return float(weighted_average_mutual_information) / N_labelled_indices |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def checkcl(cluster_run, verbose = False):
"""Ensure that a cluster labelling is in a valid format. Parameters cluster_run : array of shape (n_samples,) A vector of cluster IDs for each of the samples selected for a given round of clustering. The samples not selected are labelled with NaN. verbose : Boolean, optional (default = False) Specifies if status messages will be displayed on the standard output. Returns ------- cluster_run : array of shape (n_samples,) The input vector is modified in place, such that invalid values are either rejected or altered. In particular, the labelling of cluster IDs starts at zero and increases by 1 without any gap left. """ |
cluster_run = np.asanyarray(cluster_run)
if cluster_run.size == 0:
raise ValueError("\nERROR: Cluster_Ensembles: checkcl: "
"empty vector provided as input.\n")
elif reduce(operator.mul, cluster_run.shape, 1) != max(cluster_run.shape):
raise ValueError("\nERROR: Cluster_Ensembles: checkl: "
"problem in dimensions of the cluster label vector "
"under consideration.\n")
elif np.where(np.isnan(cluster_run))[0].size != 0:
raise ValueError("\nERROR: Cluster_Ensembles: checkl: vector of cluster "
"labellings provided as input contains at least one 'NaN'.\n")
else:
min_label = np.amin(cluster_run)
if min_label < 0:
if verbose:
print("\nINFO: Cluster_Ensembles: checkcl: detected negative values "
"as cluster labellings.")
cluster_run -= min_label
if verbose:
print("\nINFO: Cluster_Ensembles: checkcl: "
"offset to a minimum value of '0'.")
x = one_to_max(cluster_run)
if np.amax(cluster_run) != np.amax(x):
if verbose:
print("\nINFO: Cluster_Ensembles: checkcl: the vector cluster "
"labellings provided is not a dense integer mapping.")
cluster_run = x
if verbose:
print("INFO: Cluster_Ensembles: checkcl: brought modification "
"to this vector so that its labels range "
"from 0 to {0}, included.\n".format(np.amax(cluster_run)))
return cluster_run |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def one_to_max(array_in):
"""Alter a vector of cluster labels to a dense mapping. Given that this function is herein always called after passing a vector to the function checkcl, one_to_max relies on the assumption that cluster_run does not contain any NaN entries. Parameters array_in : a list or one-dimensional array The list of cluster IDs to be processed. Returns ------- result : one-dimensional array A massaged version of the input vector of cluster identities. """ |
x = np.asanyarray(array_in)
N_in = x.size
array_in = x.reshape(N_in)
sorted_array = np.sort(array_in)
sorting_indices = np.argsort(array_in)
last = np.nan
current_index = -1
for i in range(N_in):
if last != sorted_array[i] or np.isnan(last):
last = sorted_array[i]
current_index += 1
sorted_array[i] = current_index
result = np.empty(N_in, dtype = int)
result[sorting_indices] = sorted_array
return result |
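For instance, reusing one_to_max as defined above (with numpy imported as np, as the snippets assume):

print(one_to_max([4, 4, 0, 7]))   # -> [1 1 0 2]: arbitrary IDs collapse to a dense 0..k range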
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def checks(similarities, verbose = False):
"""Check that a matrix is a proper similarity matrix and bring appropriate changes if applicable. Parameters similarities : array of shape (n_samples, n_samples) A matrix of pairwise similarities between (sub)-samples of the data-set. verbose : Boolean, optional (default = False) Alerts of any issue with the similarities matrix provided and of any step possibly taken to remediate such problem. """ |
if similarities.size == 0:
raise ValueError("\nERROR: Cluster_Ensembles: checks: the similarities "
"matrix provided as input happens to be empty.\n")
elif np.where(np.isnan(similarities))[0].size != 0:
raise ValueError("\nERROR: Cluster_Ensembles: checks: input similarities "
"matrix contains at least one 'NaN'.\n")
elif np.where(np.isinf(similarities))[0].size != 0:
raise ValueError("\nERROR: Cluster_Ensembles: checks: at least one infinite entry "
"detected in input similarities matrix.\n")
else:
if np.where(np.logical_not(np.isreal(similarities)))[0].size != 0:
if verbose:
print("\nINFO: Cluster_Ensembles: checks: complex entries found "
"in the similarities matrix.")
similarities = similarities.real
if verbose:
print("\nINFO: Cluster_Ensembles: checks: "
"truncated to their real components.")
if similarities.shape[0] != similarities.shape[1]:
if verbose:
print("\nINFO: Cluster_Ensembles: checks: non-square matrix provided.")
N_square = min(similarities.shape)
similarities = similarities[:N_square, :N_square]
if verbose:
print("\nINFO: Cluster_Ensembles: checks: using largest square sub-matrix.")
max_sim = np.amax(similarities)
min_sim = np.amin(similarities)
if max_sim > 1 or min_sim < 0:
if verbose:
print("\nINFO: Cluster_Ensembles: checks: strictly negative "
"or bigger than unity entries spotted in input similarities matrix.")
indices_too_big = np.where(similarities > 1)
indices_negative = np.where(similarities < 0)
similarities[indices_too_big] = 1.0
similarities[indices_negative] = 0.0
if verbose:
print("\nINFO: Cluster_Ensembles: checks: done setting them to "
"the lower or upper accepted values.")
if not np.allclose(similarities, np.transpose(similarities)):
if verbose:
print("\nINFO: Cluster_Ensembles: checks: non-symmetric input "
"similarities matrix.")
similarities = np.divide(similarities + np.transpose(similarities), 2.0)
if verbose:
print("\nINFO: Cluster_Ensembles: checks: now symmetrized.")
if not np.allclose(np.diag(similarities), np.ones(similarities.shape[0])):
if verbose:
print("\nINFO: Cluster_Ensembles: checks: the self-similarities "
"provided as input are not all of unit value.")
similarities[np.diag_indices(similarities.shape[0])] = 1
if verbose:
print("\nINFO: Cluster_Ensembles: checks: issue corrected.") |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def metis(hdf5_file_name, N_clusters_max):
"""METIS algorithm by Karypis and Kumar. Partitions the induced similarity graph passed by CSPA. Parameters hdf5_file_name : string or file handle N_clusters_max : int Returns ------- labels : array of shape (n_samples,) A vector of labels denoting the cluster to which each sample has been assigned as a result of the CSPA heuristics for consensus clustering. Reference --------- G. Karypis and V. Kumar, "A Fast and High Quality Multilevel Scheme for Partitioning Irregular Graphs" In: SIAM Journal on Scientific Computing, Vol. 20, No. 1, pp. 359-392, 1999. """ |
file_name = wgraph(hdf5_file_name)
labels = sgraph(N_clusters_max, file_name)
subprocess.call(['rm', file_name])
return labels |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def hmetis(hdf5_file_name, N_clusters_max, w = None):
"""Gives cluster labels ranging from 1 to N_clusters_max for hypergraph partitioning required for HGPA. Parameters hdf5_file_name : file handle or string N_clusters_max : int w : array, optional (default = None) Returns ------- labels : array of shape (n_samples,) A vector of labels denoting the cluster to which each sample has been assigned as a result of the HGPA approximation algorithm for consensus clustering. Reference --------- G. Karypis, R. Aggarwal, V. Kumar and S. Shekhar, "Multilevel hypergraph partitioning: applications in VLSI domain" In: IEEE Transactions on Very Large Scale Integration (VLSI) Systems, Vol. 7, No. 1, pp. 69-79, 1999. """ |
if w is None:
file_name = wgraph(hdf5_file_name, None, 2)
else:
file_name = wgraph(hdf5_file_name, w, 3)
labels = sgraph(N_clusters_max, file_name)
labels = one_to_max(labels)
subprocess.call(['rm', file_name])
return labels |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def obfuscate(p, action):
"""Obfuscate the auth details to avoid easy snatching. It's best to use a throw away account for these alerts to avoid having your authentication put at risk by storing it locally. """ |
key = "ru7sll3uQrGtDPcIW3okutpFLo6YYtd5bWSpbZJIopYQ0Du0a1WlhvJOaZEH"
s = list()
if action == 'store':
if PY2:
for i in range(len(p)):
kc = key[i % len(key)]
ec = chr((ord(p[i]) + ord(kc)) % 256)
s.append(ec)
return base64.urlsafe_b64encode("".join(s))
else:
return base64.urlsafe_b64encode(p.encode()).decode()
else:
if PY2:
e = base64.urlsafe_b64decode(p)
for i in range(len(e)):
kc = key[i % len(key)]
dc = chr((256 + ord(e[i]) - ord(kc)) % 256)
s.append(dc)
return "".join(s)
else:
e = base64.urlsafe_b64decode(p)
return e.decode() |
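A round-trip sketch of the Python 3 branch which, as the docstring hints, is plain base64 rather than real encryption:

stored = obfuscate('s3cret-password', 'store')
print(stored)                           # URL-safe base64 text, not encrypted
print(obfuscate(stored, 'fetch'))       # 's3cret-password'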
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _config_bootstrap(self):
"""Go through and establish the defaults on the file system. The approach here was stolen from the CLI tool provided with the module. Idea being that the user should not always need to provide a username and password in order to run the script. If the configuration file is already present with valid data, then lets use it. """ |
if not os.path.exists(CONFIG_PATH):
os.makedirs(CONFIG_PATH)
if not os.path.exists(CONFIG_FILE):
json.dump(CONFIG_DEFAULTS, open(CONFIG_FILE, 'w'), indent=4,
separators=(',', ': '))
config = CONFIG_DEFAULTS
if self._email and self._password:
# Save the configuration locally to pull later on
config['email'] = self._email
config['password'] = str(obfuscate(self._password, 'store'))
self._log.debug("Caching authentication in config file")
json.dump(config, open(CONFIG_FILE, 'w'), indent=4,
separators=(',', ': '))
else:
# Load the config file and override the class
config = json.load(open(CONFIG_FILE))
if config.get('py2', PY2) != PY2:
raise Exception("Python versions have changed. Please run `setup` again to reconfigure the client.")
if config['email'] and config['password']:
self._email = config['email']
self._password = obfuscate(str(config['password']), 'fetch')
self._log.debug("Loaded authentication from config file") |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _session_check(self):
"""Attempt to authenticate the user through a session file. This process is done to avoid having to authenticate the user every single time. It uses a session file that is saved when a valid session is captured and then reused. Because sessions can expire, we need to test the session prior to calling the user authenticated. Right now that is done with a test string found in an unauthenticated session. This approach is not an ideal method, but it works. """ |
if not os.path.exists(SESSION_FILE):
self._log.debug("Session file does not exist")
return False
with open(SESSION_FILE, 'rb') as f:
cookies = requests.utils.cookiejar_from_dict(pickle.load(f))
self._session.cookies = cookies
self._log.debug("Loaded cookies from session file")
response = self._session.get(url=self.TEST_URL, headers=self.HEADERS)
if self.TEST_KEY in str(response.content):
self._log.debug("Session file appears invalid")
return False
self._is_authenticated = True
self._process_state()
return True |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_log_level(self, level):
"""Override the default log level of the class""" |
if level == 'info':
level = logging.INFO
if level == 'debug':
level = logging.DEBUG
if level == 'error':
level = logging.ERROR
self._log.setLevel(level) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _process_state(self):
"""Process the application state configuration. Google Alerts manages the account information and alert data through some custom state configuration. Not all values have been completely enumerated. """ |
self._log.debug("Capturing state from the request")
response = self._session.get(url=self.ALERTS_URL, headers=self.HEADERS)
soup = BeautifulSoup(response.content, "html.parser")
for i in soup.findAll('script'):
if i.text.find('window.STATE') == -1:
continue
state = json.loads(i.text[15:-1])
if state != "":
self._state = state
self._log.debug("State value set: %s" % self._state)
return self._state |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def authenticate(self):
"""Authenticate the user and setup our state.""" |
valid = self._session_check()
if self._is_authenticated and valid:
self._log.debug("[!] User has already authenticated")
return
init = self._session.get(url=self.LOGIN_URL, headers=self.HEADERS)
soup = BeautifulSoup(init.content, "html.parser")
soup_login = soup.find('form').find_all('input')
post_data = dict()
for u in soup_login:
if u.has_attr('name') and u.has_attr('value'):
post_data[u['name']] = u['value']
post_data['Email'] = self._email
post_data['Passwd'] = self._password
response = self._session.post(url=self.AUTH_URL, data=post_data,
headers=self.HEADERS)
if self.CAPTCHA_KEY in str(response.content):
raise AccountCaptcha('Google is forcing a CAPTCHA. To get around this issue, run the google-alerts with the seed option to open an interactive authentication session. Once authenticated, this module will cache your session and load that in the future')
cookies = [x.name for x in response.cookies]
if 'SIDCC' not in cookies:
raise InvalidCredentials("Email or password was incorrect.")
with open(SESSION_FILE, 'wb') as f:
cookies = requests.utils.dict_from_cookiejar(self._session.cookies)
pickle.dump(cookies, f, protocol=2)
self._log.debug("Saved session to disk for future reference")
self._log.debug("User successfully authenticated")
self._is_authenticated = True
self._process_state()
return |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def list(self, term=None):
"""List alerts configured for the account.""" |
if not self._state:
raise InvalidState("State was not properly obtained from the app")
self._process_state()
if not self._state[1]:
self._log.info("No monitors have been created yet.")
return list()
monitors = list()
for monitor in self._state[1][1]:
obj = dict()
obj['monitor_id'] = monitor[1]
obj['user_id'] = monitor[-1]
obj['term'] = monitor[2][3][1]
if term and obj['term'] != term:
continue
obj['language'] = monitor[2][3][3][1]
obj['region'] = monitor[2][3][3][2]
obj['delivery'] = self.DELIVERY[monitor[2][6][0][1]]
obj['match_type'] = self.MONITOR_MATCH_TYPE[monitor[2][5]]
if obj['delivery'] == 'MAIL':
obj['alert_frequency'] = self.ALERT_FREQ[monitor[2][6][0][4]]
obj['email_address'] = monitor[2][6][0][2]
else:
rss_id = monitor[2][6][0][11]
url = "https://google.com/alerts/feeds/{uid}/{fid}"
obj['rss_link'] = url.format(uid=obj['user_id'], fid=rss_id)
monitors.append(obj)
return monitors |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def delete(self, monitor_id):
"""Delete a monitor by ID.""" |
if not self._state:
raise InvalidState("State was not properly obtained from the app")
monitors = self.list() # Get the latest set of monitors
bit = None
for monitor in monitors:
if monitor_id != monitor['monitor_id']:
continue
bit = monitor['monitor_id']
if not bit:
raise MonitorNotFound("No monitor was found with that term.")
url = self.ALERTS_DELETE_URL.format(requestX=self._state[3])
self._log.debug("Deleting alert using: %s" % url)
payload = [None, monitor_id]
params = json.dumps(payload, separators=(',', ':'))
data = {'params': params}
response = self._session.post(url, data=data, headers=self.HEADERS)
if response.status_code != 200:
raise ActionError("Failed to delete by ID: %s"
% response.content)
return True |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def process_view(self, request, view_func, view_args, view_kwargs):
""" Collect data on Class-Based Views """ |
# Purge data in view method cache
# Python 3's keys() method returns an iterator, so force evaluation before iterating.
view_keys = list(VIEW_METHOD_DATA.keys())
for key in view_keys:
del VIEW_METHOD_DATA[key]
self.view_data = {}
try:
cbv = view_func.view_class
except AttributeError:
cbv = False
if cbv:
self.view_data['cbv'] = True
klass = view_func.view_class
self.view_data['bases'] = [base.__name__ for base in inspect.getmro(klass)]
# Inject with drugz
for member in inspect.getmembers(view_func.view_class):
# Check that we are interested in capturing data for this method
# and ensure that a decorated method is not decorated multiple times.
if member[0] in VIEW_METHOD_WHITEIST and member[0] not in PATCHED_METHODS[klass]:
decorate_method(klass, member[0])
PATCHED_METHODS[klass].append(member[0]) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def process_response(self, request, response):
"""Let's handle old-style response processing here, as usual.""" |
# For debug only.
if not settings.DEBUG:
return response
# Check for responses where the data can't be inserted.
content_encoding = response.get('Content-Encoding', '')
content_type = response.get('Content-Type', '').split(';')[0]
if any((getattr(response, 'streaming', False),
'gzip' in content_encoding,
content_type not in _HTML_TYPES)):
return response
content = force_text(response.content, encoding=settings.DEFAULT_CHARSET)
pattern = re.escape('</body>')
bits = re.split(pattern, content, flags=re.IGNORECASE)
if len(bits) > 1:
bits[-2] += debug_payload(request, response, self.view_data)
response.content = "</body>".join(bits)
if response.get('Content-Length', None):
response['Content-Length'] = len(response.content)
return response |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_job_class(klass_str):
""" Return the job class """ |
mod_name, klass_name = klass_str.rsplit('.', 1)
try:
mod = importlib.import_module(mod_name)
except ImportError as e:
logger.error("Error importing job module %s: '%s'", mod_name, e)
return
try:
klass = getattr(mod, klass_name)
except AttributeError:
logger.error("Module '%s' does not define a '%s' class", mod_name, klass_name)
return
return klass |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def invalidate(self, *raw_args, **raw_kwargs):
""" Mark a cached item invalid and trigger an asynchronous job to refresh the cache """ |
args = self.prepare_args(*raw_args)
kwargs = self.prepare_kwargs(**raw_kwargs)
key = self.key(*args, **kwargs)
item = self.cache.get(key)
if item is not None:
expiry, data = item
self.store(key, self.timeout(*args, **kwargs), data)
self.async_refresh(*args, **kwargs) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def delete(self, *raw_args, **raw_kwargs):
""" Remove an item from the cache """ |
args = self.prepare_args(*raw_args)
kwargs = self.prepare_kwargs(**raw_kwargs)
key = self.key(*args, **kwargs)
item = self.cache.get(key)
if item is not None:
self.cache.delete(key) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set(self, *raw_args, **raw_kwargs):
""" Manually set the cache value with its appropriate expiry. """ |
if self.set_data_kwarg in raw_kwargs:
data = raw_kwargs.pop(self.set_data_kwarg)
else:
raw_args = list(raw_args)
data = raw_args.pop()
args = self.prepare_args(*raw_args)
kwargs = self.prepare_kwargs(**raw_kwargs)
key = self.key(*args, **kwargs)
expiry = self.expiry(*args, **kwargs)
logger.debug("Setting %s cache with key '%s', args '%r', kwargs '%r', expiry '%r'",
self.class_path, key, args, kwargs, expiry)
self.store(key, expiry, data) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def store(self, key, expiry, data):
""" Add a result to the cache :key: Cache key to use :expiry: The expiry timestamp after which the result is stale :data: The data to cache """ |
self.cache.set(key, (expiry, data), self.cache_ttl)
if getattr(settings, 'CACHEBACK_VERIFY_CACHE_WRITE', True):
# We verify that the item was cached correctly. This is to avoid a
# Memcache problem where some values aren't cached correctly
# without warning.
__, cached_data = self.cache.get(key, (None, None))
if data is not None and cached_data is None:
raise RuntimeError(
"Unable to save data of type %s to cache" % (
type(data))) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def refresh(self, *args, **kwargs):
""" Fetch the result SYNCHRONOUSLY and populate the cache """ |
result = self.fetch(*args, **kwargs)
self.store(self.key(*args, **kwargs), self.expiry(*args, **kwargs), result)
return result |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def async_refresh(self, *args, **kwargs):
""" Trigger an asynchronous job to refresh the cache """ |
# We trigger the task with the class path to import as well as the
# (a) args and kwargs for instantiating the class
# (b) args and kwargs for calling the 'refresh' method
try:
enqueue_task(
dict(
klass_str=self.class_path,
obj_args=self.get_init_args(),
obj_kwargs=self.get_init_kwargs(),
call_args=args,
call_kwargs=kwargs
),
task_options=self.task_options
)
except Exception:
# Handle exceptions from talking to RabbitMQ - eg connection
# refused. When this happens, we try to run the task
# synchronously.
logger.error("Unable to trigger task asynchronously - failing "
"over to synchronous refresh", exc_info=True)
try:
return self.refresh(*args, **kwargs)
except Exception as e:
# Something went wrong while running the task
logger.error("Unable to refresh data synchronously: %s", e,
exc_info=True)
else:
logger.debug("Failover synchronous refresh completed successfully") |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def should_stale_item_be_fetched_synchronously(self, delta, *args, **kwargs):
""" Return whether to refresh an item synchronously when it is found in the cache but stale """ |
if self.fetch_on_stale_threshold is None:
return False
return delta > (self.fetch_on_stale_threshold - self.lifetime) |
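A worked example with hypothetical settings:

lifetime = 600                     # item considered fresh for 10 minutes
fetch_on_stale_threshold = 900     # hypothetical setting
delta = 400                        # seconds elapsed since the expiry timestamp
print(delta > (fetch_on_stale_threshold - lifetime))   # True -> refresh synchronously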
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def key(self, *args, **kwargs):
""" Return the cache key to use. If you're passing anything but primitive types to the ``get`` method, it's likely that you'll need to override this method. """ |
if not args and not kwargs:
return self.class_path
try:
if args and not kwargs:
return "%s:%s" % (self.class_path, self.hash(args))
# The line might break if your passed values are un-hashable. If
# it does, you need to override this method and implement your own
# key algorithm.
return "%s:%s:%s:%s" % (self.class_path,
self.hash(args),
self.hash([k for k in sorted(kwargs)]),
self.hash([kwargs[k] for k in sorted(kwargs)]))
except TypeError:
raise RuntimeError(
"Unable to generate cache key due to unhashable "
"args or kwargs - you need to implement your own "
"key generation method to avoid this problem") |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def hash(self, value):
""" Generate a hash of the given iterable. This is for use in a cache key. """ |
if is_iterable(value):
value = tuple(to_bytestring(v) for v in value)
return hashlib.md5(six.b(':').join(value)).hexdigest() |
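A standalone approximation of the same digest, assuming to_bytestring reduces to UTF-8 encoding of str(value) (a hypothetical simplification; the real helper is not shown):

import hashlib

def hash_args(values):
    parts = [str(v).encode('utf-8') for v in values]
    return hashlib.md5(b':'.join(parts)).hexdigest()

print(hash_args(('user', 42)))   # stable hex digest suitable for a cache key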
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def perform_async_refresh(cls, klass_str, obj_args, obj_kwargs, call_args, call_kwargs):
""" Re-populate cache using the given job class. The job class is instantiated with the passed constructor args and the refresh method is called with the passed call args. That is:: data = klass(*obj_args, **obj_kwargs).refresh( *call_args, **call_kwargs) :klass_str: String repr of class (eg 'apps.twitter.jobs.FetchTweetsJob') :obj_args: Constructor args :obj_kwargs: Constructor kwargs :call_args: Refresh args :call_kwargs: Refresh kwargs """ |
klass = get_job_class(klass_str)
if klass is None:
logger.error("Unable to construct %s with args %r and kwargs %r",
klass_str, obj_args, obj_kwargs)
return
logger.info("Using %s with constructor args %r and kwargs %r",
klass_str, obj_args, obj_kwargs)
logger.info("Calling refresh with args %r and kwargs %r", call_args,
call_kwargs)
start = time.time()
try:
klass(*obj_args, **obj_kwargs).refresh(
*call_args, **call_kwargs)
except Exception as e:
logger.exception("Error running job: '%s'", e)
else:
duration = time.time() - start
logger.info("Refreshed cache in %.6f seconds", duration) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cacheback(lifetime=None, fetch_on_miss=None, cache_alias=None, job_class=None, task_options=None, **job_class_kwargs):
""" Decorate function to cache its return value. :lifetime: How long to cache items for :fetch_on_miss: Whether to perform a synchronous fetch when no cached result is found :cache_alias: The Django cache alias to store the result into. :job_class: The class to use for running the cache refresh job. Defaults using the FunctionJob. :job_class_kwargs: Any extra kwargs to pass to job_class constructor. Useful with custom job_class implementations. """ |
if job_class is None:
job_class = FunctionJob
job = job_class(lifetime=lifetime, fetch_on_miss=fetch_on_miss,
cache_alias=cache_alias, task_options=task_options,
**job_class_kwargs)
def _wrapper(fn):
# using available_attrs to work around http://bugs.python.org/issue3445
@wraps(fn, assigned=available_attrs(fn))
def __wrapper(*args, **kwargs):
return job.get(fn, *args, **kwargs)
# Assign reference to unwrapped function so that we can access it
# later without descending into infinite regress.
__wrapper.fn = fn
# Assign reference to job so we can use the full Job API
__wrapper.job = job
return __wrapper
return _wrapper |
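A usage sketch of the decorator; not runnable outside a configured Django project with a task backend, and the decorated function below is hypothetical:

@cacheback(lifetime=600, fetch_on_miss=True)
def fetch_profile(user_id):
    return {'id': user_id}   # stand-in for an expensive lookup

# fetch_profile(1) now serves cached data and refreshes it asynchronously;
# the undecorated function remains reachable as fetch_profile.fn.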
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def angle(v1, v2):
"""Return the angle in radians between vectors 'v1' and 'v2'.""" |
v1_u = unit_vector(v1)
v2_u = unit_vector(v2)
return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0)) |
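For example, assuming the conventional unit_vector helper (not shown in these snippets):

import numpy as np

def unit_vector(v):
    return v / np.linalg.norm(v)

print(angle(np.array([1.0, 0.0]), np.array([0.0, 1.0])))   # ~1.5708, i.e. pi/2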
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def keep_high_angle(vertices, min_angle_deg):
"""Keep vertices with angles higher then given minimum.""" |
accepted = []
v = vertices
v1 = v[1] - v[0]
accepted.append((v[0][0], v[0][1]))
for i in range(1, len(v) - 2):
v2 = v[i + 1] - v[i - 1]
diff_angle = np.fabs(angle(v1, v2) * 180.0 / np.pi)
if diff_angle > min_angle_deg:
accepted.append((v[i][0], v[i][1]))
v1 = v[i] - v[i - 1]
accepted.append((v[-1][0], v[-1][1]))
return np.array(accepted, dtype=vertices.dtype) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_contourf_properties(stroke_width, fcolor, fill_opacity, contour_levels, contourf_idx, unit):
"""Set property values for Polygon.""" |
return {
"stroke": fcolor,
"stroke-width": stroke_width,
"stroke-opacity": 1,
"fill": fcolor,
"fill-opacity": fill_opacity,
"title": "%.2f" % contour_levels[contourf_idx] + ' ' + unit
} |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def contour_to_geojson(contour, geojson_filepath=None, min_angle_deg=None, ndigits=5, unit='', stroke_width=1, geojson_properties=None, strdump=False, serialize=True):
"""Transform matplotlib.contour to geojson.""" |
collections = contour.collections
contour_index = 0
line_features = []
for collection in collections:
color = collection.get_edgecolor()
for path in collection.get_paths():
v = path.vertices
if len(v) < 3:
continue
coordinates = keep_high_angle(v, min_angle_deg)
if ndigits:
coordinates = np.around(coordinates, ndigits)
line = LineString(coordinates.tolist())
properties = {
"stroke-width": stroke_width,
"stroke": rgb2hex(color[0]),
"title": "%.2f" % contour.levels[contour_index] + ' ' + unit,
"level-value": float("%.6f" % contour.levels[contour_index]),
"level-index": contour_index
}
if geojson_properties:
properties.update(geojson_properties)
line_features.append(Feature(geometry=line, properties=properties))
contour_index += 1
feature_collection = FeatureCollection(line_features)
return _render_feature_collection(feature_collection, geojson_filepath, strdump, serialize) |
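An end-to-end sketch (requires matplotlib, numpy and geojson). Note that contour.collections is deprecated in recent matplotlib releases, so this mirrors the API the function was written against:

import numpy as np
import matplotlib.pyplot as plt

x = y = np.linspace(-1.0, 1.0, 50)
X, Y = np.meshgrid(x, y)
Z = X ** 2 + Y ** 2
contours = plt.contour(X, Y, Z, levels=5)
gj = contour_to_geojson(contours, min_angle_deg=2, unit='m')   # serialized FeatureCollection by default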
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def contourf_to_geojson_overlap(contourf, geojson_filepath=None, min_angle_deg=None, ndigits=5, unit='', stroke_width=1, fill_opacity=.9, geojson_properties=None, strdump=False, serialize=True):
"""Transform matplotlib.contourf to geojson with overlapping filled contours.""" |
polygon_features = []
contourf_idx = 0
for collection in contourf.collections:
color = collection.get_facecolor()
for path in collection.get_paths():
for coord in path.to_polygons():
if min_angle_deg:
coord = keep_high_angle(coord, min_angle_deg)
coord = np.around(coord, ndigits) if ndigits else coord
polygon = Polygon(coordinates=[coord.tolist()])
fcolor = rgb2hex(color[0])
properties = set_contourf_properties(stroke_width, fcolor, fill_opacity, contourf.levels, contourf_idx, unit)
if geojson_properties:
properties.update(geojson_properties)
feature = Feature(geometry=polygon, properties=properties)
polygon_features.append(feature)
contourf_idx += 1
feature_collection = FeatureCollection(polygon_features)
return _render_feature_collection(feature_collection, geojson_filepath, strdump, serialize) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def contourf_to_geojson(contourf, geojson_filepath=None, min_angle_deg=None, ndigits=5, unit='', stroke_width=1, fill_opacity=.9, geojson_properties=None, strdump=False, serialize=True):
"""Transform matplotlib.contourf to geojson with MultiPolygons.""" |
polygon_features = []
mps = []
contourf_idx = 0
for coll in contourf.collections:
color = coll.get_facecolor()
for path in coll.get_paths():
for coord in path.to_polygons():
if min_angle_deg:
coord = keep_high_angle(coord, min_angle_deg)
coord = np.around(coord, ndigits) if ndigits else coord
op = MP(contourf.levels[contourf_idx], rgb2hex(color[0]))
if op in mps:
for i, k in enumerate(mps):
if k == op:
mps[i].add_coords(coord.tolist())
break
else:
op.add_coords(coord.tolist())
mps.append(op)
contourf_idx += 1
# From here on, the assembled MultiPolygons are extracted into features
contourf_idx = 0
for muli in mps:
polygon = muli.mpoly()
fcolor = muli.color
properties = set_contourf_properties(stroke_width, fcolor, fill_opacity, contourf.levels, contourf_idx, unit)
if geojson_properties:
properties.update(geojson_properties)
feature = Feature(geometry=polygon, properties=properties)
polygon_features.append(feature)
contourf_idx += 1
feature_collection = FeatureCollection(polygon_features)
return _render_feature_collection(feature_collection, geojson_filepath, strdump, serialize) |
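Reusing the toy grid from the sketch above, the filled variant merges all paths that share a level and color into a single MultiPolygon feature:
contourf = plt.contourf(lon, lat, z, levels=8)
geojson_str = contourf_to_geojson(contourf, fill_opacity=0.5, unit='m')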
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_authorize_callback(endpoint, provider_id):
"""Get a qualified URL for the provider to return to upon authorization param: endpoint: Absolute path to append to the application's host """ |
endpoint_prefix = config_value('BLUEPRINT_NAME')
url = url_for(endpoint_prefix + '.' + endpoint, provider_id=provider_id)
return request.url_root[:-1] + url |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def login(provider_id):
"""Starts the provider login OAuth flow""" |
provider = get_provider_or_404(provider_id)
callback_url = get_authorize_callback('login', provider_id)
post_login = request.form.get('next', get_post_login_redirect())
session[config_value('POST_OAUTH_LOGIN_SESSION_KEY')] = post_login
return provider.authorize(callback_url) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def connect(provider_id):
"""Starts the provider connection OAuth flow""" |
provider = get_provider_or_404(provider_id)
callback_url = get_authorize_callback('connect', provider_id)
allow_view = get_url(config_value('CONNECT_ALLOW_VIEW'))
pc = request.form.get('next', allow_view)
session[config_value('POST_OAUTH_CONNECT_SESSION_KEY')] = pc
return provider.authorize(callback_url) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def remove_all_connections(provider_id):
"""Remove all connections for the authenticated user to the specified provider """ |
provider = get_provider_or_404(provider_id)
ctx = dict(provider=provider.name, user=current_user)
deleted = _datastore.delete_connections(user_id=current_user.get_id(),
provider_id=provider_id)
if deleted:
after_this_request(_commit)
msg = ('All connections to %s removed' % provider.name, 'info')
connection_removed.send(current_app._get_current_object(),
user=current_user._get_current_object(),
provider_id=provider_id)
else:
msg = ('Unable to remove connection to %(provider)s' % ctx, 'error')
do_flash(*msg)
return redirect(request.referrer) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def remove_connection(provider_id, provider_user_id):
"""Remove a specific connection for the authenticated user to the specified provider """ |
provider = get_provider_or_404(provider_id)
ctx = dict(provider=provider.name, user=current_user,
provider_user_id=provider_user_id)
deleted = _datastore.delete_connection(user_id=current_user.get_id(),
provider_id=provider_id,
provider_user_id=provider_user_id)
if deleted:
after_this_request(_commit)
msg = ('Connection to %(provider)s removed' % ctx, 'info')
connection_removed.send(current_app._get_current_object(),
user=current_user._get_current_object(),
provider_id=provider_id)
else:
msg = ('Unable to remove connection to %(provider)s' % ctx, 'error')
do_flash(*msg)
return redirect(request.referrer or get_post_login_redirect()) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def connect_handler(cv, provider):
"""Shared method to handle the connection process :param connection_values: A dictionary containing the connection values :param provider_id: The provider ID the connection shoudl be made to """ |
cv.setdefault('user_id', current_user.get_id())
connection = _datastore.find_connection(
provider_id=cv['provider_id'], provider_user_id=cv['provider_user_id'])
if connection is None:
after_this_request(_commit)
connection = _datastore.create_connection(**cv)
msg = ('Connection established to %s' % provider.name, 'success')
connection_created.send(current_app._get_current_object(),
user=current_user._get_current_object(),
connection=connection)
else:
msg = ('A connection is already established with %s '
'to your account' % provider.name, 'notice')
connection_failed.send(current_app._get_current_object(),
user=current_user._get_current_object())
redirect_url = session.pop(config_value('POST_OAUTH_CONNECT_SESSION_KEY'),
get_url(config_value('CONNECT_ALLOW_VIEW')))
do_flash(*msg)
return redirect(redirect_url) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def login_handler(response, provider, query):
"""Shared method to handle the signin process""" |
connection = _datastore.find_connection(**query)
if connection:
after_this_request(_commit)
token_pair = get_token_pair_from_oauth_response(provider, response)
if (token_pair['access_token'] != connection.access_token or
token_pair['secret'] != connection.secret):
connection.access_token = token_pair['access_token']
connection.secret = token_pair['secret']
_datastore.put(connection)
user = connection.user
login_user(user)
key = _social.post_oauth_login_session_key
redirect_url = session.pop(key, get_post_login_redirect())
login_completed.send(current_app._get_current_object(),
provider=provider, user=user)
return redirect(redirect_url)
login_failed.send(current_app._get_current_object(),
provider=provider,
oauth_response=response)
next_url = get_url(_security.login_manager.login_view)
msg = '%s account not associated with an existing user' % provider.name
do_flash(msg, 'error')
return redirect(next_url) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def init_app(self, app, datastore=None):
"""Initialize the application with the Social extension :param app: The Flask application :param datastore: Connection datastore instance """ |
datastore = datastore or self.datastore
for key, value in default_config.items():
app.config.setdefault(key, value)
providers = dict()
for key, config in app.config.items():
if not key.startswith('SOCIAL_') or config is None or key in default_config:
continue
suffix = key.lower().replace('social_', '')
default_module_name = 'flask_social.providers.%s' % suffix
module_name = config.get('module', default_module_name)
module = import_module(module_name)
config = update_recursive(module.config, config)
providers[config['id']] = OAuthRemoteApp(**config)
providers[config['id']].tokengetter(_get_token)
state = _get_state(app, datastore, providers)
app.register_blueprint(create_blueprint(state, __name__))
app.extensions['social'] = state
return state |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def postman(host, port=587, auth=(None, None), force_tls=False, options=None):
""" Creates a Postman object with TLS and Auth middleware. TLS is placed before authentication because usually authentication happens and is accepted only after TLS is enabled. :param auth: Tuple of (username, password) to be used to ``login`` to the server. :param force_tls: Whether TLS should be forced. :param options: Dictionary of keyword arguments to be used when the SMTP class is called. """ |
return Postman(
host=host,
port=port,
middlewares=[
middleware.tls(force=force_tls),
middleware.auth(*auth),
],
**(options or {})
) |
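A hedged sending sketch; the `email` envelope helper, host, and credentials are assumptions in the style of mailthon-like APIs, not guaranteed by this function:
p = postman(host='smtp.example.com', auth=('user', 'secret'), force_tls=True)
envelope = email(sender='Me <me@example.com>',  # `email` is an assumed helper
                 receivers=['you@example.com'],
                 subject='Hello',
                 content='Hi there')
response = p.send(envelope)
assert response.ok  # assumed response API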
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def mime(self):
""" Returns the finalised mime object, after applying the internal headers. Usually this is not to be overriden. """ |
mime = self.mime_object()
self.headers.prepare(mime)
return mime |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_existing_model(model_name):
""" Try to find existing model class named `model_name`. :param model_name: String name of the model class. """ |
try:
model_cls = engine.get_document_cls(model_name)
log.debug('Model `{}` already exists. Using existing one'.format(
model_name))
return model_cls
except ValueError:
log.debug('Model `{}` does not exist'.format(model_name)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def prepare_relationship(config, model_name, raml_resource):
""" Create referenced model if it doesn't exist. When preparing a relationship, we check to see if the model that will be referenced already exists. If not, it is created so that it will be possible to use it in a relationship. Thus the first usage of this model in RAML file must provide its schema in POST method resource body schema. :param model_name: Name of model which should be generated. :param raml_resource: Instance of ramlfications.raml.ResourceNode for which :model_name: will be defined. """ |
if get_existing_model(model_name) is None:
plural_route = '/' + pluralize(model_name.lower())
route = '/' + model_name.lower()
for res in raml_resource.root.resources:
if res.method.upper() != 'POST':
continue
if res.path.endswith(plural_route) or res.path.endswith(route):
break
else:
raise ValueError('Model `{}` used in relationship is not '
'defined'.format(model_name))
setup_data_model(config, res, model_name) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def generate_model_cls(config, schema, model_name, raml_resource, es_based=True):
""" Generate model class. Engine DB field types are determined using `type_fields` and only those types may be used. :param schema: Model schema dict parsed from RAML. :param model_name: String that is used as new model's name. :param raml_resource: Instance of ramlfications.raml.ResourceNode. :param es_based: Boolean indicating if generated model should be a subclass of Elasticsearch-based document class or not. It True, ESBaseDocument is used; BaseDocument is used otherwise. Defaults to True. """ |
from nefertari.authentication.models import AuthModelMethodsMixin
base_cls = engine.ESBaseDocument if es_based else engine.BaseDocument
model_name = str(model_name)
metaclass = type(base_cls)
auth_model = schema.get('_auth_model', False)
bases = []
if config.registry.database_acls:
from nefertari_guards import engine as guards_engine
bases.append(guards_engine.DocumentACLMixin)
if auth_model:
bases.append(AuthModelMethodsMixin)
bases.append(base_cls)
attrs = {
'__tablename__': model_name.lower(),
'_public_fields': schema.get('_public_fields') or [],
'_auth_fields': schema.get('_auth_fields') or [],
'_hidden_fields': schema.get('_hidden_fields') or [],
'_nested_relationships': schema.get('_nested_relationships') or [],
}
if '_nesting_depth' in schema:
attrs['_nesting_depth'] = schema.get('_nesting_depth')
# Generate fields from properties
properties = schema.get('properties', {})
for field_name, props in properties.items():
if field_name in attrs:
continue
db_settings = props.get('_db_settings')
if db_settings is None:
continue
field_kwargs = db_settings.copy()
field_kwargs['required'] = bool(field_kwargs.get('required'))
for default_attr_key in ('default', 'onupdate'):
value = field_kwargs.get(default_attr_key)
if is_callable_tag(value):
field_kwargs[default_attr_key] = resolve_to_callable(value)
type_name = (
field_kwargs.pop('type', 'string') or 'string').lower()
if type_name not in type_fields:
raise ValueError('Unknown type: {}'.format(type_name))
field_cls = type_fields[type_name]
if field_cls is engine.Relationship:
prepare_relationship(
config, field_kwargs['document'],
raml_resource)
if field_cls is engine.ForeignKeyField:
key = 'ref_column_type'
field_kwargs[key] = type_fields[field_kwargs[key]]
if field_cls is engine.ListField:
key = 'item_type'
field_kwargs[key] = type_fields[field_kwargs[key]]
attrs[field_name] = field_cls(**field_kwargs)
# Update model definition with methods and variables defined in registry
attrs.update(registry.mget(model_name))
# Generate new model class
model_cls = metaclass(model_name, tuple(bases), attrs)
setup_model_event_subscribers(config, model_cls, schema)
setup_fields_processors(config, model_cls, schema)
return model_cls, auth_model |
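An illustrative (not normative) schema dict showing the shape this function consumes; the field names, and the availability of `config` and `raml_resource`, are assumptions:
schema = {
    '_public_fields': ['username'],
    '_auth_fields': ['id', 'username', 'email'],
    'properties': {
        'username': {'_db_settings': {'type': 'string', 'required': True}},
        'email': {'_db_settings': {'type': 'string', 'default': ''}},
    },
}
model_cls, is_auth_model = generate_model_cls(
    config, schema, 'User', raml_resource, es_based=True)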
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def handle_model_generation(config, raml_resource):
""" Generates model name and runs `setup_data_model` to get or generate actual model class. :param raml_resource: Instance of ramlfications.raml.ResourceNode. """ |
model_name = generate_model_name(raml_resource)
try:
return setup_data_model(config, raml_resource, model_name)
except ValueError as ex:
raise ValueError('{}: {}'.format(model_name, str(ex))) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def setup_model_event_subscribers(config, model_cls, schema):
""" Set up model event subscribers. :param config: Pyramid Configurator instance. :param model_cls: Model class for which handlers should be connected. :param schema: Dict of model JSON schema. """ |
events_map = get_events_map()
model_events = schema.get('_event_handlers', {})
event_kwargs = {'model': model_cls}
for event_tag, subscribers in model_events.items():
type_, action = event_tag.split('_')
event_objects = events_map[type_][action]
if not isinstance(event_objects, list):
event_objects = [event_objects]
for sub_name in subscribers:
sub_func = resolve_to_callable(sub_name)
config.subscribe_to_events(
sub_func, event_objects, **event_kwargs) |
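An illustrative `_event_handlers` block; the dotted handler paths are hypothetical and are resolved via `resolve_to_callable`:
schema = {
    '_event_handlers': {
        'before_create': ['mypackage.handlers.lowercase_username'],
        'after_update': ['mypackage.handlers.log_change'],
    },
}
setup_model_event_subscribers(config, model_cls, schema)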
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def setup_fields_processors(config, model_cls, schema):
""" Set up model fields' processors. :param config: Pyramid Configurator instance. :param model_cls: Model class for field of which processors should be set up. :param schema: Dict of model JSON schema. """ |
properties = schema.get('properties', {})
for field_name, props in properties.items():
if not props:
continue
processors = props.get('_processors')
backref_processors = props.get('_backref_processors')
if processors:
processors = [resolve_to_callable(val) for val in processors]
setup_kwargs = {'model': model_cls, 'field': field_name}
config.add_field_processors(processors, **setup_kwargs)
if backref_processors:
db_settings = props.get('_db_settings', {})
is_relationship = db_settings.get('type') == 'relationship'
document = db_settings.get('document')
backref_name = db_settings.get('backref_name')
if not (is_relationship and document and backref_name):
continue
backref_processors = [
resolve_to_callable(val) for val in backref_processors]
setup_kwargs = {
'model': engine.get_document_cls(document),
'field': backref_name
}
config.add_field_processors(
backref_processors, **setup_kwargs) |
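An illustrative `properties` block exercising both direct and backref processors; all dotted names and the `Story` model are hypothetical:
properties = {
    'username': {
        '_db_settings': {'type': 'string'},
        '_processors': ['mypackage.processors.strip_whitespace'],
    },
    'stories': {
        '_db_settings': {
            'type': 'relationship',
            'document': 'Story',
            'backref_name': 'owner',
        },
        '_backref_processors': ['mypackage.processors.touch_updated_at'],
    },
}
setup_fields_processors(config, model_cls, {'properties': properties})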
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _setup_ticket_policy(config, params):
""" Setup Pyramid AuthTktAuthenticationPolicy. Notes: * Initial `secret` params value is considered to be a name of config param that represents a cookie name. * `auth_model.get_groups_by_userid` is used as a `callback`. * Also connects basic routes to perform authentication actions. :param config: Pyramid Configurator instance. :param params: Nefertari dictset which contains security scheme `settings`. """ |
from nefertari.authentication.views import (
TicketAuthRegisterView, TicketAuthLoginView,
TicketAuthLogoutView)
log.info('Configuring Pyramid Ticket Authn policy')
if 'secret' not in params:
raise ValueError(
'Missing required security scheme settings: secret')
params['secret'] = config.registry.settings[params['secret']]
auth_model = config.registry.auth_model
params['callback'] = auth_model.get_groups_by_userid
config.add_request_method(
auth_model.get_authuser_by_userid, 'user', reify=True)
policy = AuthTktAuthenticationPolicy(**params)
RegisterViewBase = TicketAuthRegisterView
if config.registry.database_acls:
class RegisterViewBase(ACLAssignRegisterMixin,
TicketAuthRegisterView):
pass
class RamsesTicketAuthRegisterView(RegisterViewBase):
Model = config.registry.auth_model
class RamsesTicketAuthLoginView(TicketAuthLoginView):
Model = config.registry.auth_model
class RamsesTicketAuthLogoutView(TicketAuthLogoutView):
Model = config.registry.auth_model
common_kw = {
'prefix': 'auth',
'factory': 'nefertari.acl.AuthenticationACL',
}
root = config.get_root_resource()
root.add('register', view=RamsesTicketAuthRegisterView, **common_kw)
root.add('login', view=RamsesTicketAuthLoginView, **common_kw)
root.add('logout', view=RamsesTicketAuthLogoutView, **common_kw)
return policy |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _setup_apikey_policy(config, params):
""" Setup `nefertari.ApiKeyAuthenticationPolicy`. Notes: * User may provide model name in :params['user_model']: do define the name of the user model. * `auth_model.get_groups_by_token` is used to perform username and token check * `auth_model.get_token_credentials` is used to get username and token from userid * Also connects basic routes to perform authentication actions. Arguments: :config: Pyramid Configurator instance. :params: Nefertari dictset which contains security scheme `settings`. """ |
from nefertari.authentication.views import (
TokenAuthRegisterView, TokenAuthClaimView,
TokenAuthResetView)
log.info('Configuring ApiKey Authn policy')
auth_model = config.registry.auth_model
params['check'] = auth_model.get_groups_by_token
params['credentials_callback'] = auth_model.get_token_credentials
params['user_model'] = auth_model
config.add_request_method(
auth_model.get_authuser_by_name, 'user', reify=True)
policy = ApiKeyAuthenticationPolicy(**params)
RegisterViewBase = TokenAuthRegisterView
if config.registry.database_acls:
class RegisterViewBase(ACLAssignRegisterMixin,
TokenAuthRegisterView):
pass
class RamsesTokenAuthRegisterView(RegisterViewBase):
Model = auth_model
class RamsesTokenAuthClaimView(TokenAuthClaimView):
Model = auth_model
class RamsesTokenAuthResetView(TokenAuthResetView):
Model = auth_model
common_kw = {
'prefix': 'auth',
'factory': 'nefertari.acl.AuthenticationACL',
}
root = config.get_root_resource()
root.add('register', view=RamsesTokenAuthRegisterView, **common_kw)
root.add('token', view=RamsesTokenAuthClaimView, **common_kw)
root.add('reset_token', view=RamsesTokenAuthResetView, **common_kw)
return policy |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def setup_auth_policies(config, raml_root):
""" Setup authentication, authorization policies. Performs basic validation to check all the required values are present and performs authentication, authorization policies generation using generator functions from `AUTHENTICATION_POLICIES`. :param config: Pyramid Configurator instance. :param raml_root: Instance of ramlfications.raml.RootNode. """ |
log.info('Configuring auth policies')
secured_by_all = raml_root.secured_by or []
secured_by = [item for item in secured_by_all if item]
if not secured_by:
log.info('API is not secured. `secured_by` attribute '
'value missing.')
return
secured_by = secured_by[0]
schemes = {scheme.name: scheme
for scheme in raml_root.security_schemes}
if secured_by not in schemes:
raise ValueError(
'Undefined security scheme used in `secured_by`: {}'.format(
secured_by))
scheme = schemes[secured_by]
if scheme.type not in AUTHENTICATION_POLICIES:
raise ValueError('Unsupported security scheme type: {}'.format(
scheme.type))
# Setup Authentication policy
policy_generator = AUTHENTICATION_POLICIES[scheme.type]
params = dictset(scheme.settings or {})
authn_policy = policy_generator(config, params)
config.set_authentication_policy(authn_policy)
# Setup Authorization policy
authz_policy = ACLAuthorizationPolicy()
config.set_authorization_policy(authz_policy) |
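A hedged wiring sketch during application startup; the RAML file path is hypothetical and `config` is the Pyramid Configurator:
import ramlfications

raml_root = ramlfications.parse('api.raml')  # hypothetical path
setup_auth_policies(config, raml_root)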
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_authuser_model():
""" Define and return AuthUser model using nefertari base classes """ |
from nefertari.authentication.models import AuthUserMixin
from nefertari import engine
class AuthUser(AuthUserMixin, engine.BaseDocument):
__tablename__ = 'ramses_authuser'
return AuthUser |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def generate_acl(config, model_cls, raml_resource, es_based=True):
""" Generate an ACL. Generated ACL class has a `item_model` attribute set to :model_cls:. ACLs used for collection and item access control are generated from a first security scheme with type `x-ACL`. If :raml_resource: has no x-ACL security schemes defined then ALLOW_ALL ACL is used. If the `collection` or `item` settings are empty, then ALLOW_ALL ACL is used. :param model_cls: Generated model class :param raml_resource: Instance of ramlfications.raml.ResourceNode for which ACL is being generated :param es_based: Boolean inidicating whether ACL should query ES or not when getting an object """ |
schemes = raml_resource.security_schemes or []
schemes = [sch for sch in schemes if sch.type == 'x-ACL']
if not schemes:
collection_acl = item_acl = []
log.debug('No ACL scheme applied. Using ACL: {}'.format(item_acl))
else:
sec_scheme = schemes[0]
log.debug('{} ACL scheme applied'.format(sec_scheme.name))
settings = sec_scheme.settings or {}
collection_acl = parse_acl(acl_string=settings.get('collection'))
item_acl = parse_acl(acl_string=settings.get('item'))
class GeneratedACLBase(object):
item_model = model_cls
def __init__(self, request, es_based=es_based):
super(GeneratedACLBase, self).__init__(request=request)
self.es_based = es_based
self._collection_acl = collection_acl
self._item_acl = item_acl
bases = [GeneratedACLBase]
if config.registry.database_acls:
from nefertari_guards.acl import DatabaseACLMixin as GuardsMixin
bases += [DatabaseACLMixin, GuardsMixin]
bases.append(BaseACL)
return type('GeneratedACL', tuple(bases), {}) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getitem_es(self, key):
""" Override to support ACL filtering. To do so: passes `self.request` to `get_item` and uses `ACLFilterES`. """ |
from nefertari_guards.elasticsearch import ACLFilterES
es = ACLFilterES(self.item_model.__name__)
params = {
'id': key,
'request': self.request,
}
obj = es.get_item(**params)
obj.__acl__ = self.item_acl(obj)
obj.__parent__ = self
obj.__name__ = key
return obj |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def generate_model_name(raml_resource):
""" Generate model name. :param raml_resource: Instance of ramlfications.raml.ResourceNode. """ |
resource_uri = get_resource_uri(raml_resource).strip('/')
resource_uri = re.sub(r'\W', ' ', resource_uri)
model_name = inflection.titleize(resource_uri)
return inflection.singularize(model_name).replace(' ', '') |
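The name transformation at the heart of this function, shown standalone (the URI value is illustrative):
import re
import inflection

uri = 'user_profiles'  # e.g. from a '/user_profiles' resource
name = inflection.titleize(re.sub(r'\W', ' ', uri))   # 'User Profiles'
print(inflection.singularize(name).replace(' ', ''))  # 'UserProfile'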
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def resource_view_attrs(raml_resource, singular=False):
""" Generate view method names needed for `raml_resource` view. Collects HTTP method names from resource siblings and dynamic children if exist. Collected methods are then translated to `nefertari.view.BaseView` method names, each of which is used to process a particular HTTP method request. Maps of {HTTP_method: view_method} `collection_methods` and `item_methods` are used to convert collection and item methods respectively. :param raml_resource: Instance of ramlfications.raml.ResourceNode :param singular: Boolean indicating if resource is singular or not """ |
from .views import collection_methods, item_methods
# Singular resource doesn't have collection methods though
# it looks like a collection
if singular:
collection_methods = item_methods
siblings = get_resource_siblings(raml_resource)
http_methods = [sibl.method.lower() for sibl in siblings]
attrs = [collection_methods.get(method) for method in http_methods]
# Check if resource has dynamic child resource like collection/{id}
# If dynamic child resource exists, add its siblings' methods to attrs,
# as both resources are handled by a single view
children = get_resource_children(raml_resource)
http_submethods = [child.method.lower() for child in children
if is_dynamic_uri(child.path)]
attrs += [item_methods.get(method) for method in http_submethods]
return set(filter(bool, attrs)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_events_map():
""" Prepare map of event subscribers. * Extends copies of BEFORE_EVENTS and AFTER_EVENTS maps with 'set' action. * Returns map of {before/after: {action: event class(es)}} """ |
from nefertari import events
set_keys = ('create', 'update', 'replace', 'update_many', 'register')
before_events = events.BEFORE_EVENTS.copy()
before_events['set'] = [before_events[key] for key in set_keys]
after_events = events.AFTER_EVENTS.copy()
after_events['set'] = [after_events[key] for key in set_keys]
return {
'before': before_events,
'after': after_events,
} |
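A sketch of the returned structure; the event classes come from `nefertari.events`, so the exact classes are a usage hint, not a spec:
events_map = get_events_map()
before_create = events_map['before']['create']  # a single event class
before_set = events_map['before']['set']        # a list covering create/update/replace/update_many/register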
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def patch_view_model(view_cls, model_cls):
""" Patches view_cls.Model with model_cls. :param view_cls: View class "Model" param of which should be patched :param model_cls: Model class which should be used to patch view_cls.Model """ |
original_model = view_cls.Model
view_cls.Model = model_cls
try:
yield
finally:
view_cls.Model = original_model |
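A hedged usage sketch, assuming this generator is wrapped with `contextlib.contextmanager` (its yield/finally pattern suggests so); the view, model, context and request names are hypothetical:
with patch_view_model(StoryView, ProfileModel):
    response = StoryView(context, request).show()
# StoryView.Model is restored here, even if show() raised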
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_route_name(resource_uri):
""" Get route name from RAML resource URI. :param resource_uri: String representing RAML resource URI. :returns string: String with route name, which is :resource_uri: stripped of non-word characters. """ |
resource_uri = resource_uri.strip('/')
resource_uri = re.sub(r'\W', '', resource_uri)
return resource_uri |
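Quick behavior checks (underscores survive because `\w` includes them; slashes and braces are stripped):
print(get_route_name('/user_profiles/'))  # 'user_profiles'
print(get_route_name('/users/{id}'))      # 'usersid'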
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def generate_resource(config, raml_resource, parent_resource):
""" Perform complete one resource configuration process This function generates: ACL, view, route, resource, database model for a given `raml_resource`. New nefertari resource is attached to `parent_resource` class which is an instance of `nefertari.resource.Resource`. Things to consider: * Top-level resources must be collection names. * No resources are explicitly created for dynamic (ending with '}') RAML resources as they are implicitly processed by parent collection resources. * Only part of resource path after last '/' is taken into account, thus each level of resource nesting should add one more path element. E.g. /stories -> /stories/{id} and not /stories -> /stories/mystories/{id}. Latter route will be generated at /stories/{id}. :param raml_resource: Instance of ramlfications.raml.ResourceNode. :param parent_resource: Parent nefertari resource object. """ |
from .models import get_existing_model
# Don't generate resources for dynamic routes as they are already
# generated by their parent
resource_uri = get_resource_uri(raml_resource)
if is_dynamic_uri(resource_uri):
if parent_resource.is_root:
raise Exception("Top-level resources can't be dynamic and must "
"represent collections instead")
return
route_name = get_route_name(resource_uri)
log.info('Configuring resource: `{}`. Parent: `{}`'.format(
route_name, parent_resource.uid or 'root'))
# Get DB model. If this is an attribute or singular resource,
# we don't need to get model
is_singular = singular_subresource(raml_resource, route_name)
is_attr_res = attr_subresource(raml_resource, route_name)
if not parent_resource.is_root and (is_attr_res or is_singular):
model_cls = parent_resource.view.Model
else:
model_name = generate_model_name(raml_resource)
model_cls = get_existing_model(model_name)
resource_kwargs = {}
# Generate ACL
log.info('Generating ACL for `{}`'.format(route_name))
resource_kwargs['factory'] = generate_acl(
config,
model_cls=model_cls,
raml_resource=raml_resource)
# Generate dynamic part name
if not is_singular:
resource_kwargs['id_name'] = dynamic_part_name(
raml_resource=raml_resource,
route_name=route_name,
pk_field=model_cls.pk_field())
# Generate REST view
log.info('Generating view for `{}`'.format(route_name))
view_attrs = resource_view_attrs(raml_resource, is_singular)
resource_kwargs['view'] = generate_rest_view(
config,
model_cls=model_cls,
attrs=view_attrs,
attr_view=is_attr_res,
singular=is_singular,
)
# In case of singular resource, model still needs to be generated,
# but we store it on a different view attribute
if is_singular:
model_name = generate_model_name(raml_resource)
view_cls = resource_kwargs['view']
view_cls._parent_model = view_cls.Model
view_cls.Model = get_existing_model(model_name)
# Create new nefertari resource
log.info('Creating new resource for `{}`'.format(route_name))
clean_uri = resource_uri.strip('/')
resource_args = (singularize(clean_uri),)
if not is_singular:
resource_args += (clean_uri,)
return parent_resource.add(*resource_args, **resource_kwargs) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def generate_server(raml_root, config):
""" Handle server generation process. :param raml_root: Instance of ramlfications.raml.RootNode. :param config: Pyramid Configurator instance. """ |
log.info('Server generation started')
if not raml_root.resources:
return
root_resource = config.get_root_resource()
generated_resources = {}
for raml_resource in raml_root.resources:
if raml_resource.path in generated_resources:
continue
# Get Nefertari parent resource
parent_resource = _get_nefertari_parent_resource(
raml_resource, generated_resources, root_resource)
# Get generated resource and store it
new_resource = generate_resource(
config, raml_resource, parent_resource)
if new_resource is not None:
generated_resources[raml_resource.path] = new_resource |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def generate_rest_view(config, model_cls, attrs=None, es_based=True, attr_view=False, singular=False):
""" Generate REST view for a model class. :param model_cls: Generated DB model class. :param attr: List of strings that represent names of view methods, new generated view should support. Not supported methods are replaced with property that raises AttributeError to display MethodNotAllowed error. :param es_based: Boolean indicating if generated view should read from elasticsearch. If True - collection reads are performed from elasticsearch. Database is used for reads otherwise. Defaults to True. :param attr_view: Boolean indicating if ItemAttributeView should be used as a base class for generated view. :param singular: Boolean indicating if ItemSingularView should be used as a base class for generated view. """ |
valid_attrs = (list(collection_methods.values()) +
list(item_methods.values()))
missing_attrs = set(valid_attrs) - set(attrs)
if singular:
bases = [ItemSingularView]
elif attr_view:
bases = [ItemAttributeView]
elif es_based:
bases = [ESCollectionView]
else:
bases = [CollectionView]
if config.registry.database_acls:
from nefertari_guards.view import ACLFilterViewMixin
bases = [SetObjectACLMixin] + bases + [ACLFilterViewMixin]
bases.append(NefertariBaseView)
RESTView = type('RESTView', tuple(bases), {'Model': model_cls})
def _attr_error(*args, **kwargs):
raise AttributeError
for attr in missing_attrs:
setattr(RESTView, attr, property(_attr_error))
return RESTView |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_object_acl(self, obj):
""" Set object ACL on creation if not already present. """ |
if not obj._acl:
from nefertari_guards import engine as guards_engine
acl = self._factory(self.request).generate_item_acl(obj)
obj._acl = guards_engine.ACLField.stringify_acl(acl) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _location(self, obj):
""" Get location of the `obj` Arguments: :obj: self.Model instance. """ |
field_name = self.clean_id_name
return self.request.route_url(
self._resource.uid,
**{self._resource.id_name: getattr(obj, field_name)}) |