text_prompt (string, 100–17.7k chars, ⌀ allowed) | code_prompt (string, 7–9.86k chars, ⌀ allowed) |
---|---|
<SYSTEM_TASK:>
A decorator used to protect methods with HTTP Digest authentication.
<END_TASK>
<USER_TASK:>
Description:
def digest_auth(realm, auth_func):
"""A decorator used to protect methods with HTTP Digest authentication.
""" |
def digest_auth_decorator(func):
def func_replacement(self, *args, **kwargs):
# 'self' here is the RequestHandler object, which is inheriting
# from DigestAuthMixin to get 'get_authenticated_user'
if self.get_authenticated_user(auth_func, realm):
return func(self, *args, **kwargs)
return func_replacement
return digest_auth_decorator |
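A minimal usage sketch (an assumption, not from the source): the handler mixes in the DigestAuthMixin mentioned in the comment, and `auth_func` maps a username to its password:

    import tornado.web

    def passwords(username):
        # hypothetical credential lookup used by the digest challenge
        return {'admin': 'secret'}.get(username)

    class ProtectedHandler(DigestAuthMixin, tornado.web.RequestHandler):
        @digest_auth('My Realm', passwords)
        def get(self):
            self.write('authenticated')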
<SYSTEM_TASK:>
Get a color from the palette
<END_TASK>
<USER_TASK:>
Description:
def color_(self, i=None):
"""
Get a color from the palette
""" |
global palette, color_num
if i is not None:
color_num = i
if color_num == len(palette) - 1:
color_num = 0
res = palette[color_num]
color_num += 1
return res |
<SYSTEM_TASK:>
Set a unique color from a series
<END_TASK>
<USER_TASK:>
Description:
def scolor(self):
"""
Set a unique color from a series
""" |
global palette
color = palette[self.color_index]
if len(palette) - 1 == self.color_index:
self.color_index = 0
else:
self.color_index += 1
self.color(color) |
<SYSTEM_TASK:>
Determine if given filename is an image.
<END_TASK>
<USER_TASK:>
Description:
def is_image(filename):
"""Determine if given filename is an image.""" |
# note: isfile() also accepts symlinks
return os.path.isfile(filename) and filename.lower().endswith(ImageExts) |
<SYSTEM_TASK:>
Creates or updates a CBZ from files in the given comic directory.
<END_TASK>
<USER_TASK:>
Description:
def create_cbz(directory):
"""Creates or updates a CBZ from files in the given comic directory.""" |
if not os.path.isdir(directory):
print("ERROR: Directory", directory, "not found.")
return
base = os.path.basename(directory.rstrip(os.path.sep))
zipname = '%s.cbz' % base
zipname = os.path.join(directory, zipname)
d = os.path.join(directory, 'inorder')
if os.path.isdir(d):
# use directory with ordered symlinks
directory = d
if os.path.exists(zipname):
os.remove(zipname)
with zipfile.ZipFile(zipname, 'w') as myzip:
for filename in sorted(os.listdir(directory)):
fullname = os.path.join(directory, filename)
if is_image(fullname):
myzip.write(fullname)
myzip.comment = get_cbz_comment()
print("INFO: Created", zipname) |
<SYSTEM_TASK:>
Save all the translations of instance in post_save signal handler.
<END_TASK>
<USER_TASK:>
Description:
def translation_save_translated_fields(instance, **kwargs):
"""
Save all the translations of instance in post_save signal handler.
""" |
if not hasattr(instance, '_translation_cache'):
return
for l_id, translation in instance._translation_cache.iteritems():
# set the translation ID just in case the translation was
# created while instance was not stored in the DB yet
# note: we're using _get_pk_val here even though it is
# private, since that's the most reliable way to get the value
# on older Django (pk property did not exist yet)
translation.master_id = instance._get_pk_val()
translation.save() |
<SYSTEM_TASK:>
Fill the translation cache using information received in the
<END_TASK>
<USER_TASK:>
Description:
def fill_translation_cache(instance):
"""
Fill the translation cache using information received in the
instance objects as extra fields.
You can not do this in post_init because the extra fields are
assigned by QuerySet.iterator after model initialization.
""" |
if hasattr(instance, '_translation_cache'):
# do not refill the cache
return
instance._translation_cache = {}
# unsaved instances cannot have translations
if not instance.pk:
return
for language_code in get_language_code_list():
# see if translation for language_code was in the query
field_alias = get_translated_field_alias('code', language_code)
if getattr(instance, field_alias, None) is not None:
field_names = [f.attname for f in instance._meta.translation_model._meta.fields]
# if so, create a translation object and put it in the cache
field_data = {}
for fname in field_names:
field_data[fname] = getattr(instance,
get_translated_field_alias(fname, language_code))
translation = instance._meta.translation_model(**field_data)
instance._translation_cache[language_code] = translation
# In some situations an (existing in the DB) object is loaded
# without using the normal QuerySet. In such case fallback to
# loading the translations using a separate query.
# Unfortunately, this is indistinguishable from the situation when
# an object does not have any translations. Oh well, we'll have
# to live with this for the time being.
if not instance._translation_cache:
for translation in instance.translations.all():
instance._translation_cache[translation.language_code] = translation |
<SYSTEM_TASK:>
Generate get_'field name' method for field field_name.
<END_TASK>
<USER_TASK:>
Description:
def getter_generator(field_name, short_description):
"""
Generate get_'field name' method for field field_name.
""" |
def get_translation_field(cls, language_code=None, fallback=False):
try:
return cls.get_translation(language_code,
fallback=fallback,
field=field_name)
except TranslationDoesNotExist:
return None
get_translation_field.short_description = short_description
return get_translation_field |
<SYSTEM_TASK:>
Generate set_'field name' method for field field_name.
<END_TASK>
<USER_TASK:>
Description:
def setter_generator(field_name):
"""
Generate set_'field name' method for field field_name.
""" |
def set_translation_field(cls, value, language_code=None):
setattr(cls.get_translation(language_code, True),
field_name, value)
set_translation_field.short_description = "set " + field_name
return set_translation_field |
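A sketch of how these generators are typically wired onto the model class; the model and field names here are illustrative, not confirmed by the source:

    # attach get_<field>/set_<field> accessors for each translatable field
    for fname in ('title', 'body'):  # hypothetical translatable fields
        setattr(MyModel, 'get_%s' % fname,
                getter_generator(fname, 'get %s' % fname))
        setattr(MyModel, 'set_%s' % fname, setter_generator(fname))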
<SYSTEM_TASK:>
Get a translation instance for the given `language_id_or_code`.
<END_TASK>
<USER_TASK:>
Description:
def get_translation(cls, language_code, create_if_necessary=False,
fallback=False, field=None):
"""
Get a translation instance for the given `language_code`.
If the translation does not exist:
1. if `create_if_necessary` is True, this function will create one
2. otherwise, if `fallback` is True, this function will search the
list of languages looking for the first existing translation
3. if all of the above fails to find a translation, raise the
TranslationDoesNotExist exception
""" |
# fill the cache if necessary
cls.fill_translation_cache()
if language_code is None:
language_code = getattr(cls, '_default_language', None)
if language_code is None:
language_code = get_default_language()
force = False
if GLL.is_active:
language_code = GLL.language_code
force = True
if language_code in cls._translation_cache:
transobj = cls._translation_cache.get(language_code, None)
if field is None:
return transobj
value = getattr(transobj, field)
if value or force or (not fallback):
return value
if create_if_necessary:
new_translation = cls._meta.translation_model(master=cls,
language_code=language_code)
cls._translation_cache[language_code] = new_translation
return new_translation
# only fall back if we're not in 'force' mode (GLL)
elif (not force) and fallback:
for fb_lang_code in get_fallbacks(language_code):
transobj = cls._translation_cache.get(fb_lang_code, None)
if transobj:
if field is None:
return transobj
else:
value = getattr(transobj, field)
if value:
return value
raise TranslationDoesNotExist(language_code) |
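For example, given a saved multilingual instance (hypothetical `article`), the resolution order above plays out as:

    article.get_translation('de')                 # exact hit or TranslationDoesNotExist
    article.get_translation('de', fallback=True)  # tries get_fallbacks('de') before raising
    article.get_translation('de', create_if_necessary=True)  # returns a new empty translation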
<SYSTEM_TASK:>
Handle the inner 'Translation' class.
<END_TASK>
<USER_TASK:>
Description:
def contribute_to_class(cls, main_cls, name):
"""
Handle the inner 'Translation' class.
""" |
# delay the creation of the *Translation until the master model is
# fully created
signals.class_prepared.connect(cls.finish_multilingual_class,
sender=main_cls, weak=False)
# connect the post_save signal on master class to a handler
# that saves translations
signals.post_save.connect(translation_save_translated_fields,
sender=main_cls) |
<SYSTEM_TASK:>
Return a list of fields with "unique" attribute, which needs to
<END_TASK>
<USER_TASK:>
Description:
def get_unique_fields(cls):
"""
Return a list of fields with "unique" attribute, which needs to
be augmented by the language.
""" |
unique_fields = []
for fname, field in cls.__dict__.items():
if isinstance(field, models.fields.Field):
if getattr(field, 'unique', False):
try:
field.unique = False
except AttributeError:
# newer Django defines unique as a property
# that uses _unique to store data. We're
# jumping over the fence by setting _unique,
# so this sucks, but this happens early enough
# to be safe.
field._unique = False
unique_fields.append(fname)
return unique_fields |
<SYSTEM_TASK:>
Create a model with translations of a multilingual class.
<END_TASK>
<USER_TASK:>
Description:
def finish_multilingual_class(cls, *args, **kwargs):
"""
Create a model with translations of a multilingual class.
""" |
main_cls = kwargs['sender']
translation_model_name = main_cls.__name__ + "Translation"
# create the model with all the translatable fields
unique = [('language_code', 'master')]
for f in cls.get_unique_fields():
unique.append(('language_code', f))
class TransMeta:
pass
try:
meta = cls.Meta
except AttributeError:
meta = TransMeta
meta.ordering = ('language_code',)
meta.unique_together = tuple(unique)
meta.app_label = main_cls._meta.app_label
if not hasattr(meta, 'db_table'):
meta.db_table = main_cls._meta.db_table + '_translation'
trans_attrs = cls.__dict__.copy()
trans_attrs['Meta'] = meta
# TODO: increase the length of this field, but to what???
trans_attrs['language_code'] = models.CharField(max_length=15, blank=True,
choices=get_language_choices(),
db_index=True)
related_name = getattr(meta, 'related_name', 'translations')
if hasattr(meta, 'related_name'):
delattr(meta, 'related_name')
edit_inline = True
trans_attrs['master'] = TranslationForeignKey(main_cls, blank=False, null=False,
related_name=related_name,)
trans_attrs['__str__'] = lambda self: ("%s object, language_code=%s"
% (translation_model_name,
self.language_code))
trans_model = ModelBase(translation_model_name, (models.Model,), trans_attrs)
trans_model._meta.translated_fields = cls.create_translation_attrs(main_cls)
trans_model._meta.related_name = related_name
_old_init_name_map = main_cls._meta.__class__.init_name_map
def init_name_map(self):
cache = _old_init_name_map(self)
for name, field_and_lang_id in trans_model._meta.translated_fields.items():
#import sys; sys.stderr.write('TM %r\n' % trans_model)
cache[name] = (field_and_lang_id[0], trans_model, True, False)
return cache
main_cls._meta.init_name_map = instancemethod(init_name_map,
main_cls._meta,
main_cls._meta.__class__)
main_cls._meta.translation_model = trans_model
main_cls._meta.force_language = None
main_cls.Translation = trans_model
main_cls.get_translation = get_translation
main_cls.fill_translation_cache = fill_translation_cache |
<SYSTEM_TASK:>
Image file name is UNIX time stamp & something for most of the comics...
<END_TASK>
<USER_TASK:>
Description:
def namer(cls, imageUrl, pageUrl):
"""Image file name is UNIX time stamp & something for most of the comics...""" |
start = ''
tsmatch = compile(r'/(\d+)-').search(imageUrl)
if tsmatch:
start = datetime.utcfromtimestamp(int(tsmatch.group(1))).strftime("%Y-%m-%d")
else:
# Only chapter 1, pages 4 and 5 did not match this pattern when
# this was written...
start = '2015-04-11x'
return start + "-" + pageUrl.rsplit('/', 1)[-1] |
<SYSTEM_TASK:>
Use strip index number for image name.
<END_TASK>
<USER_TASK:>
Description:
def namer(cls, imageUrl, pageUrl):
"""Use strip index number for image name.""" |
index = int(compile(r'id=(\d+)').search(pageUrl).group(1))
ext = imageUrl.rsplit('.', 1)[1]
return "SnowFlakes-%d.%s" % (index, ext) |
<SYSTEM_TASK:>
Use page URL to construct meaningful image name.
<END_TASK>
<USER_TASK:>
Description:
def namer(cls, imageUrl, pageUrl):
"""Use page URL to construct meaningful image name.""" |
parts, year, month, stripname = pageUrl.rsplit('/', 3)
stripname = stripname.rsplit('.', 1)[0]
parts, imagename = imageUrl.rsplit('/', 1)
return '%s-%s-%s-%s' % (year, month, stripname, imagename) |
<SYSTEM_TASK:>
Overrides the Query method get_compiler in order to return
<END_TASK>
<USER_TASK:>
Description:
def get_compiler(self, using=None, connection=None):
""" Overrides the Query method get_compiler in order to return
an instance of the above custom compiler.
""" |
# Copy the body of this method from Django except the final
# return statement. We will ignore code coverage for this.
if using is None and connection is None: # pragma: no cover
raise ValueError("Need either using or connection")
if using:
connection = connections[using]
# Check that the compiler will be able to execute the query
for alias, aggregate in self.annotation_select.items():
connection.ops.check_expression_support(aggregate)
# Instantiate the custom compiler.
return {
CTEUpdateQuery: CTEUpdateQueryCompiler,
CTEInsertQuery: CTEInsertQueryCompiler,
CTEDeleteQuery: CTEDeleteQueryCompiler,
CTEAggregateQuery: CTEAggregateQueryCompiler,
}.get(self.__class__, CTEQueryCompiler)(self, connection, using) |
<SYSTEM_TASK:>
Temporarily add the message to the stack to publish to RabbitMQ
<END_TASK>
<USER_TASK:>
Description:
def _add_to_publish_stack(self, exchange, routing_key, message, properties):
"""Temporarily add the message to the stack to publish to RabbitMQ
:param str exchange: The exchange to publish to
:param str routing_key: The routing key to publish with
:param str message: The message body
:param pika.BasicProperties properties: The message properties
""" |
global message_stack
message_stack.append((exchange, routing_key, message, properties)) |
<SYSTEM_TASK:>
Connect to RabbitMQ and assign a local attribute
<END_TASK>
<USER_TASK:>
Description:
def _connect_to_rabbitmq(self):
"""Connect to RabbitMQ and assign a local attribute""" |
global pending_rabbitmq_connection, rabbitmq_connection
if not rabbitmq_connection:
LOGGER.info('Creating a new RabbitMQ connection')
pending_rabbitmq_connection = self._new_rabbitmq_connection() |
<SYSTEM_TASK:>
Create a BasicProperties object, with the properties specified
<END_TASK>
<USER_TASK:>
Description:
def _new_message_properties(self, content_type=None, content_encoding=None,
headers=None, delivery_mode=None, priority=None,
correlation_id=None, reply_to=None,
expiration=None, message_id=None,
timestamp=None, message_type=None, user_id=None,
app_id=None):
"""Create a BasicProperties object, with the properties specified
:param str content_type: MIME content type
:param str content_encoding: MIME content encoding
:param dict headers: Message header field table
:param int delivery_mode: Non-persistent (1) or persistent (2)
:param int priority: Message priority, 0 to 9
:param str correlation_id: Application correlation identifier
:param str reply_to: Address to reply to
:param str expiration: Message expiration specification
:param str message_id: Application message identifier
:param int timestamp: Message timestamp
:param str message_type: Message type name
:param str user_id: Creating user id
:param str app_id: Creating application id
:rtype: pika.BasicProperties
""" |
return pika.BasicProperties(content_type, content_encoding, headers,
delivery_mode, priority, correlation_id,
reply_to, expiration, message_id, timestamp,
message_type, user_id, app_id) |
<SYSTEM_TASK:>
Called when pika is connected and has a channel open to publish
<END_TASK>
<USER_TASK:>
Description:
def _publish_deferred_messages(self):
"""Called when pika is connected and has a channel open to publish
any requests buffered.
""" |
global message_stack
if not self._rabbitmq_is_closed and message_stack:
LOGGER.info('Publishing %i deferred message(s)', len(message_stack))
while message_stack:
self._publish_message(*message_stack.pop()) |
<SYSTEM_TASK:>
Publish the message to RabbitMQ
<END_TASK>
<USER_TASK:>
Description:
def _publish_message(self, exchange, routing_key, message, properties):
"""Publish the message to RabbitMQ
:param str exchange: The exchange to publish to
:param str routing_key: The routing key to publish with
:param str message: The message body
:param pika.BasicProperties properties: The message properties
""" |
if self._rabbitmq_is_closed or not self._rabbitmq_channel:
LOGGER.warning('Temporarily buffering message to publish')
self._add_to_publish_stack(exchange, routing_key,
message, properties)
return
self._rabbitmq_channel.basic_publish(exchange, routing_key,
message, properties) |
<SYSTEM_TASK:>
Return a pika ConnectionParameters object using the configuration
<END_TASK>
<USER_TASK:>
Description:
def _rabbitmq_parameters(self):
"""Return a pika ConnectionParameters object using the configuration
from the configuration service. The configuration dictionary should
match the parameters for pika.connection.ConnectionParameters and
include an extra username and password variable.
:rtype: pika.ConnectionParameters
""" |
kwargs = dict(self._rabbitmq_config)
kwargs['credentials'] = pika.PlainCredentials(kwargs['username'],
kwargs['password'])
for key in ['username', 'password']:
del kwargs[key]
return pika.ConnectionParameters(**kwargs) |
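A sketch of a configuration dictionary this method expects, assuming keys that mirror pika.connection.ConnectionParameters plus the extra username/password pair:

    rabbitmq_config = {
        'host': 'localhost',
        'port': 5672,
        'virtual_host': '/',
        'username': 'guest',  # removed and wrapped into PlainCredentials
        'password': 'guest',
    }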
<SYSTEM_TASK:>
Assign the channel object to the tinman global object.
<END_TASK>
<USER_TASK:>
Description:
def _set_rabbitmq_channel(self, channel):
"""Assign the channel object to the tinman global object.
:param pika.channel.Channel channel: The pika channel
""" |
setattr(self.application.attributes, self.CHANNEL, channel) |
<SYSTEM_TASK:>
Called when the RabbitMQ accepts the channel open request.
<END_TASK>
<USER_TASK:>
Description:
def on_rabbitmq_channel_open(self, channel):
"""Called when the RabbitMQ accepts the channel open request.
:param pika.channel.Channel channel: The channel opened with RabbitMQ
""" |
LOGGER.info('Channel %i is opened for communication with RabbitMQ',
channel.channel_number)
self._set_rabbitmq_channel(channel)
self._publish_deferred_messages() |
<SYSTEM_TASK:>
Prepare the handler, ensuring RabbitMQ is connected or start a new
<END_TASK>
<USER_TASK:>
Description:
def prepare(self):
"""Prepare the handler, ensuring RabbitMQ is connected or start a new
connection attempt.
""" |
super(RabbitMQRequestHandler, self).prepare()
if self._rabbitmq_is_closed:
self._connect_to_rabbitmq() |
<SYSTEM_TASK:>
This function runs one iteration of the IRC client. This is called in a loop
<END_TASK>
<USER_TASK:>
Description:
def run_once(self):
"""This function runs one iteration of the IRC client. This is called in a loop
by the run_loop function. It can be called separately, but most of the
time there is no need to do this.
""" |
packet = _parse_irc_packet(next(self.lines)) #Get next line from generator
for event_handler in list(self.on_packet_received):
event_handler(self, packet)
if packet.command == "PRIVMSG":
if packet.arguments[0].startswith("#"):
for event_handler in list(self.on_public_message):
event_handler(self, packet.arguments[0], packet.prefix.split("!")[0], packet.arguments[1])
else:
for event_handler in list(self.on_private_message):
event_handler(self, packet.prefix.split("!")[0], packet.arguments[1])
elif packet.command == "PING":
self.send_line("PONG :{}".format(packet.arguments[0]))
for event_handler in list(self.on_ping):
event_handler(self)
elif packet.command == "433" or packet.command == "437":
#Command 433 is "Nick in use"
#Add underscore to the nick
self.set_nick("{}_".format(self.nick))
elif packet.command == "001":
for event_handler in list(self.on_welcome):
event_handler(self)
elif packet.command == "JOIN":
for event_handler in list(self.on_join):
event_handler(self, packet.arguments[0], packet.prefix.split("!")[0])
elif packet.command == "PART":
for event_handler in list(self.on_leave):
event_handler(self, packet.arguments[0], packet.prefix.split("!")[0]) |
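A minimal sketch of driving the client; the constructor name and run loop are assumptions based on the handler lists used above:

    client = IRCClient()  # hypothetical constructor
    def on_msg(bot, channel, sender, message):
        print('%s in %s: %s' % (sender, channel, message))
    client.on_public_message.append(on_msg)
    client.connect('irc.example.org')
    while True:
        client.run_once()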
<SYSTEM_TASK:>
Connects to a given IRC server. After the connection is established, it calls
<END_TASK>
<USER_TASK:>
Description:
def connect(self, server, port=6667):
"""Connects to a given IRC server. After the connection is established, it calls
the on_connect event handler.
""" |
self.socket.connect((server, port))
self.lines = self._read_lines()
for event_handler in list(self.on_connect):
event_handler(self) |
<SYSTEM_TASK:>
Sets or changes your nick. This should be called before joining channels, but
<END_TASK>
<USER_TASK:>
Description:
def set_nick(self, nick):
"""Sets or changes your link. This should be called before joining channels, but
can be called at any time afterwards. If the requested nickname is not
available, the library will keep adding an underscore until a suitable
nick is found.
""" |
self.nick = nick
self.send_line("NICK {}".format(nick)) |
<SYSTEM_TASK:>
Fit the model according to the given training data.
<END_TASK>
<USER_TASK:>
Description:
def fit(self, X, y, verbosity=0):
"""Fit the model according to the given training data.
Parameters
----------
X : List of ndarrays, one for each training example.
Each training example's shape is (string1_len, string2_len, n_features), where
string1_len and string2_len are the length of the two training strings and n_features the
number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
Returns
-------
self : object
Returns self.
""" |
self.classes = list(set(y))
n_points = len(y)
if len(X) != n_points:
raise Exception('Number of training examples must match the number of training labels.')
if not self._state_machine:
self._state_machine = DefaultStateMachine(self.classes)
# Initialize the parameters given the state machine, features, and target classes.
self.parameters = self._initialize_parameters(self._state_machine, X[0].shape[2])
# Create a new model object for each training example
models = [_Model(self._state_machine, x, ty) for x, ty in zip(X, y)]
self._evaluation_count = 0
def _objective(parameters):
gradient = np.zeros(self.parameters.shape)
ll = 0.0 # Log likelihood
# TODO: Embarrassingly parallel
for model in models:
dll, dgradient = model.forward_backward(parameters.reshape(self.parameters.shape))
ll += dll
gradient += dgradient
parameters_without_bias = np.array(parameters, dtype='float64') # exclude the bias parameters from being regularized
parameters_without_bias[0] = 0
ll -= self.l2_regularization * np.dot(parameters_without_bias.T, parameters_without_bias)
gradient = gradient.flatten() - 2.0 * self.l2_regularization * parameters_without_bias
if verbosity > 0:
if self._evaluation_count == 0:
print('{:10} {:10} {:10}'.format('Iteration', 'Log-likelihood', '|gradient|'))
if self._evaluation_count % verbosity == 0:
print('{:10} {:10.4} {:10.4}'.format(self._evaluation_count, ll, (abs(gradient).sum())))
self._evaluation_count += 1
# TODO: Allow some of the parameters to be frozen. ie. not trained. Can later also completely remove
# TODO: the computation associated with these parameters.
return -ll, -gradient
def _objective_copy_gradient(parameters, g):
nll, ngradient = _objective(parameters)
g[:] = ngradient
return nll
if self._optimizer:
self.optimizer_result = self._optimizer(_objective, self.parameters.flatten(), **self._optimizer_kwargs)
self.parameters = self.optimizer_result[0].reshape(self.parameters.shape)
else:
optimizer = lbfgs.LBFGS()
final_betas = optimizer.minimize(_objective_copy_gradient,
x0=self.parameters.flatten(),
progress=None)
self.optimizer_result = final_betas
self.parameters = final_betas.reshape(self.parameters.shape)
return self |
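A toy fit on random data, assuming the surrounding estimator class (called `Hacrf` here for illustration) exposes this method; shapes follow the docstring:

    import numpy as np

    # six example pairs, each a (string1_len, string2_len, n_features) array
    X = [np.random.rand(4, 5, 3) for _ in range(6)]
    y = ['match', 'mismatch'] * 3
    model = Hacrf()  # hypothetical constructor
    model.fit(X, y, verbosity=10)
    print(model.predict(X))  # one class label per example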
<SYSTEM_TASK:>
Predict the class for X.
<END_TASK>
<USER_TASK:>
Description:
def predict(self, X):
"""Predict the class for X.
The predicted class for each sample in X is returned.
Parameters
----------
X : List of ndarrays, one for each training example.
Each training example's shape is (string1_len,
string2_len, n_features), where string1_len and
string2_len are the length of the two training strings and
n_features the number of features.
Returns
-------
y : iterable of shape = [n_samples]
The predicted classes.
""" |
return [self.classes[prediction.argmax()] for prediction in self.predict_proba(X)] |
<SYSTEM_TASK:>
Helper to create initial parameter vector with the correct shape.
<END_TASK>
<USER_TASK:>
Description:
def _initialize_parameters(state_machine, n_features):
""" Helper to create initial parameter vector with the correct shape. """ |
return np.zeros((state_machine.n_states
+ state_machine.n_transitions,
n_features)) |
<SYSTEM_TASK:>
Run the forward backward algorithm with the given parameters.
<END_TASK>
<USER_TASK:>
Description:
def forward_backward(self, parameters):
""" Run the forward backward algorithm with the given parameters. """ |
# If the features are sparse, we can use an optimization.
# I'm not using scipy.sparse here because we want to avoid a scipy dependency and also scipy.sparse doesn't seem
# to handle arrays of shape higher than 2.
if isinstance(self.sparse_x, str) and self.sparse_x == 'uninitialized':
if (self.x == 0).sum() * 1.0 / self.x.size > 0.6:
self.sparse_x = self._construct_sparse_features(self.x)
else:
self.sparse_x = 'not sparse'
I, J, K = self.x.shape
if not isinstance(self.sparse_x, str):
C = self.sparse_x[0].shape[2]
S, _ = parameters.shape
x_dot_parameters = np.zeros((I, J, S))
sparse_multiply(x_dot_parameters, self.sparse_x[0], self.sparse_x[1], parameters.T, I, J, K, C, S)
else:
x_dot_parameters = np.dot(self.x, parameters.T) # Pre-compute the dot product
alpha = self._forward(x_dot_parameters)
beta = self._backward(x_dot_parameters)
classes_to_ints = {k: i for i, k in enumerate(set(self.states_to_classes.values()))}
states_to_classes = np.array([classes_to_ints[self.states_to_classes[state]]
for state in range(max(self.states_to_classes.keys()) + 1)], dtype='int64')
if not isinstance(self.sparse_x, str):
ll, deriv = gradient_sparse(alpha, beta, parameters, states_to_classes,
self.sparse_x[0], self.sparse_x[1], classes_to_ints[self.y],
I, J, self.sparse_x[0].shape[2])
else:
ll, deriv = gradient(alpha, beta, parameters, states_to_classes,
self.x, classes_to_ints[self.y], I, J, K)
return ll, deriv |
<SYSTEM_TASK:>
Run forward algorithm to find the predicted distribution over classes.
<END_TASK>
<USER_TASK:>
Description:
def predict(self, parameters, viterbi):
""" Run forward algorithm to find the predicted distribution over classes. """ |
x_dot_parameters = np.einsum('ijk,kl->ijl', self.x, parameters)
if not viterbi:
alpha = forward_predict(self._lattice, x_dot_parameters,
self.state_machine.n_states)
else:
alpha = forward_max_predict(self._lattice, x_dot_parameters,
self.state_machine.n_states)
I, J, _ = self.x.shape
class_Z = {}
Z = -np.inf
for state, predicted_class in self.states_to_classes.items():
weight = alpha[I - 1, J - 1, state]
class_Z[self.states_to_classes[state]] = weight
Z = np.logaddexp(Z, weight)
return {label: np.exp(class_z - Z) for label, class_z in class_Z.items()} |
<SYSTEM_TASK:>
Helper to calculate the forward weights.
<END_TASK>
<USER_TASK:>
Description:
def _forward(self, x_dot_parameters):
""" Helper to calculate the forward weights. """ |
return forward(self._lattice, x_dot_parameters,
self.state_machine.n_states) |
<SYSTEM_TASK:>
Helper to calculate the backward weights.
<END_TASK>
<USER_TASK:>
Description:
def _backward(self, x_dot_parameters):
""" Helper to calculate the backward weights. """ |
I, J, _ = self.x.shape
return backward(self._lattice, x_dot_parameters, I, J,
self.state_machine.n_states) |
<SYSTEM_TASK:>
Helper to construct a sparse representation of the features.
<END_TASK>
<USER_TASK:>
Description:
def _construct_sparse_features(self, x):
""" Helper to construct a sparse representation of the features. """ |
I, J, K = x.shape
new_array_height = (x != 0).sum(axis=2).max()
index_array = -np.ones((I, J, new_array_height), dtype='int64')
value_array = -np.ones((I, J, new_array_height), dtype='float64')
populate_sparse_features(x, index_array, value_array, I, J, K)
return index_array, value_array |
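A small worked example of the representation built above (values are illustrative):

    # x[i, j] == [0.0, 2.5, 0.0, 7.0] with new_array_height == 2 yields
    #   index_array[i, j] == [1, 3]      # positions of the non-zero features
    #   value_array[i, j] == [2.5, 7.0]  # the corresponding values
    # cells with fewer non-zeros keep the -1 / -1.0 padding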
<SYSTEM_TASK:>
Convert the main dataframe to javascript code
<END_TASK>
<USER_TASK:>
Description:
def to_javascript_(self, table_name: str="data") -> str:
"""Convert the main dataframe to javascript code
:param table_name: javascript variable name, defaults to "data"
:param table_name: str, optional
:return: a javascript constant with the data
:rtype: str
:example: ``ds.to_javascript_("myconst")``
""" |
try:
renderer = pytablewriter.JavaScriptTableWriter
data = self._build_export(renderer, table_name)
return data
except Exception as e:
self.err(e, "Can not convert data to javascript code") |
<SYSTEM_TASK:>
Convert the main dataframe to markdown
<END_TASK>
<USER_TASK:>
Description:
def to_markdown_(self) -> str:
"""Convert the main dataframe to markdown
:return: markdown data
:rtype: str
:example: ``ds.to_markdown_()``
""" |
try:
renderer = pytablewriter.MarkdownTableWriter
data = self._build_export(renderer)
return data
except Exception as e:
self.err(e, "Can not convert data to markdown") |
<SYSTEM_TASK:>
Convert the main dataframe to restructured text
<END_TASK>
<USER_TASK:>
Description:
def to_rst_(self) -> str:
"""Convert the main dataframe to restructured text
:return: rst data
:rtype: str
:example: ``ds.to_rst_()``
""" |
try:
renderer = pytablewriter.RstGridTableWriter
data = self._build_export(renderer)
return data
except Exception as e:
self.err(e, "Can not convert data to restructured text") |
<SYSTEM_TASK:>
Convert the main dataframe to a python list
<END_TASK>
<USER_TASK:>
Description:
def to_python_(self, table_name: str="data") -> list:
"""Convert the main dataframe to python a python list
:param table_name: python variable name, defaults to "data"
:param table_name: str, optional
:return: a python list of lists with the data
:rtype: list
:example: ``ds.to_python_("myvar")``
""" |
try:
renderer = pytablewriter.PythonCodeTableWriter
data = self._build_export(renderer, table_name)
return data
except Exception as e:
self.err(e, "Can not convert data to python list") |
<SYSTEM_TASK:>
Convert the main dataframe to a numpy array
<END_TASK>
<USER_TASK:>
Description:
def to_numpy_(self, table_name: str="data") -> numpy.ndarray:
"""Convert the main dataframe to a numpy array
:param table_name: name of the python variable, defaults to "data"
:param table_name: str, optional
:return: a numpy array
:rtype: numpy.ndarray
:example: ``ds.to_numpy_("myvar")``
""" |
try:
renderer = pytablewriter.NumpyTableWriter
data = self._build_export(renderer, table_name)
return data
except Exception as e:
self.err(e, "Can not convert data to numpy array") |
<SYSTEM_TASK:>
Returns a list of dictionary records from the main dataframe
<END_TASK>
<USER_TASK:>
Description:
def to_records_(self) -> list:
"""Returns a list of dictionary records from the main dataframe
:return: a list of dictionary records with the data
:rtype: list
:example: ``ds.to_records_()``
""" |
try:
dic = self.df.to_dict(orient="records")
return dic
except Exception as e:
self.err(e, "Can not convert data to records") |
<SYSTEM_TASK:>
Write the main dataframe to a csv file
<END_TASK>
<USER_TASK:>
Description:
def to_csv(self, filepath: str, index: bool=False, **kwargs):
"""Write the main dataframe to a csv file
:param filepath: path of the file to save
:type filepath: str
:param index: write the row index column, defaults to False
:param index: bool, optional
:param \*\*kwargs: arguments to pass to ``pd.DataFrame.to_csv``
:example: ``ds.to_csv("myfile.csv", header=False)``
""" |
try:
self.start("Saving data to "+filepath + " ...")
if self.datapath is not None:
if filepath.startswith("/") is False and \
filepath.startswith(".") is False:
filepath = self.datapath + "/" + filepath
self.df.to_csv(filepath, encoding='utf-8', index=index, **kwargs)
self.end("Data exported to", filepath)
except Exception as e:
self.err(e, "Can not convert data to csv") |
<SYSTEM_TASK:>
Write the main dataframe to an Excel file
<END_TASK>
<USER_TASK:>
Description:
def to_excel(self, filepath: str, title: str):
"""Write the main dataframe to an Excell file
:param filepath: path of the Excel file to write
:type filepath: str
:param title: Title of the worksheet
:type title: str
:example: ``ds.to_excel("./myfile.xlsx", "My data")``
""" |
try:
self.start("Saving data to Excell file: "+filepath + " ...")
writer = pytablewriter.ExcelXlsxTableWriter()
writer.from_dataframe(self.df)
writer.open(filepath)
writer.make_worksheet(title)
writer.write_table()
writer.close()
self.end("File exported to", filepath)
except Exception as e:
self.err(e, "Can not convert data to Excel") |
<SYSTEM_TASK:>
Write the main dataframe to an HDF5 file
<END_TASK>
<USER_TASK:>
Description:
def to_hdf5(self, filepath: str):
"""Write the main dataframe to Hdf5 file
:param filepath: path where to save the file
:type filepath: str
:example: ``ds.to_hdf5("./myfile.hdf5")``
""" |
try:
self.start("Saving data to Hdf5...")
dd.io.save(filepath, self.df)
self.end("Finished saving Hdf5 data")
except Exception as e:
self.err(e, "Can not convert data to Hdf5") |
<SYSTEM_TASK:>
Create a CMS Article and its title for the given language
<END_TASK>
<USER_TASK:>
Description:
def create_article(tree, template, title, language, slug=None, description=None,
page_title=None, menu_title=None, meta_description=None,
created_by=None, image=None, publication_date=None, publication_end_date=None,
published=False, login_required=False, creation_date=None, categories=[]):
"""
Create a CMS Article and its title for the given language
""" |
# validate tree
tree = tree.get_public_object()
assert tree.application_urls == 'CMSArticlesApp'
# validate template
assert template in [tpl[0] for tpl in settings.CMS_ARTICLES_TEMPLATES]
get_template(template)
# validate language:
assert language in get_language_list(tree.node.site_id), settings.CMS_LANGUAGES.get(tree.node.site_id)
# validate publication date
if publication_date:
assert isinstance(publication_date, datetime.date)
# validate publication end date
if publication_end_date:
assert isinstance(publication_end_date, datetime.date)
# validate creation date
if not creation_date:
creation_date = publication_date
if creation_date:
assert isinstance(creation_date, datetime.date)
# get username
if created_by:
try:
username = created_by.get_username()
except Exception:
username = force_text(created_by)
else:
username = 'script'
with current_user(username):
# create article
article = Article.objects.create(
tree=tree,
template=template,
login_required=login_required,
creation_date=creation_date,
publication_date=publication_date,
publication_end_date=publication_end_date,
languages=language,
)
for category in categories:
article.categories.add(category)
# create title
create_title(
article=article,
language=language,
title=title,
slug=slug,
description=description,
page_title=page_title,
menu_title=menu_title,
meta_description=meta_description,
creation_date=creation_date,
image=image,
)
# publish article
if published:
article.publish(language)
return article.reload() |
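A hypothetical call, using only names from the signature above; `tree` must be a CMS page whose `application_urls` is 'CMSArticlesApp':

    article = create_article(
        tree=tree,
        template=settings.CMS_ARTICLES_TEMPLATES[0][0],
        title='Hello world',
        language='en',
        published=True,
    )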
<SYSTEM_TASK:>
Create an article title.
<END_TASK>
<USER_TASK:>
Description:
def create_title(article, language, title, slug=None, description=None,
page_title=None, menu_title=None, meta_description=None,
creation_date=None, image=None):
"""
Create an article title.
""" |
# validate article
assert isinstance(article, Article)
# validate language:
assert language in get_language_list(article.tree.node.site_id)
# validate creation date
if creation_date:
assert isinstance(creation_date, datetime.date)
# set default slug:
if not slug:
slug = settings.CMS_ARTICLES_SLUG_FORMAT.format(
now=creation_date or now(),
slug=slugify(title),
)
# find unused slug:
base_slug = slug
qs = Title.objects.filter(language=language)
used_slugs = list(s for s in qs.values_list('slug', flat=True) if s.startswith(base_slug))
i = 1
while slug in used_slugs:
slug = '%s-%s' % (base_slug, i)
i += 1
# create title
title = Title.objects.create(
article=article,
language=language,
title=title,
slug=slug,
description=description,
page_title=page_title,
menu_title=menu_title,
meta_description=meta_description,
image=image,
)
return title |
<SYSTEM_TASK:>
Adds a TextPlugin with given content to given slot
<END_TASK>
<USER_TASK:>
Description:
def add_content(obj, language, slot, content):
"""
Adds a TextPlugin with given content to given slot
""" |
placeholder = obj.placeholders.get(slot=slot)
add_plugin(placeholder, TextPlugin, language, body=content) |
<SYSTEM_TASK:>
Construct a new request with the given tree as its contents, then ship
<END_TASK>
<USER_TASK:>
Description:
def request(self, tree, **kwargs):
"""
Construct a new request with the given tree as its contents, then ship
it to the OpenProvider API.
""" |
apirequest = lxml.etree.tostring(
E.openXML(
E.credentials(
E.username(self.username),
OE('password', self.password),
OE('hash', self.password_hash),
),
tree
),
method='c14n'
)
try:
apiresponse = self.session.post(self.url, data=apirequest)
apiresponse.raise_for_status()
except requests.RequestException as e:
raise ServiceUnavailable(str(e))
tree = lxml.objectify.fromstring(apiresponse.content)
if tree.reply.code == 0:
return Response(tree)
else:
klass = from_code(tree.reply.code)
desc = tree.reply.desc
code = tree.reply.code
data = getattr(tree.reply, 'data', '')
raise klass(u"{0} ({1}) {2}".format(desc, code, data), code) |
<SYSTEM_TASK:>
Create a Hive object based on JSON located in a local file.
<END_TASK>
<USER_TASK:>
Description:
def from_file(cls, fname, version=None, require_https=True):
"""
Create a Hive object based on JSON located in a local file.
""" |
if os.path.exists(fname):
with open(fname) as hive_file:
return cls(**json.load(hive_file)).from_version(version, require_https=require_https)
else:
raise MissingHive(fname) |
<SYSTEM_TASK:>
Create a Hive object based on JSON located at a remote URL.
<END_TASK>
<USER_TASK:>
Description:
def from_url(cls, url, version=None, require_https=False):
"""
Create a Hive object based on JSON located at a remote URL.
""" |
if "https://" in url:
require_https = True
if "http://" in url and require_https:
try:
hive = cls.from_url(url, version=version, require_https=False)
except HiveLoadedOverHTTP as err:
hive = err.hive
raise HiveLoadedOverHTTP(url, hive)
else:
try:
return cls(**download_as_json(url)).from_version(version, require_https)
except (ResponseException, URLError):
raise MissingHive(url) |
<SYSTEM_TASK:>
Try to find a hive for the given domain; raise an error if we have to
<END_TASK>
<USER_TASK:>
Description:
def from_domain(cls, domain, version=None, require_https=True):
"""
Try to find a hive for the given domain; raise an error if we have to
fail over to HTTP and haven't explicitly suppressed it in the call.
""" |
url = 'https://' + domain + '/api/hive.json'
try:
return cls.from_url(url, version=version, require_https=require_https)
except MissingHive:
url = 'http://' + domain + '/api/hive.json'
return cls.from_url(url, version=version, require_https=require_https) |
<SYSTEM_TASK:>
Create a Hive object based on the information in the object
<END_TASK>
<USER_TASK:>
Description:
def from_version(self, version, require_https=False):
"""
Create a Hive object based on the information in the object
and the version passed into the method.
""" |
if version is None or self.version() == version:
return self
else:
return Hive.from_url(self.get_version_url(version), require_https=require_https) |
<SYSTEM_TASK:>
Retrieve the URL for the designated version of the hive.
<END_TASK>
<USER_TASK:>
Description:
def get_version_url(self, version):
"""
Retrieve the URL for the designated version of the hive.
""" |
for each_version in self.other_versions():
if version == each_version['version'] and 'location' in each_version:
return each_version.get('location')
raise VersionNotInHive(version) |
<SYSTEM_TASK:>
Return the given map with a marker added
<END_TASK>
<USER_TASK:>
Description:
def marker_(self, lat, long, text, pmap, color=None, icon=None):
"""
Returns the given map with a marker added
""" |
try:
xmap = self._marker(lat, long, text, pmap, color, icon)
return xmap
except Exception as e:
self.err(e, self.marker_, "Can not get marker") |
<SYSTEM_TASK:>
Add a marker to the default map and store it as the main map
<END_TASK>
<USER_TASK:>
Description:
def marker(self, lat, long, text, color=None, icon=None):
"""
Adds a marker to the default map and stores it as the main map
""" |
try:
self.dsmap = self._marker(lat, long, text, self.dsmap, color, icon)
return self.dsmap
except Exception as e:
self.err(e, self.marker, "Can not get marker") |
<SYSTEM_TASK:>
Adds a marker to the given map
<END_TASK>
<USER_TASK:>
Description:
def _marker(self, lat, long, text, xmap, color=None, icon=None,
text_mark=False, style=None):
"""
Adds a marker to the given map
""" |
kwargs = {}
if icon is not None:
kwargs["icon"] = icon
if color is not None:
kwargs["color"] = color
if style is None:
style = "font-size:18pt;font-weight:bold;" + \
"color:black;border-radius:0.5"
try:
xicon1 = folium.Icon(**kwargs)
if text_mark is True:
    # build the text label only in text mode; otherwise `xicon`
    # would be referenced before assignment
    xicon = DivIcon(
        icon_size=(150, 36),
        icon_anchor=(0, 0),
        html='<div style="' + style + '">' + text + '</div>',
    )
    folium.Marker([lat, long], popup=text,
                  icon=xicon).add_to(xmap)
folium.Marker([lat, long], popup=text,
              icon=xicon1).add_to(xmap)
return xmap
except Exception as e:
self.err(e, self._marker, "Can not get marker") |
<SYSTEM_TASK:>
Create an Optional Element.
<END_TASK>
<USER_TASK:>
Description:
def OE(element, value, transform=lambda x: x):
"""
Create an Optional Element.
Returns an Element as ElementMaker would, unless value is None. Optionally the value can be
transformed through a function.
>>> OE('elem', None)
None
>>> lxml.etree.tostring(OE('elem', 'value'))
<elem>value</elem>
>>> lxml.etree.tostring(OE('elem', True, int))
<elem>1</elem>
""" |
return E(element, transform(value)) if value is not None else None |
<SYSTEM_TASK:>
Return a function which call _call_command for the given name.
<END_TASK>
<USER_TASK:>
Description:
def _make_command_method(cls, command_name):
"""
Return a function which call _call_command for the given name.
Used to bind redis commands to our own calls
""" |
def func(self, *args, **kwargs):
return self._call_command(command_name, *args, **kwargs)
return func |
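A sketch of how such generated methods are usually bound; the class name and the assumption that `_make_command_method` is a classmethod are illustrative:

    for command_name in MyField.available_commands:
        setattr(MyField, command_name,
                MyField._make_command_method(command_name))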
<SYSTEM_TASK:>
Check if the command to be executed is a modifier, to connect the object.
<END_TASK>
<USER_TASK:>
Description:
def _call_command(self, name, *args, **kwargs):
"""
Check if the command to be executed is a modifier, to connect the object.
Then call _traverse_command.
""" |
obj = getattr(self, '_instance', self) # _instance if a field, self if an instance
# The object may not be already connected, so if we want to update a
# field, connect it before.
# If the object has no PK yet, let the object create itself
if name in self.available_modifiers and obj._pk and not obj.connected:
obj.connect()
# Give priority to a "_call_{command}" method
meth = getattr(self, '_call_%s' % name, self._traverse_command)
return meth(name, *args, **kwargs) |
<SYSTEM_TASK:>
Add the key to the args and call the Redis command.
<END_TASK>
<USER_TASK:>
Description:
def _traverse_command(self, name, *args, **kwargs):
"""
Add the key to the args and call the Redis command.
""" |
if name not in self.available_commands:
raise AttributeError("%s is not an available command for %s" %
(name, self.__class__.__name__))
attr = getattr(self.connection, "%s" % name)
key = self.key
log.debug(u"Requesting %s with key %s and args %s" % (name, key, args))
result = attr(key, *args, **kwargs)
result = self.post_command(
sender=self,
name=name,
result=result,
args=args,
kwargs=kwargs
)
return result |
<SYSTEM_TASK:>
A helper to easily call the proxy_setter of the field
<END_TASK>
<USER_TASK:>
Description:
def proxy_set(self, value):
"""
A helper to easily call the proxy_setter of the field
""" |
setter = getattr(self, self.proxy_setter)
if isinstance(value, (list, tuple, set)):
result = setter(*value)
elif isinstance(value, dict):
result = setter(**value)
else:
result = setter(value)
return result |
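For example, the dispatch rules above mean (field and setter hypothetical):

    field.proxy_set('foo')            # setter('foo')
    field.proxy_set(['a', 'b'])       # setter('a', 'b')
    field.proxy_set({'score': 1.0})   # setter(score=1.0)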
<SYSTEM_TASK:>
A property to return the key used in redis for the current field.
<END_TASK>
<USER_TASK:>
Description:
def key(self):
"""
A property to return the key used in redis for the current field.
""" |
return self.make_key(
self._instance._name,
self._instance.pk.get(),
self.name,
) |
<SYSTEM_TASK:>
Call after we got the result of a redis command.
<END_TASK>
<USER_TASK:>
Description:
def post_command(self, sender, name, result, args, kwargs):
"""
Call after we got the result of a redis command.
By default, let the instance manage the post_modify signal
""" |
return self._instance.post_command(
sender=self,
name=name,
result=result,
args=args,
kwargs=kwargs
) |
<SYSTEM_TASK:>
Call the exists command to check if the redis key exists for the current
<END_TASK>
<USER_TASK:>
Description:
def exists(self):
"""
Call the exists command to check if the redis key exists for the current
field
""" |
try:
key = self.key
except DoesNotExist:
"""
If the object doesn't exists anymore, its PK is deleted, so the
"self.key" call will raise a DoesNotExist exception. We catch it
to return False, as the field doesn't exists too.
"""
return False
else:
return self.connection.exists(key) |
<SYSTEM_TASK:>
Instantiate the indexes only when asked
<END_TASK>
<USER_TASK:>
Description:
def _indexes(self):
"""Instantiate the indexes only when asked
Returns
-------
list
An empty list if the field is not indexable, else a list of all indexes
tied to the field.
If no indexes were passed when creating the field, the default indexes
from the field/model/database will be used.
If there are still no index classes, it will raise an ImplementationError.
Raises
------
ImplementationError
If no index classes available for this field
""" |
if not self.indexable:
return []
if not self.index_classes:
self.index_classes = self.get_default_indexes()[:]
if not self.index_classes:
raise ImplementationError('%s field is indexable but has no indexes attached' %
self.__class__.__name__)
return [index_class(field=self) for index_class in self.index_classes] |
<SYSTEM_TASK:>
Tells if the field has an index matching the given one
<END_TASK>
<USER_TASK:>
Description:
def has_index(self, index):
"""Tells if the field have an index matching the current one
Parameters
-----------
index: type or BaseIndex
It could be an index instance, or an index class
Returns
-------
bool
Will be ``True`` if the current field has an index that is an instance
of the given index or of the class of the given index
""" |
klass = index if isclass(index) else index.__class__
for one_index in self._indexes:
if isinstance(one_index, klass):
return True
return False |
<SYSTEM_TASK:>
Attach the current field to an instance of a model. Can be overridden to
<END_TASK>
<USER_TASK:>
Description:
def _attach_to_instance(self, instance):
"""
Attach the current field to an instance of a model. Can be overridden to
do something when an instance is set
""" |
self._instance = instance
self.lockable = self.lockable and instance.lockable |
<SYSTEM_TASK:>
Tells if the current field is the one attached to the model, not instance
<END_TASK>
<USER_TASK:>
Description:
def attached_to_model(self):
"""Tells if the current field is the one attached to the model, not instance""" |
try:
if not bool(self._model):
return False
except AttributeError:
return False
else:
try:
return not bool(self._instance)
except AttributeError:
return True |
<SYSTEM_TASK:>
Add lock management and call parent.
<END_TASK>
<USER_TASK:>
Description:
def _call_command(self, name, *args, **kwargs):
"""
Add lock management and call parent.
""" |
meth = super(RedisField, self)._call_command
if self.indexable and name in self.available_modifiers:
with FieldLock(self):
try:
result = meth(name, *args, **kwargs)
except:
self._rollback_indexes()
raise
else:
return result
finally:
self._reset_indexes_caches()
else:
return meth(name, *args, **kwargs) |
<SYSTEM_TASK:>
Clear all indexes tied to this field
<END_TASK>
<USER_TASK:>
Description:
def clear_indexes(self, chunk_size=1000, aggressive=False, index_class=None):
"""Clear all indexes tied to this field
Parameters
----------
chunk_size: int
Default to 1000, it's the number of instances to load at once if not in aggressive mode.
aggressive: bool
Default to ``False``. When ``False``, the actual collection of instances will
be ran through to deindex all the values.
But when ``True``, the database keys will be scanned to find keys that matches the
pattern of the keys used by the indexes. This is a lot faster and may find forgotten keys.
But may also find keys not related to the index.
Should be set to ``True`` if you are not sure about the already indexed values.
index_class: type
Allow to clear only index(es) for this index class instead of all indexes.
Raises
------
AssertionError
If called from an instance field. It must be called from the model field
Also raised if the field is not indexable
Examples
--------
>>> MyModel.get_field('myfield').clear_indexes()
>>> MyModel.get_field('myfield').clear_indexes(index_class=MyIndex)
""" |
assert self.indexable, "Field not indexable"
assert self.attached_to_model, \
'`clear_indexes` can only be called on a field attached to the model'
for index in self._indexes:
if index_class and not isinstance(index, index_class):
continue
index.clear(chunk_size=chunk_size, aggressive=aggressive) |
<SYSTEM_TASK:>
Rebuild all indexes tied to this field
<END_TASK>
<USER_TASK:>
Description:
def rebuild_indexes(self, chunk_size=1000, aggressive_clear=False, index_class=None):
"""Rebuild all indexes tied to this field
Parameters
----------
chunk_size: int
Default to 1000, it's the number of instances to load at once.
aggressive_clear: bool
Will be passed to the `aggressive` argument of the `clear_indexes` method.
If `False`, all values will be normally deindexed. If `True`, the work
will be done at low level, scanning for keys that may match the ones used by the indexes
index_class: type
Allow to build only index(es) for this index class instead of all indexes.
Raises
------
AssertionError
If called from an instance field. It must be called from the model field
Also raised if the field is not indexable
Examples
--------
>>> MyModel.get_field('myfield').rebuild_indexes()
>>> MyModel.get_field('myfield').rebuild_indexes(index_class=MyIndex)
""" |
assert self.indexable, "Field not indexable"
assert self.attached_to_model, \
'`rebuild_indexes` can only be called on a field attached to the model'
for index in self._indexes:
if index_class and not isinstance(index, index_class):
continue
index.rebuild(chunk_size=chunk_size, aggressive_clear=aggressive_clear) |
<SYSTEM_TASK:>
Shortcut for commands that reset values of the field.
<END_TASK>
<USER_TASK:>
Description:
def _reset(self, command, *args, **kwargs):
"""
Shortcut for commands that reset values of the field.
All will be deindexed and reindexed.
""" |
if self.indexable:
self.deindex()
result = self._traverse_command(command, *args, **kwargs)
if self.indexable:
self.index()
return result |
<SYSTEM_TASK:>
Shortcut for commands that remove all values of the field.
<END_TASK>
<USER_TASK:>
Description:
def _del(self, command, *args, **kwargs):
"""
Shortcut for commands that remove all values of the field.
All will be deindexed.
""" |
if self.indexable:
self.deindex()
return self._traverse_command(command, *args, **kwargs) |
<SYSTEM_TASK:>
Helper for commands that only set a value to the field.
<END_TASK>
<USER_TASK:>
Description:
def _call_set(self, command, value, *args, **kwargs):
"""
Helper for commands that only set a value to the field.
""" |
if self.indexable:
current = self.proxy_get()
if normalize(current) != normalize(value):
if current is not None:
self.deindex(current)
if value is not None:
self.index(value)
return self._traverse_command(command, value, *args, **kwargs) |
<SYSTEM_TASK:>
Index only if value has been set.
<END_TASK>
<USER_TASK:>
Description:
def _call_setnx(self, command, value):
"""
Index only if value has been set.
""" |
result = self._traverse_command(command, value)
if self.indexable and value is not None and result:
self.index(value)
return result |
<SYSTEM_TASK:>
Shortcut for commands that only add values to the field.
<END_TASK>
<USER_TASK:>
Description:
def _add(self, command, *args, **kwargs):
"""
Shortcut for commands that only add values to the field.
Added values will be indexed.
""" |
if self.indexable:
self.index(args)
return self._traverse_command(command, *args, **kwargs) |
<SYSTEM_TASK:>
Shortcut for commands that only remove values from the field.
<END_TASK>
<USER_TASK:>
Description:
def _rem(self, command, *args, **kwargs):
"""
Shortcut for commands that only remove values from the field.
Removed values will be deindexed.
""" |
if self.indexable:
self.deindex(args)
return self._traverse_command(command, *args, **kwargs) |
<SYSTEM_TASK:>
Shortcut for commands that pop a value from the field, returning it while
<END_TASK>
<USER_TASK:>
Description:
def _pop(self, command, *args, **kwargs):
"""
Shortcut for commands that pop a value from the field, returning it while
removing it.
The returned value will be deindexed
""" |
result = self._traverse_command(command, *args, **kwargs)
if self.indexable:
self.deindex([result])
return result |
<SYSTEM_TASK:>
Index all values stored in the field, or only given ones if any.
<END_TASK>
<USER_TASK:>
Description:
def index(self, values=None, only_index=None):
"""
Index all values stored in the field, or only given ones if any.
""" |
assert self.indexable, "Field not indexable"
assert not only_index or self.has_index(only_index), "Invalid index"
if only_index:
only_index = only_index if isclass(only_index) else only_index.__class__
if values is None:
values = self.proxy_get()
for value in values:
if value is not None:
needs_to_check_uniqueness = bool(self.unique)
for index in self._indexes:
if only_index and not isinstance(index, only_index):
continue
index.add(value, check_uniqueness=needs_to_check_uniqueness and index.handle_uniqueness)
if needs_to_check_uniqueness and index.handle_uniqueness:
# uniqueness check is done for this value
needs_to_check_uniqueness = False |
<SYSTEM_TASK:>
This command updates the score of a given value. But it can be a new value
<END_TASK>
<USER_TASK:>
Description:
def _call_zincrby(self, command, value, *args, **kwargs):
"""
This command updates the score of a given value. But the value may be
new to the sorted set, so we index it.
""" |
if self.indexable:
self.index([value])
return self._traverse_command(command, value, *args, **kwargs) |
<SYSTEM_TASK:>
Helper for lpushx and rpushx, that only index the new values if the list
<END_TASK>
<USER_TASK:>
Description:
def _pushx(self, command, *args, **kwargs):
"""
Helper for lpushx and rpushx, that only index the new values if the list
existed when the command was called
""" |
result = self._traverse_command(command, *args, **kwargs)
if self.indexable and result:
self.index(args)
return result |
<SYSTEM_TASK:>
Before setting the new value, get the previous one to deindex it. Then
<END_TASK>
<USER_TASK:>
Description:
def _call_lset(self, command, index, value, *args, **kwargs):
"""
Before setting the new value, get the previous one to deindex it. Then
call the command and index the new value, if exists
""" |
if self.indexable:
old_value = self.lindex(index)
self.deindex([old_value])
result = self._traverse_command(command, index, value, *args, **kwargs)
if self.indexable:
self.index([value])
return result |
<SYSTEM_TASK:>
Deindex the values, given as a dict of hash field names and values.
<END_TASK>
<USER_TASK:>
Description:
def deindex(self, values=None, only_index=None):
"""
Deindex the values, given as a dict of hash field names and values.
""" |
assert self.indexable, "Field not indexable"
assert not only_index or self.has_index(only_index), "Invalid index"
if only_index:
only_index = only_index if isclass(only_index) else only_index.__class__
if values is None:
values = self.proxy_get()
for field_name, value in iteritems(values):
if value is not None:
for index in self._indexes:
if only_index and not isinstance(index, only_index):
continue
index.remove(field_name, value) |
<SYSTEM_TASK:>
Add key AND the hash field to the args, and call the Redis command.
<END_TASK>
<USER_TASK:>
Description:
def _traverse_command(self, name, *args, **kwargs):
"""Add key AND the hash field to the args, and call the Redis command.""" |
args = list(args)
args.insert(0, self.name)
return super(InstanceHashField, self)._traverse_command(name, *args, **kwargs) |
<SYSTEM_TASK:>
Validate that a given new pk is effectively set (not None), and return it.
<END_TASK>
<USER_TASK:>
Description:
def _validate(self, value):
"""
Validate that a given new pk is effectively set (not None), and return it.
The returned value should be normalized, and will be used without check.
""" |
if value is None:
raise ValueError('The pk for %s is not "auto-increment", you must fill it' %
self._model.__name__)
value = self.normalize(value)
# Check that this pk does not already exist
if self.exists(value):
raise UniquenessError('PKField %s already exists for model %s' %
(value, self._instance.__class__))
return value |
<SYSTEM_TASK:>
Return True if the given pk value exists for the given class.
<END_TASK>
<USER_TASK:>
Description:
def exists(self, value=None):
"""
Return True if the given pk value exists for the given class.
If no value is given, we use the value of the current field, which
is the value of the "_pk" attribute of its instance.
""" |
try:
if not value:
value = self.get()
except (AttributeError, DoesNotExist):
# If the instance is deleted, the _pk attribute doesn't exist
# anymore. So we catch the AttributeError to return False (this pk
# field doesn't exist anymore) in this specific case
return False
else:
return self.connection.sismember(self.collection_key, value) |
<SYSTEM_TASK:>
We do not call the default getter as we have the value cached in the
<END_TASK>
<USER_TASK:>
Description:
def get(self):
"""
We do not call the default getter as we have the value cached in the
instance in its _pk attribute
""" |
if not hasattr(self, '_instance'):
raise ImplementationError("Impossible to get the PK of an unbound field")
if not hasattr(self._instance, '_pk'):
raise DoesNotExist("The current object doesn't exist anymore")
if not self._instance._pk:
self.set(value=None)
return self.normalize(self._instance._pk) |
<SYSTEM_TASK:>
Validate that a given new pk to set is always set to None, then return
<END_TASK>
<USER_TASK:>
Description:
def _validate(self, value):
"""
Validate that a given new pk to set is always set to None, then return
a new pk
""" |
if value is not None:
raise ValueError('The pk for %s is "auto-increment", you must not fill it' %
self._model.__name__)
key = self._instance.make_key(self._model._name, 'max_pk')
return self.normalize(self.connection.incr(key)) |
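The auto-increment pk leans on Redis's atomic INCR applied to a per-model counter key; the underlying pattern, sketched with plain redis-py (the key name is hypothetical):

import redis

conn = redis.StrictRedis()
# each call atomically reserves a fresh, never-reused primary key
new_pk = conn.incr('mymodel:max_pk')  # 1, then 2, then 3, ...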
<SYSTEM_TASK:>
Really acquire the lock only if it's not a sub-lock. Then save the
<END_TASK>
<USER_TASK:>
Description:
def acquire(self, *args, **kwargs):
"""
Really acquire the lock only if it's not a sub-lock. Then save the
sub-lock status.
""" |
if not self.field.lockable:
return
if self.already_locked_by_model:
self.sub_lock_mode = True
return
self.already_locked_by_model = True
super(FieldLock, self).acquire(*args, **kwargs) |
<SYSTEM_TASK:>
Really release the lock only if it's not a sub-lock. Then save the
<END_TASK>
<USER_TASK:>
Description:
def release(self, *args, **kwargs):
"""
Really release the lock only if it's not a sub-lock. Then save the
sub-lock status and mark the model as unlocked.
""" |
if not self.field.lockable:
return
if self.sub_lock_mode:
return
super(FieldLock, self).release(*args, **kwargs)
self.already_locked_by_model = self.sub_lock_mode = False |
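A toy illustration of the acquire/release pair above (this is not the limpyd class itself: the shared state and names are invented to show the sub-lock idea in isolation):

import threading

class ModelState:
    # stands in for the per-model/field state used by the real lock
    lock = threading.Lock()
    already_locked = False

class FieldLockSketch:
    # one instance per write operation, as in the code above
    def __init__(self, state):
        self.state = state
        self.sub_lock_mode = False
    def __enter__(self):
        if self.state.already_locked:  # an outer operation holds the lock
            self.sub_lock_mode = True  # so this one is only a sub-lock
            return self
        self.state.already_locked = True
        self.state.lock.acquire()
        return self
    def __exit__(self, *exc):
        if self.sub_lock_mode:  # inner exit: keep the real lock held
            return
        self.state.lock.release()
        self.state.already_locked = False

with FieldLockSketch(ModelState):      # really acquires the lock
    with FieldLockSketch(ModelState):  # nested: sub-lock, no-op
        pass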
<SYSTEM_TASK:>
Create a new class with the given index classes
<END_TASK>
<USER_TASK:>
Description:
def compose(cls, index_classes, key=None, transform=None, name=None):
"""Create a new class with the given index classes
Parameters
----------
index_classes: list
The list of index classes to be used in the multi-index class to create
key: str
A key to augment the default key of each index, to avoid collision.
transform: callable
None by default, can be set to a function that will transform the value to be indexed.
This callable can accept one (`value`) or two (`self`, `value`) arguments
name: str
The name of the new multi-index class. If not set, it will be the same
as the current class
""" |
attrs = {}
if index_classes:
attrs['index_classes'] = index_classes
klass = type(str(name or cls.__name__), (cls, ), attrs)
# let the ``configure`` method manage some fields
configure_attrs = {}
if key is not None:
configure_attrs['key'] = key
if transform is not None:
configure_attrs['transform'] = transform
if configure_attrs:
klass = klass.configure(**configure_attrs)
return klass |
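A sketch following limpyd's documented pattern for combining an exact-match index with a text-range index (model and field names hypothetical, reusing `main_database` from the first sketch):

from limpyd.contrib.indexes import MultiIndexes
from limpyd.indexes import EqualIndex, TextRangeIndex

# one index class answering both `title__eq=` and `title__gte=` style filters
FullTextIndex = MultiIndexes.compose([EqualIndex, TextRangeIndex],
                                     name='FullTextIndex')

class Book(model.RedisModel):
    database = main_database
    title = fields.StringField(indexable=True, indexes=[FullTextIndex])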
<SYSTEM_TASK:>
Tell if one of the managed indexes can be used for the given filter suffix
<END_TASK>
<USER_TASK:>
Description:
def can_handle_suffix(self, suffix):
"""Tell if one of the managed indexes can be used for the given filter prefix
For parameters, see BaseIndex.can_handle_suffix
""" |
for index in self._indexes:
if index.can_handle_suffix(suffix):
return True
return False |
<SYSTEM_TASK:>
Prepare args to be used by a sub-index
<END_TASK>
<USER_TASK:>
Description:
def prepare_args(self, args, transform=True):
"""Prepare args to be used by a sub-index
Parameters
----------
args: list
The whole list of arguments passed to add, check_uniqueness, get_filtered_keys...
transform: bool
If ``True``, the last entry in `args`, i.e. the value, will be transformed.
Else it will be kept as is.
""" |
updated_args = list(args)
if transform:
updated_args[-1] = self.transform_value(updated_args[-1])
if self.key:
updated_args.insert(-1, self.key)
return updated_args |
<SYSTEM_TASK:>
For a unique index, check if the given args are not used twice
<END_TASK>
<USER_TASK:>
Description:
def check_uniqueness(self, *args):
"""For a unique index, check if the given args are not used twice
For the parameters, see BaseIndex.check_uniqueness
""" |
self.get_unique_index().check_uniqueness(*self.prepare_args(args, transform=False)) |
<SYSTEM_TASK:>
Add the instance tied to the field to all the indexes
<END_TASK>
<USER_TASK:>
Description:
def add(self, *args, **kwargs):
"""Add the instance tied to the field to all the indexes
For the parameters, see BaseIndex.add
""" |
check_uniqueness = kwargs.pop('check_uniqueness', False)
args = self.prepare_args(args)
for index in self._indexes:
index.add(*args, check_uniqueness=check_uniqueness and index.handle_uniqueness, **kwargs)
if check_uniqueness and index.handle_uniqueness:
check_uniqueness = False |
<SYSTEM_TASK:>
Remove the instance tied to the field from all the indexes
<END_TASK>
<USER_TASK:>
Description:
def remove(self, *args):
"""Remove the instance tied to the field from all the indexes
For the parameters, see BaseIndex.remove
""" |
args = self.prepare_args(args)
for index in self._indexes:
index.remove(*args) |
<SYSTEM_TASK:>
Returns the index keys to be used by the collection for the given args
<END_TASK>
<USER_TASK:>
Description:
def get_filtered_keys(self, suffix, *args, **kwargs):
"""Returns the index keys to be used by the collection for the given args
For the parameters, see BaseIndex.get_filtered_keys
""" |
args = self.prepare_args(args, transform=False)
for index in self._indexes:
if index.can_handle_suffix(suffix):
return index.get_filtered_keys(suffix, *args, **kwargs) |
<SYSTEM_TASK:>
Create a new index class with the given info
<END_TASK>
<USER_TASK:>
Description:
def configure(cls, **kwargs):
"""Create a new index class with the given info
This makes it possible to avoid creating a new class when only a few
changes are needed
Parameters
----------
kwargs: dict
prefix: str
The string part to use in the collection, before the normal suffix.
For example `foo` to filter on `myfield__foo__eq=`
This prefix will also be used by the indexes to store the data at
a different place than the same index without prefix.
transform: callable
A function that will transform the value to be used as the reference
for the index, before the call to `normalize_value`.
It can be the extraction of a date, or any other computation.
The filter in the collection will then have to use a transformed value,
for example `birth_date__year=1976` if the transform take a date and
transform it to a year.
handle_uniqueness: bool
Whether or not the index should handle uniqueness
key: str
To override the key used by the index. Two indexes for the same field of
the same type must not have the same key or data will be saved at the same place.
Note that the default key is None for `EqualIndex`, `text-range` for
`TextRangeIndex` and `number-range` for `NumberRangeIndex`
name: str
The name of the new index class. If not set, it will be the same
as the current class
Returns
-------
type
A new class based on `cls`, with the new attributes set
""" |
attrs = {}
for key in ('prefix', 'handle_uniqueness', 'key'):
if key in kwargs:
attrs[key] = kwargs.pop(key)
if 'transform' in kwargs:
attrs['transform'] = staticmethod(kwargs.pop('transform'))
name = kwargs.pop('name', None)
if kwargs:
raise TypeError('%s.configure only accepts these named arguments: %s' % (
cls.__name__,
', '.join(('prefix', 'transform', 'handle_uniqueness', 'key', 'name')),
))
return type((str if PY3 else oldstr)(name or cls.__name__), (cls, ), attrs) |
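A sketch of `configure` in action, close to limpyd's documented example of indexing the year part of a date stored as a string (the model and field are hypothetical, reusing `main_database` from the first sketch):

from limpyd.indexes import EqualIndex

YearIndex = EqualIndex.configure(
    prefix='year',
    transform=lambda value: value[:4],  # '1976-06-15' -> '1976'
    name='YearIndex',
)

class Person(model.RedisModel):
    database = main_database
    birth_date = fields.StringField(indexable=True,
                                    indexes=[EqualIndex, YearIndex])

# exact date: Person.collection(birth_date='1976-06-15')
# year only:  Person.collection(birth_date__year='1976')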