<SYSTEM_TASK:>
Copy file to new destination
<END_TASK>
<USER_TASK:>
Description:
def copy_file(self, file_id, dest_folder_id):
"""Copy file to new destination
Args:
file_id (int): ID of the file to copy.
dest_folder_id (int): ID of parent folder you are copying to.
Returns:
dict. Response from Box.
Raises:
BoxError: An error response is returned from Box (status_code >= 400).
BoxError: 409 - Item with the same name already exists.
In this case you will need to download the file and upload a new version to your destination.
(Box currently doesn't have a method to copy a new version.)
BoxHttpResponseError: Response from Box is malformed.
requests.exceptions.*: Any connection related problem.
""" |
return self.__request("POST", "/files/" + unicode(file_id) + "/copy",
data={ "parent": {"id": unicode(dest_folder_id)} }) |
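A hedged usage sketch of the 409 workflow described above; `client` stands for an instance of this Box client class, the IDs are placeholders, and `handle_name_conflict` is a hypothetical handler:
try:
    response = client.copy_file(file_id=1234, dest_folder_id=5678)
except BoxError as err:
    # Per the docstring, a 409 means a same-named item already exists at the
    # destination, so fall back to download-and-upload-new-version.
    if getattr(err, 'status', None) == 409:  # attribute name assumed
        handle_name_conflict(err)  # hypothetical
    else:
        raise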
<SYSTEM_TASK:>
Processes the received message. I don't need to
<END_TASK>
<USER_TASK:>
Description:
def ack(self, msg):
"""Processes the received message. I don't need to
generate an ack message.
""" |
self.log.info("senderID:%s Received: %s " % (self.senderID, msg['body']))
# Note: NO_REPONSE_NEEDED is misspelled this way in the stomper library itself.
return stomper.NO_REPONSE_NEEDED |
<SYSTEM_TASK:>
Helper for clear operations.
<END_TASK>
<USER_TASK:>
Description:
def _clear(self, pipe=None):
"""Helper for clear operations.
:param pipe: Redis pipe in case update is performed as a part
of transaction.
:type pipe: :class:`redis.client.StrictPipeline` or
:class:`redis.client.StrictRedis`
""" |
redis = self.redis if pipe is None else pipe
redis.delete(self.key) |
<SYSTEM_TASK:>
Convert negative indexes into their positive equivalents.
<END_TASK>
<USER_TASK:>
Description:
def _normalize_index(self, index, pipe=None):
"""Convert negative indexes into their positive equivalents.""" |
pipe = self.redis if pipe is None else pipe
len_self = self.__len__(pipe)
positive_index = index if index >= 0 else len_self + index
return len_self, positive_index |
<SYSTEM_TASK:>
Helper simplifying code within watched transaction.
<END_TASK>
<USER_TASK:>
Description:
def _transaction(self, fn, *extra_keys):
"""Helper simplifying code within watched transaction.
Takes *fn*, function treated as a transaction. Returns whatever
*fn* returns. ``self.key`` is watched. *fn* takes *pipe* as the
only argument.
:param fn: Closure treated as a transaction.
:type fn: function *fn(pipe)*
:param extra_keys: Optional list of additional keys to watch.
:type extra_keys: list
:rtype: whatever *fn* returns
""" |
results = []
def trans(pipe):
results.append(fn(pipe))
self.redis.transaction(trans, self.key, *extra_keys)
return results[0] |
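A minimal sketch of a method built on this helper, assuming the surrounding collection class and its `self.key`/`self._unpickle` helpers (both appear elsewhere in this codebase):
def pop_first(self):
    def trans(pipe):
        # While self.key is WATCHed, reads execute immediately...
        pickled = pipe.lindex(self.key, 0)
        # ...then multi() switches the pipe to queueing atomic writes.
        pipe.multi()
        pipe.lpop(self.key)
        return self._unpickle(pickled)
    return self._transaction(trans)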
<SYSTEM_TASK:>
STOMP negative acknowledge command.
<END_TASK>
<USER_TASK:>
Description:
def nack(messageid, subscriptionid, transactionid=None):
"""STOMP negative acknowledge command.
NACK is the opposite of ACK. It is used to tell the server that the client
did not consume the message. The server can then either send the message to
a different client, discard it, or put it in a dead letter queue. The exact
behavior is server specific.
messageid:
This is the id of the message we are negatively acknowledging,
what else could it be? ;)
subscriptionid:
This is the id of the subscription that applies to the message.
transactionid:
This is the id that all actions in this transaction
will have. If this is not given, no transaction header
is added to the frame.
""" |
header = 'subscription:%s\nmessage-id:%s' % (subscriptionid, messageid)
if transactionid:
header += '\ntransaction:%s' % transactionid
return "NACK\n%s\n\n\x00\n" % header |
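For reference, the exact frame the code above produces for a sample call:
frame = nack('msg-001', 'sub-0')
# frame == 'NACK\nsubscription:sub-0\nmessage-id:msg-001\n\n\x00\n'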
<SYSTEM_TASK:>
STOMP connect command.
<END_TASK>
<USER_TASK:>
Description:
def connect(username, password, host, heartbeats=(0,0)):
"""STOMP connect command.
username, password:
These are the needed auth details to connect to the
message server.
host:
The virtual host name sent in the STOMP 1.1 ``host`` header.
heartbeats:
A two-tuple (cx, cy) of client/server heart-beat intervals in milliseconds.
After sending this we will receive a CONNECTED
message which will contain our session id.
""" |
if len(heartbeats) != 2:
raise ValueError('Invalid heartbeat %r' % heartbeats)
cx, cy = heartbeats
return "CONNECT\naccept-version:1.1\nhost:%s\nheart-beat:%i,%i\nlogin:%s\npasscode:%s\n\n\x00\n" % (host, cx, cy, username, password) |
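And the frame built by a sample call (placeholder values):
frame = connect('bob', 's3cret', '/', heartbeats=(4000, 4000))
# frame == ('CONNECT\naccept-version:1.1\nhost:/\nheart-beat:4000,4000\n'
#           'login:bob\npasscode:s3cret\n\n\x00\n')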
<SYSTEM_TASK:>
Called when a MESSAGE has been received.
<END_TASK>
<USER_TASK:>
Description:
def ack(self, msg):
"""Called when a MESSAGE has been received.
Override this method to handle received messages.
This function will generate an acknowledge message
for the given message and transaction (if present).
""" |
message_id = msg['headers']['message-id']
subscription = msg['headers']['subscription']
transaction_id = None
if 'transaction-id' in msg['headers']:
transaction_id = msg['headers']['transaction-id']
# print "acknowledging message id <%s>." % message_id
return ack(message_id, subscription, transaction_id) |
<SYSTEM_TASK:>
I pull one complete message off the buffer and return it decoded
<END_TASK>
<USER_TASK:>
Description:
def getOneMessage ( self ):
"""
I pull one complete message off the buffer and return it decoded
as a dict. If there is no complete message in the buffer, I
return None.
Note that the buffer can contain more than one message. You
should therefore call me in a loop until I return None.
""" |
( mbytes, hbytes ) = self._findMessageBytes ( self.buffer )
if not mbytes:
return None
msgdata = self.buffer[:mbytes]
self.buffer = self.buffer[mbytes:]
hdata = msgdata[:hbytes]
elems = hdata.split ( '\n' )
cmd = elems.pop ( 0 )
headers = {}
# We can't use a simple split because the value can legally contain
# colon characters (for example, the session returned by ActiveMQ).
for e in elems:
i = e.find ( ':' )
if i == -1:
# find() returns -1 on a miss instead of raising ValueError, so skip here.
continue
k = e[:i].strip()
v = e[i+1:].strip()
headers [ k ] = v
# hbytes points to the start of the '\n\n' at the end of the header,
# so 2 bytes beyond this is the start of the body. The body EXCLUDES
# the final two bytes, which are '\x00\n'. Note that these 2 bytes
# are UNRELATED to the 2-byte '\n\n' that Frame.pack() used to insert
# into the data stream.
body = msgdata[hbytes+2:-2]
msg = { 'cmd' : cmd,
'headers' : headers,
'body' : body,
}
return msg |
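A hedged sketch of the loop the docstring recommends; `receiver` stands for an instance of the surrounding frame-buffer class whose buffer has already been fed raw socket data:
while True:
    msg = receiver.getOneMessage()
    if msg is None:
        break  # no complete frame left in the buffer
    print(msg['cmd'], msg['headers'], msg['body'])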
<SYSTEM_TASK:>
Defines a signal handler for syncing an individual entity. Called when
<END_TASK>
<USER_TASK:>
Description:
def delete_entity_signal_handler(sender, instance, **kwargs):
"""
Defines a signal handler for syncing an individual entity. Called when
an entity is deleted.
""" |
if instance.__class__ in entity_registry.entity_registry:
Entity.all_objects.delete_for_obj(instance) |
<SYSTEM_TASK:>
Defines a signal handler for saving an entity. Syncs the entity to
<END_TASK>
<USER_TASK:>
Description:
def save_entity_signal_handler(sender, instance, **kwargs):
"""
Defines a signal handler for saving an entity. Syncs the entity to
the entity mirror table.
""" |
if instance.__class__ in entity_registry.entity_registry:
sync_entities(instance)
if instance.__class__ in entity_registry.entity_watching:
sync_entities_watching(instance) |
<SYSTEM_TASK:>
Enables all of the signals for syncing entities. Everything is True by default, except for the post_bulk_operation
<END_TASK>
<USER_TASK:>
Description:
def turn_on_syncing(for_post_save=True, for_post_delete=True, for_m2m_changed=True, for_post_bulk_operation=False):
"""
Enables all of the signals for syncing entities. Everything is True by default, except for the post_bulk_operation
signal. The reason for this is because when any bulk operation occurs on any mirrored entity model, it will
result in every single entity being synced again. This is not a desired behavior by the majority of users, and
should only be turned on explicitly.
""" |
if for_post_save:
post_save.connect(save_entity_signal_handler, dispatch_uid='save_entity_signal_handler')
if for_post_delete:
post_delete.connect(delete_entity_signal_handler, dispatch_uid='delete_entity_signal_handler')
if for_m2m_changed:
m2m_changed.connect(m2m_changed_entity_signal_handler, dispatch_uid='m2m_changed_entity_signal_handler')
if for_post_bulk_operation:
post_bulk_operation.connect(bulk_operation_signal_handler, dispatch_uid='bulk_operation_signal_handler') |
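A hedged usage note: a caller opting in to the bulk-operation behaviour described above would enable it explicitly, e.g.:
turn_on_syncing(for_post_bulk_operation=True)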
<SYSTEM_TASK:>
Yield each of the elements from the collection, without pulling them
<END_TASK>
<USER_TASK:>
Description:
def scan_elements(self):
"""
Yield each of the elements from the collection, without pulling them
all into memory.
.. warning::
This method is not available on the set collections provided
by Python.
This method may return the element multiple times.
See the `Redis SCAN documentation
<http://redis.io/commands/scan#scan-guarantees>`_ for details.
""" |
for x in self.redis.sscan_iter(self.key):
yield self._unpickle(x) |
<SYSTEM_TASK:>
Return descriptions of the places stored in the collection that are
<END_TASK>
<USER_TASK:>
Description:
def places_within_radius(
self, place=None, latitude=None, longitude=None, radius=0, **kwargs
):
"""
Return descriptions of the places stored in the collection that are
within the circle specified by the given location and radius.
A list of dicts will be returned.
The center of the circle can be specified by the identifier of another
place in the collection with the *place* keyword argument.
Or, it can be specified by using both the *latitude* and *longitude*
keyword arguments.
By default the *radius* is given in kilometers, but you may also set
the *unit* keyword argument to ``'m'``, ``'mi'``, or ``'ft'``.
Limit the number of results returned with the *count* keyword argument.
Change the sorted order by setting the *sort* keyword argument to
``'DESC'``.
""" |
kwargs['withdist'] = True
kwargs['withcoord'] = True
kwargs['withhash'] = False
kwargs.setdefault('sort', 'ASC')
unit = kwargs.setdefault('unit', 'km')
# Make the query
if place is not None:
response = self.redis.georadiusbymember(
self.key, self._pickle(place), radius, **kwargs
)
elif (latitude is not None) and (longitude is not None):
response = self.redis.georadius(
self.key, longitude, latitude, radius, **kwargs
)
else:
raise ValueError(
'Must specify place, or both latitude and longitude'
)
# Assemble the result
ret = []
for item in response:
ret.append(
{
'place': self._unpickle(item[0]),
'distance': item[1],
'unit': unit,
'latitude': item[2][1],
'longitude': item[2][0],
}
)
return ret |
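A hedged usage sketch, assuming `places` is an instance of this geo collection with members already added; extra keyword arguments such as `count` pass through to the underlying Redis command:
nearby = places.places_within_radius(
    latitude=51.5074, longitude=-0.1278, radius=5, unit='mi', count=3,
)
for p in nearby:
    print(p['place'], p['distance'], p['unit'])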
<SYSTEM_TASK:>
Rotate the deque n steps to the right.
<END_TASK>
<USER_TASK:>
Description:
def rotate(self, n=1):
"""
Rotate the deque n steps to the right.
If n is negative, rotate to the left.
""" |
# No work to do for a 0-step rotate
if n == 0:
return
forward = n >= 0
abs_n = abs(n)
def rotate_trans(pipe):
# Synchronize the cache before rotating
if self.writeback:
self._sync_helper(pipe)
# Rotating len(self) times has no effect.
len_self = self.__len__(pipe)
steps = abs_n % len_self
# When n is positive we can use the built-in Redis command
if forward:
pipe.multi()
for __ in range(steps):
pipe.rpoplpush(self.key, self.key)
# When n is negative we must use Python
else:
for __ in range(steps):
pickled_value = pipe.lpop(self.key)
pipe.rpush(self.key, pickled_value)
self._transaction(rotate_trans) |
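A behaviour sketch for a deque holding [1, 2, 3, 4, 5]; `RedisDeque` is a hypothetical name for the surrounding class:
d = RedisDeque([1, 2, 3, 4, 5])
d.rotate(2)    # contents become [4, 5, 1, 2, 3] (rightward, via RPOPLPUSH)
d.rotate(-2)   # back to [1, 2, 3, 4, 5] (leftward, via LPOP + RPUSH)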
<SYSTEM_TASK:>
Given a list of super entities, return the entities that have those as a subset of their super entities.
<END_TASK>
<USER_TASK:>
Description:
def is_sub_to_all(self, *super_entities):
"""
Given a list of super entities, return the entities that have those as a subset of their super entities.
""" |
if super_entities:
if len(super_entities) == 1:
# Optimize for the case of just one super entity since this is a much less intensive query
has_subset = EntityRelationship.objects.filter(
super_entity=super_entities[0]).values_list('sub_entity', flat=True)
else:
# Get a list of entities that have super entities with all types
has_subset = EntityRelationship.objects.filter(
super_entity__in=super_entities).values('sub_entity').annotate(Count('super_entity')).filter(
super_entity__count=len(set(super_entities))).values_list('sub_entity', flat=True)
return self.filter(id__in=has_subset)
else:
return self |
<SYSTEM_TASK:>
Given a list of super entities, return the entities that have super entities that intersect with those provided.
<END_TASK>
<USER_TASK:>
Description:
def is_sub_to_any(self, *super_entities):
"""
Given a list of super entities, return the entities that have super entities that intersect with those provided.
""" |
if super_entities:
return self.filter(id__in=EntityRelationship.objects.filter(
super_entity__in=super_entities).values_list('sub_entity', flat=True))
else:
return self |
<SYSTEM_TASK:>
Find all entities that have super_entities of any of the specified kinds
<END_TASK>
<USER_TASK:>
Description:
def is_sub_to_any_kind(self, *super_entity_kinds):
"""
Find all entities that have super_entities of any of the specified kinds
""" |
if super_entity_kinds:
# get the pks of the desired subs from the relationships table
if len(super_entity_kinds) == 1:
entity_pks = EntityRelationship.objects.filter(
super_entity__entity_kind=super_entity_kinds[0]
).select_related('entity_kind', 'sub_entity').values_list('sub_entity', flat=True)
else:
entity_pks = EntityRelationship.objects.filter(
super_entity__entity_kind__in=super_entity_kinds
).select_related('entity_kind', 'sub_entity').values_list('sub_entity', flat=True)
# return a queryset limited to only those pks
return self.filter(pk__in=entity_pks)
else:
return self |
<SYSTEM_TASK:>
Given a saved entity model object, return the associated entity.
<END_TASK>
<USER_TASK:>
Description:
def get_for_obj(self, entity_model_obj):
"""
Given a saved entity model object, return the associated entity.
""" |
return self.get(entity_type=ContentType.objects.get_for_model(
entity_model_obj, for_concrete_model=False), entity_id=entity_model_obj.id) |
<SYSTEM_TASK:>
Delete the entities associated with a model object.
<END_TASK>
<USER_TASK:>
Description:
def delete_for_obj(self, entity_model_obj):
"""
Delete the entities associated with a model object.
""" |
return self.filter(
entity_type=ContentType.objects.get_for_model(
entity_model_obj, for_concrete_model=False), entity_id=entity_model_obj.id).delete(
force=True) |
<SYSTEM_TASK:>
Return all the entities in the group.
<END_TASK>
<USER_TASK:>
Description:
def all_entities(self, is_active=True):
"""
Return all the entities in the group.
Because groups can contain both individual entities, as well
as whole groups of entities, this method acts as a convenient
way to get a queryset of all the entities in the group.
""" |
return self.get_all_entities(return_models=True, is_active=is_active) |
<SYSTEM_TASK:>
Add an entity, or sub-entity group to this EntityGroup.
<END_TASK>
<USER_TASK:>
Description:
def add_entity(self, entity, sub_entity_kind=None):
"""
Add an entity, or sub-entity group to this EntityGroup.
:type entity: Entity
:param entity: The entity to add.
:type sub_entity_kind: Optional EntityKind
:param sub_entity_kind: If a sub_entity_kind is given, all
sub_entities of the entity will be added to this
EntityGroup.
""" |
membership = EntityGroupMembership.objects.create(
entity_group=self,
entity=entity,
sub_entity_kind=sub_entity_kind,
)
return membership |
<SYSTEM_TASK:>
Add many entities and sub-entity groups to this EntityGroup.
<END_TASK>
<USER_TASK:>
Description:
def bulk_add_entities(self, entities_and_kinds):
"""
Add many entities and sub-entity groups to this EntityGroup.
:type entities_and_kinds: List of (Entity, EntityKind) pairs.
:param entities_and_kinds: A list of entity, entity-kind pairs
to add to the group. In the pairs the entity-kind can be
``None``, to add a single entity, or some entity kind to
add all sub-entities of that kind.
""" |
memberships = [EntityGroupMembership(
entity_group=self,
entity=entity,
sub_entity_kind=sub_entity_kind,
) for entity, sub_entity_kind in entities_and_kinds]
created = EntityGroupMembership.objects.bulk_create(memberships)
return created |
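A hedged sketch of the expected input shape; the entity and kind objects are placeholders:
group.bulk_add_entities([
    (alice_entity, None),        # add a single entity
    (team_entity, member_kind),  # add every sub-entity of this kind
])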
<SYSTEM_TASK:>
Remove an entity, or sub-entity group, from this EntityGroup.
<END_TASK>
<USER_TASK:>
Description:
def remove_entity(self, entity, sub_entity_kind=None):
"""
Remove an entity, or sub-entity group, from this EntityGroup.
:type entity: Entity
:param entity: The entity to remove.
:type sub_entity_kind: Optional EntityKind
:param sub_entity_kind: If a sub_entity_kind is given, all
sub_entities of the entity will be removed from this
EntityGroup.
""" |
EntityGroupMembership.objects.get(
entity_group=self,
entity=entity,
sub_entity_kind=sub_entity_kind,
).delete() |
<SYSTEM_TASK:>
Remove many entities and sub-entity groups from this EntityGroup.
<END_TASK>
<USER_TASK:>
Description:
def bulk_remove_entities(self, entities_and_kinds):
"""
Remove many entities and sub-entity groups from this EntityGroup.
:type entities_and_kinds: List of (Entity, EntityKind) pairs.
:param entities_and_kinds: A list of entity, entity-kind pairs
to remove from the group. In the pairs, the entity-kind
can be ``None``, to remove a single entity, or some entity
kind to remove all sub-entities of that kind.
""" |
criteria = [
Q(entity=entity, sub_entity_kind=entity_kind)
for entity, entity_kind in entities_and_kinds
]
criteria = reduce(lambda q1, q2: q1 | q2, criteria, Q())
EntityGroupMembership.objects.filter(
criteria, entity_group=self).delete() |
<SYSTEM_TASK:>
Update the group to the given entities and sub-entity groups.
<END_TASK>
<USER_TASK:>
Description:
def bulk_overwrite(self, entities_and_kinds):
"""
Update the group to the given entities and sub-entity groups.
After this operation, the only members of this EntityGroup
will be the given entities, and sub-entity groups.
:type entities_and_kinds: List of (Entity, EntityKind) pairs.
:param entities_and_kinds: A list of entity, entity-kind pairs
to set to the EntityGroup. In the pairs the entity-kind
can be ``None``, to add a single entity, or some entity
kind to add all sub-entities of that kind.
""" |
EntityGroupMembership.objects.filter(entity_group=self).delete()
return self.bulk_add_entities(entities_and_kinds) |
<SYSTEM_TASK:>
Create a slug for each Work already in the DB.
<END_TASK>
<USER_TASK:>
Description:
def set_slug(apps, schema_editor, class_name):
"""
Create a slug for each Work already in the DB.
""" |
Cls = apps.get_model('spectator_events', class_name)
for obj in Cls.objects.all():
obj.slug = generate_slug(obj.pk)
obj.save(update_fields=['slug']) |
<SYSTEM_TASK:>
Convert descriptor and rows to Pandas
<END_TASK>
<USER_TASK:>
Description:
def convert_descriptor_and_rows(self, descriptor, rows):
"""Convert descriptor and rows to Pandas
""" |
# Prepare
primary_key = None
schema = tableschema.Schema(descriptor)
if len(schema.primary_key) == 1:
primary_key = schema.primary_key[0]
elif len(schema.primary_key) > 1:
message = 'Multi-column primary keys are not supported'
raise tableschema.exceptions.StorageError(message)
# Get data/index
data_rows = []
index_rows = []
jtstypes_map = {}
for row in rows:
values = []
index = None
for field, value in zip(schema.fields, row):
try:
if isinstance(value, float) and np.isnan(value):
value = None
if value and field.type == 'integer':
value = int(value)
value = field.cast_value(value)
except tableschema.exceptions.CastError:
value = json.loads(value)
# http://pandas.pydata.org/pandas-docs/stable/gotchas.html#support-for-integer-na
if value is None and field.type in ('number', 'integer'):
jtstypes_map[field.name] = 'number'
value = np.NaN
if field.name == primary_key:
index = value
else:
values.append(value)
data_rows.append(tuple(values))
index_rows.append(index)
# Get dtypes
dtypes = []
for field in schema.fields:
if field.name != primary_key:
field_name = field.name
if six.PY2:
field_name = field.name.encode('utf-8')
dtype = self.convert_type(jtstypes_map.get(field.name, field.type))
dtypes.append((field_name, dtype))
# Create dataframe
index = None
columns = schema.headers
array = np.array(data_rows, dtype=dtypes)
if primary_key:
index_field = schema.get_field(primary_key)
index_dtype = self.convert_type(index_field.type)
index_class = pd.Index
if index_field.type in ['datetime', 'date']:
index_class = pd.DatetimeIndex
index = index_class(index_rows, name=primary_key, dtype=index_dtype)
columns = filter(lambda column: column != primary_key, schema.headers)
dataframe = pd.DataFrame(array, index=index, columns=columns)
return dataframe |
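A hedged usage sketch, with `mapper` standing in for an instance of this storage mapper class:
descriptor = {
    'fields': [
        {'name': 'id', 'type': 'integer'},
        {'name': 'city', 'type': 'string'},
    ],
    'primaryKey': 'id',
}
rows = [[1, 'london'], [2, 'paris']]
df = mapper.convert_descriptor_and_rows(descriptor, rows)
# -> a DataFrame indexed by 'id' with a single 'city' column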
<SYSTEM_TASK:>
Restore descriptor from Pandas
<END_TASK>
<USER_TASK:>
Description:
def restore_descriptor(self, dataframe):
"""Restore descriptor from Pandas
""" |
# Prepare
fields = []
primary_key = None
# Primary key
if dataframe.index.name:
field_type = self.restore_type(dataframe.index.dtype)
field = {
'name': dataframe.index.name,
'type': field_type,
'constraints': {'required': True},
}
fields.append(field)
primary_key = dataframe.index.name
# Fields
for column, dtype in dataframe.dtypes.iteritems():
sample = dataframe[column].iloc[0] if len(dataframe) else None
field_type = self.restore_type(dtype, sample=sample)
field = {'name': column, 'type': field_type}
# TODO: provide better required indication
# if dataframe[column].isnull().sum() == 0:
# field['constraints'] = {'required': True}
fields.append(field)
# Descriptor
descriptor = {}
descriptor['fields'] = fields
if primary_key:
descriptor['primaryKey'] = primary_key
return descriptor |
<SYSTEM_TASK:>
Restore row from Pandas
<END_TASK>
<USER_TASK:>
Description:
def restore_row(self, row, schema, pk):
"""Restore row from Pandas
""" |
result = []
for field in schema.fields:
if schema.primary_key and schema.primary_key[0] == field.name:
if field.type == 'number' and np.isnan(pk):
pk = None
if pk and field.type == 'integer':
pk = int(pk)
result.append(field.cast_value(pk))
else:
value = row[field.name]
if field.type == 'number' and np.isnan(value):
value = None
if value and field.type == 'integer':
value = int(value)
elif field.type == 'datetime':
value = value.to_pydatetime()
result.append(field.cast_value(value))
return result |
<SYSTEM_TASK:>
Restore type from Pandas
<END_TASK>
<USER_TASK:>
Description:
def restore_type(self, dtype, sample=None):
"""Restore type from Pandas
""" |
# Pandas types
if pdc.is_bool_dtype(dtype):
return 'boolean'
elif pdc.is_datetime64_any_dtype(dtype):
return 'datetime'
elif pdc.is_integer_dtype(dtype):
return 'integer'
elif pdc.is_numeric_dtype(dtype):
return 'number'
# Python types
if sample is not None:
if isinstance(sample, (list, tuple)):
return 'array'
elif isinstance(sample, datetime.date):
return 'date'
elif isinstance(sample, isodate.Duration):
return 'duration'
elif isinstance(sample, dict):
return 'object'
elif isinstance(sample, six.string_types):
return 'string'
elif isinstance(sample, datetime.time):
return 'time'
return 'string' |
<SYSTEM_TASK:>
Returns an HTML link to the supplied URL, but only using the domain as the
<END_TASK>
<USER_TASK:>
Description:
def domain_urlize(value):
"""
Returns an HTML link to the supplied URL, but only using the domain as the
text. Strips 'www.' from the start of the domain, if present.
e.g. if `my_url` is 'http://www.example.org/foo/' then:
{{ my_url|domain_urlize }}
returns:
<a href="http://www.example.org/foo/" rel="nofollow">example.org</a>
""" |
parsed_uri = urlparse(value)
domain = '{uri.netloc}'.format(uri=parsed_uri)
if domain.startswith('www.'):
domain = domain[4:]
return format_html('<a href="{}" rel="nofollow">{}</a>',
value,
domain
) |
<SYSTEM_TASK:>
Returns the name of the current URL, namespaced, or False.
<END_TASK>
<USER_TASK:>
Description:
def current_url_name(context):
"""
Returns the name of the current URL, namespaced, or False.
Example usage:
{% current_url_name as url_name %}
<a href="#"{% if url_name == 'myapp:home' %} class="active"{% endif %}">Home</a>
""" |
url_name = False
if context.request.resolver_match:
url_name = "{}:{}".format(
context.request.resolver_match.namespace,
context.request.resolver_match.url_name
)
return url_name |
<SYSTEM_TASK:>
Displays a card showing the Creators who have the most Readings
<END_TASK>
<USER_TASK:>
Description:
def most_read_creators_card(num=10):
"""
Displays a card showing the Creators who have the most Readings
associated with their Publications.
In spectator_core tags, rather than spectator_reading so it can still be
used on core pages, even if spectator_reading isn't installed.
""" |
if spectator_apps.is_enabled('reading'):
object_list = most_read_creators(num=num)
object_list = chartify(object_list, 'num_readings', cutoff=1)
return {
'card_title': 'Most read authors',
'score_attr': 'num_readings',
'object_list': object_list,
} |
<SYSTEM_TASK:>
Displays a card showing the Venues that have the most Events.
<END_TASK>
<USER_TASK:>
Description:
def most_visited_venues_card(num=10):
"""
Displays a card showing the Venues that have the most Events.
In spectator_core tags, rather than spectator_events so it can still be
used on core pages, even if spectator_events isn't installed.
""" |
if spectator_apps.is_enabled('events'):
object_list = most_visited_venues(num=num)
object_list = chartify(object_list, 'num_visits', cutoff=1)
return {
'card_title': 'Most visited venues',
'score_attr': 'num_visits',
'object_list': object_list,
} |
<SYSTEM_TASK:>
Create a slug for each Creator already in the DB.
<END_TASK>
<USER_TASK:>
Description:
def set_slug(apps, schema_editor):
"""
Create a slug for each Creator already in the DB.
""" |
Creator = apps.get_model('spectator_core', 'Creator')
for c in Creator.objects.all():
c.slug = generate_slug(c.pk)
c.save(update_fields=['slug']) |
<SYSTEM_TASK:>
Copy the ClassicalWork and DancePiece data to use the new through models.
<END_TASK>
<USER_TASK:>
Description:
def forwards(apps, schema_editor):
"""
Copy the ClassicalWork and DancePiece data to use the new through models.
""" |
Event = apps.get_model('spectator_events', 'Event')
ClassicalWorkSelection = apps.get_model(
'spectator_events', 'ClassicalWorkSelection')
DancePieceSelection = apps.get_model(
'spectator_events', 'DancePieceSelection')
for event in Event.objects.all():
for work in event.classicalworks.all():
selection = ClassicalWorkSelection(
classical_work=work,
event=event)
selection.save()
for piece in event.dancepieces.all():
selection = DancePieceSelection(
dance_piece=piece,
event=event)
selection.save() |
<SYSTEM_TASK:>
Set the venue_name field of all Events that have a Venue.
<END_TASK>
<USER_TASK:>
Description:
def forwards(apps, schema_editor):
"""
Set the venue_name field of all Events that have a Venue.
""" |
Event = apps.get_model('spectator_events', 'Event')
for event in Event.objects.all():
if event.venue is not None:
event.venue_name = event.venue.name
event.save() |
<SYSTEM_TASK:>
Migrate all 'exhibition' Events to the new 'museum' Event kind.
<END_TASK>
<USER_TASK:>
Description:
def forwards(apps, schema_editor):
"""
Migrate all 'exhibition' Events to the new 'museum' Event kind.
""" |
Event = apps.get_model('spectator_events', 'Event')
for ev in Event.objects.filter(kind='exhibition'):
ev.kind = 'museum'
ev.save() |
<SYSTEM_TASK:>
Given a QuerySet it will go through and add a `chart_position` property to
<END_TASK>
<USER_TASK:>
Description:
def chartify(qs, score_field, cutoff=0, ensure_chartiness=True):
"""
Given a QuerySet it will go through and add a `chart_position` property to
each object returning a list of the objects.
If adjacent objects have the same 'score' (based on `score_field`) then
they will have the same `chart_position`. This can then be used in
templates for the `value` of <li> elements in an <ol>.
By default any objects with a score of 0 or less will be removed.
By default, if all the items in the chart have the same position, no items
will be returned (it's not much of a chart).
Keyword arguments:
qs -- The QuerySet
score_field -- The name of the numeric field that each object in the
QuerySet has, that will be used to compare their positions.
cutoff -- Any objects with a score of this value or below will be removed
from the list. Set to None to disable this.
ensure_chartiness -- If True, then if all items in the list have the same
score, an empty list will be returned.
""" |
chart = []
position = 0
prev_obj = None
for counter, obj in enumerate(qs):
score = getattr(obj, score_field)
if score != getattr(prev_obj, score_field, None):
position = counter + 1
if cutoff is None or score > cutoff:
obj.chart_position = position
chart.append(obj)
prev_obj = obj
if ensure_chartiness and len(chart) > 0:
if getattr(chart[0], score_field) == getattr(chart[-1], score_field):
chart = []
return chart |
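A minimal sketch of the tie handling, using plain objects in place of a QuerySet (chartify only needs each item to expose the score attribute):
from types import SimpleNamespace

items = [SimpleNamespace(title='a', score=5),
         SimpleNamespace(title='b', score=5),
         SimpleNamespace(title='c', score=3)]
chart = chartify(items, 'score')
# chart_position: a -> 1, b -> 1 (tied), c -> 3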
<SYSTEM_TASK:>
Gets Venues in order of how many Events have been held there.
<END_TASK>
<USER_TASK:>
Description:
def by_visits(self, event_kind=None):
"""
Gets Venues in order of how many Events have been held there.
Adds a `num_visits` field to each one.
event_kind filters by kind of Event, e.g. 'theatre', 'cinema', etc.
""" |
qs = self.get_queryset()
if event_kind is not None:
qs = qs.filter(event__kind=event_kind)
qs = qs.annotate(num_visits=Count('event')) \
.order_by('-num_visits', 'name_sort')
return qs |
<SYSTEM_TASK:>
Gets Works in order of how many times they've been attached to
<END_TASK>
<USER_TASK:>
Description:
def by_views(self, kind=None):
"""
Gets Works in order of how many times they've been attached to
Events.
kind is the kind of Work, e.g. 'play', 'movie', etc.
""" |
qs = self.get_queryset()
if kind is not None:
qs = qs.filter(kind=kind)
qs = qs.annotate(num_views=Count('event')) \
.order_by('-num_views', 'title_sort')
return qs |
<SYSTEM_TASK:>
Attempt to make a version of the string that has the surname, if any,
<END_TASK>
<USER_TASK:>
Description:
def naturalize_person(self, string):
"""
Attempt to make a version of the string that has the surname, if any,
at the start.
'John Brown' to 'Brown, John'
'Sir John Brown Jr' to 'Brown, Sir John Jr'
'Prince' to 'Prince'
string -- The string to change.
""" |
suffixes = [
'Jr', 'Jr.', 'Sr', 'Sr.',
'I', 'II', 'III', 'IV', 'V',
]
# Add lowercase versions:
suffixes = suffixes + [s.lower() for s in suffixes]
# If a name has a capitalised particle in we use that to sort.
# So 'Le Carre, John' but 'Carre, John le'.
particles = [
'Le', 'La',
'Von', 'Van',
'Du', 'De',
]
surname = '' # Smith
names = '' # Fred James
suffix = '' # Jr
sort_string = string
parts = string.split(' ')
if parts[-1] in suffixes:
# Remove suffixes entirely, as we'll add them back on the end.
suffix = parts[-1]
parts = parts[0:-1] # Remove suffix from parts
sort_string = ' '.join(parts)
if len(parts) > 1:
if parts[-2] in particles:
# From ['Alan', 'Barry', 'Le', 'Carré']
# to ['Alan', 'Barry', 'Le Carré']:
parts = parts[0:-2] + [ ' '.join(parts[-2:]) ]
# From 'David Foster Wallace' to 'Wallace, David Foster':
sort_string = '{}, {}'.format(parts[-1], ' '.join(parts[:-1]))
if suffix:
# Add it back on.
sort_string = '{} {}'.format(sort_string, suffix)
# In case this name has any numbers in it.
sort_string = self._naturalize_numbers(sort_string)
return sort_string |
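Worked examples, traced directly from the rules above:
# naturalize_person('David Foster Wallace')  -> 'Wallace, David Foster'
# naturalize_person('John Le Carré')         -> 'Le Carré, John'
# naturalize_person('Sir John Brown Jr')     -> 'Brown, Sir John Jr'
# naturalize_person('Prince')                -> 'Prince'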
<SYSTEM_TASK:>
Copying data from the old `Event.movie` and `Event.play` ForeignKey fields
<END_TASK>
<USER_TASK:>
Description:
def forward(apps, schema_editor):
"""
Copying data from the old `Event.movie` and `Event.play` ForeignKey fields
into the new `Event.movies` and `Event.plays` ManyToManyFields.
""" |
Event = apps.get_model('spectator_events', 'Event')
MovieSelection = apps.get_model('spectator_events', 'MovieSelection')
PlaySelection = apps.get_model('spectator_events', 'PlaySelection')
for event in Event.objects.all():
if event.movie is not None:
selection = MovieSelection(event=event, movie=event.movie)
selection.save()
if event.play is not None:
selection = PlaySelection(event=event, play=event.play)
selection.save() |
<SYSTEM_TASK:>
Create a slug for each Event already in the DB.
<END_TASK>
<USER_TASK:>
Description:
def set_slug(apps, schema_editor):
"""
Create a slug for each Event already in the DB.
""" |
Event = apps.get_model('spectator_events', 'Event')
for e in Event.objects.all():
e.slug = generate_slug(e.pk)
e.save(update_fields=['slug']) |
<SYSTEM_TASK:>
Return a standard ``Page`` instance with custom, digg-specific
<END_TASK>
<USER_TASK:>
Description:
def page(self, number, *args, **kwargs):
"""Return a standard ``Page`` instance with custom, digg-specific
page ranges attached.
""" |
page = super().page(number, *args, **kwargs)
number = int(number) # we know this will work
# easier access
num_pages, body, tail, padding, margin = \
self.num_pages, self.body, self.tail, self.padding, self.margin
# put active page in middle of main range
main_range = list(map(int, [
math.floor(number-body/2.0)+1, # +1 = shift odd body to right
math.floor(number+body/2.0)]))
# adjust bounds
if main_range[0] < 1:
main_range = list(map(abs(main_range[0]-1).__add__, main_range))
if main_range[1] > num_pages:
main_range = list(map((num_pages-main_range[1]).__add__, main_range))
# Determine leading and trailing ranges; if possible and appropriate,
# combine them with the main range, in which case the resulting main
# block might end up considerable larger than requested. While we
# can't guarantee the exact size in those cases, we can at least try
# to come as close as possible: we can reduce the other boundary to
# max padding, instead of using half the body size, which would
# otherwise be the case. If the padding is large enough, this will
# of course have no effect.
# Example:
# total pages=100, page=4, body=5, (default padding=2)
# 1 2 3 [4] 5 6 ... 99 100
# total pages=100, page=4, body=5, padding=1
# 1 2 3 [4] 5 ... 99 100
# If it were not for this adjustment, both cases would result in the
# first output, regardless of the padding value.
if main_range[0] <= tail+margin:
leading = []
main_range = [1, max(body, min(number+padding, main_range[1]))]
else:
leading = list(range(1, tail+1))
# basically same for trailing range, but not in ``left_align`` mode
if self.align_left:
trailing = []
else:
if main_range[1] >= num_pages-(tail+margin)+1:
trailing = []
if not leading:
# ... but handle the special case of neither leading nor
# trailing ranges; otherwise, we would now modify the
# main range low bound, which we just set in the previous
# section, again.
main_range = [1, num_pages]
else:
main_range = [min(num_pages-body+1, max(number-padding, main_range[0])), num_pages]
else:
trailing = list(range(num_pages-tail+1, num_pages+1))
# finally, normalize values that are out of bound; this basically
# fixes all the things the above code screwed up in the simple case
# of few enough pages where one range would suffice.
main_range = [max(main_range[0], 1), min(main_range[1], num_pages)]
# make the result of our calculations available as custom ranges
# on the ``Page`` instance.
page.main_range = list(range(main_range[0], main_range[1]+1))
page.leading_range = leading
page.trailing_range = trailing
page.page_range = reduce(lambda x, y: x+((x and y) and [False])+y,
[page.leading_range, page.main_range, page.trailing_range])
page.__class__ = DiggPage
return page |
<SYSTEM_TASK:>
Get the version number without importing the mrcfile package.
<END_TASK>
<USER_TASK:>
Description:
def version():
"""Get the version number without importing the mrcfile package.""" |
namespace = {}
with open(os.path.join('mrcfile', 'version.py')) as f:
exec(f.read(), namespace)
return namespace['__version__'] |
<SYSTEM_TASK:>
Unless we're on the front page we'll have a kind_slug like 'movies'.
<END_TASK>
<USER_TASK:>
Description:
def get_event_kind(self):
"""
Unless we're on the front page we'll have a kind_slug like 'movies'.
We need to translate that into an event `kind` like 'movie'.
""" |
slug = self.kwargs.get('kind_slug', None)
if slug is None:
return None # Front page; showing all Event kinds.
else:
slugs_to_kinds = {v:k for k,v in Event.KIND_SLUGS.items()}
return slugs_to_kinds.get(slug, None) |
<SYSTEM_TASK:>
We'll have a kind_slug like 'movies'.
<END_TASK>
<USER_TASK:>
Description:
def get_work_kind(self):
"""
We'll have a kind_slug like 'movies'.
We need to translate that into a work `kind` like 'movie'.
""" |
slugs_to_kinds = {v:k for k,v in Work.KIND_SLUGS.items()}
return slugs_to_kinds.get(self.kind_slug, None) |
<SYSTEM_TASK:>
Returns a list of dicts, one per country that has at least one Venue
<END_TASK>
<USER_TASK:>
Description:
def get_countries(self):
"""
Returns a list of dicts, one per country that has at least one Venue
in it.
Each dict has 'code' and 'name' elements.
The list is sorted by the country 'name's.
""" |
qs = Venue.objects.values('country') \
.exclude(country='') \
.distinct() \
.order_by('country')
countries = []
for c in qs:
countries.append({
'code': c['country'],
'name': Venue.get_country_name(c['country'])
})
return sorted(countries, key=lambda k: k['name']) |
<SYSTEM_TASK:>
Re-save all the Works because something earlier didn't create their slugs.
<END_TASK>
<USER_TASK:>
Description:
def forwards(apps, schema_editor):
"""
Re-save all the Works because something earlier didn't create their slugs.
""" |
Work = apps.get_model('spectator_events', 'Work')
for work in Work.objects.all():
if not work.slug:
work.slug = generate_slug(work.pk)
work.save() |
<SYSTEM_TASK:>
Displays years and the number of events per year.
<END_TASK>
<USER_TASK:>
Description:
def annual_event_counts_card(kind='all', current_year=None):
"""
Displays years and the number of events per year.
kind is an Event kind (like 'cinema', 'gig', etc.) or 'all' (default).
current_year is an optional date object representing the year we're already
showing information about.
""" |
if kind == 'all':
card_title = 'Events per year'
else:
card_title = '{} per year'.format(Event.get_kind_name_plural(kind))
return {
'card_title': card_title,
'kind': kind,
'years': annual_event_counts(kind=kind),
'current_year': current_year
} |
<SYSTEM_TASK:>
Displays the tabs to different event_list pages.
<END_TASK>
<USER_TASK:>
Description:
def event_list_tabs(counts, current_kind, page_number=1):
"""
Displays the tabs to different event_list pages.
`counts` is a dict of number of events for each kind, like:
{'all': 30, 'gig': 12, 'movie': 18,}
`current_kind` is the event kind that's active, if any. e.g. 'gig',
'movie', etc.
`page_number` is the current page of this kind of events we're on.
""" |
return {
'counts': counts,
'current_kind': current_kind,
'page_number': page_number,
# A list of all the kinds we might show tabs for, like
# ['gig', 'movie', 'play', ...]
'event_kinds': Event.get_kinds(),
# A dict of data about each kind, keyed by kind ('gig') including
# data about 'name', 'name_plural' and 'slug':
'event_kinds_data': Event.get_kinds_data(),
} |
<SYSTEM_TASK:>
Displays Events that happened on the supplied date.
<END_TASK>
<USER_TASK:>
Description:
def day_events_card(date):
"""
Displays Events that happened on the supplied date.
`date` is a date object.
""" |
d = date.strftime(app_settings.DATE_FORMAT)
card_title = 'Events on {}'.format(d)
return {
'card_title': card_title,
'event_list': day_events(date=date),
} |
<SYSTEM_TASK:>
Displays a card showing the Creators that are associated with the most Events.
<END_TASK>
<USER_TASK:>
Description:
def most_seen_creators_card(event_kind=None, num=10):
"""
Displays a card showing the Creators that are associated with the most Events.
""" |
object_list = most_seen_creators(event_kind=event_kind, num=num)
object_list = chartify(object_list, 'num_events', cutoff=1)
return {
'card_title': 'Most seen people/groups',
'score_attr': 'num_events',
'object_list': object_list,
} |
<SYSTEM_TASK:>
Returns a QuerySet of the Creators that are associated with the most Works.
<END_TASK>
<USER_TASK:>
Description:
def most_seen_creators_by_works(work_kind=None, role_name=None, num=10):
"""
Returns a QuerySet of the Creators that are associated with the most Works.
""" |
return Creator.objects.by_works(kind=work_kind, role_name=role_name)[:num] |
<SYSTEM_TASK:>
Displays a card showing the Creators that are associated with the most Works.
<END_TASK>
<USER_TASK:>
Description:
def most_seen_creators_by_works_card(work_kind=None, role_name=None, num=10):
"""
Displays a card showing the Creators that are associated with the most Works.
e.g.:
{% most_seen_creators_by_works_card work_kind='movie' role_name='Director' num=5 %}
""" |
object_list = most_seen_creators_by_works(
work_kind=work_kind, role_name=role_name, num=num)
object_list = chartify(object_list, 'num_works', cutoff=1)
# Attempt to create a sensible card title...
if role_name:
# Yes, this pluralization is going to break at some point:
creators_name = '{}s'.format(role_name.capitalize())
else:
creators_name = 'People/groups'
if work_kind:
works_name = Work.get_kind_name_plural(work_kind).lower()
else:
works_name = 'works'
card_title = '{} with most {}'.format(creators_name, works_name)
return {
'card_title': card_title,
'score_attr': 'num_works',
'object_list': object_list,
} |
<SYSTEM_TASK:>
Displays a card showing the Works that are associated with the most Events.
<END_TASK>
<USER_TASK:>
Description:
def most_seen_works_card(kind=None, num=10):
"""
Displays a card showing the Works that are associated with the most Events.
""" |
object_list = most_seen_works(kind=kind, num=num)
object_list = chartify(object_list, 'num_views', cutoff=1)
if kind:
card_title = 'Most seen {}'.format(
Work.get_kind_name_plural(kind).lower())
else:
card_title = 'Most seen works'
return {
'card_title': card_title,
'score_attr': 'num_views',
'object_list': object_list,
'name_attr': 'title',
'use_cite': True,
} |
<SYSTEM_TASK:>
Change all Movie objects into Work objects, and their associated
<END_TASK>
<USER_TASK:>
Description:
def forwards(apps, schema_editor):
"""
Change all Movie objects into Work objects, and their associated
data into WorkRole and WorkSelection models, then delete the Movie.
""" |
Movie = apps.get_model('spectator_events', 'Movie')
Work = apps.get_model('spectator_events', 'Work')
WorkRole = apps.get_model('spectator_events', 'WorkRole')
WorkSelection = apps.get_model('spectator_events', 'WorkSelection')
for m in Movie.objects.all():
work = Work.objects.create(
kind='movie',
title=m.title,
title_sort=m.title_sort,
year=m.year,
imdb_id=m.imdb_id
)
for role in m.roles.all():
WorkRole.objects.create(
creator=role.creator,
work=work,
role_name=role.role_name,
role_order=role.role_order
)
for selection in m.events.all():
WorkSelection.objects.create(
event=selection.event,
work=work,
order=selection.order
)
m.delete() |
<SYSTEM_TASK:>
Paginate the queryset, if needed.
<END_TASK>
<USER_TASK:>
Description:
def paginate_queryset(self, queryset, page_size):
"""
Paginate the queryset, if needed.
This is EXACTLY the same as the standard ListView.paginate_queryset()
except for this line:
page = paginator.page(page_number, softlimit=True)
Because we want to use the DiggPaginator's softlimit option.
So that if you're viewing a page of, say, Flickr photos, and you switch
from viewing by Uploaded Time to viewing by Taken Time, the new
ordering might have fewer pages. In that case we want to see the final
page, not a 404. The softlimit does that, but I can't see how to use
it without copying all of this...
""" |
paginator = self.get_paginator(
queryset,
page_size,
orphans = self.get_paginate_orphans(),
allow_empty_first_page = self.get_allow_empty(),
body = self.paginator_body,
margin = self.paginator_margin,
padding = self.paginator_padding,
tail = self.paginator_tail,
)
page_kwarg = self.page_kwarg
page = self.kwargs.get(page_kwarg) or self.request.GET.get(page_kwarg) or 1
try:
page_number = int(page)
except ValueError:
if page == 'last':
page_number = paginator.num_pages
else:
raise Http404(_("Page is not 'last', nor can it be converted to an int."))
try:
page = paginator.page(page_number, softlimit=True)
return (paginator, page, page.object_list, page.has_other_pages())
except InvalidPage as e:
raise Http404(_('Invalid page (%(page_number)s): %(message)s') % {
'page_number': page_number,
'message': str(e)
}) |
<SYSTEM_TASK:>
Returns a QuerySet of Publications that were being read on `date`.
<END_TASK>
<USER_TASK:>
Description:
def day_publications(date):
"""
Returns a QuerySet of Publications that were being read on `date`.
`date` is a date object.
""" |
readings = Reading.objects \
.filter(start_date__lte=date) \
.filter(
Q(end_date__gte=date) | Q(end_date__isnull=True)
)
if readings:
return Publication.objects.filter(reading__in=readings) \
.select_related('series') \
.prefetch_related('roles__creator') \
.distinct()
else:
return Publication.objects.none() |
<SYSTEM_TASK:>
Displays Publications that were being read on `date`.
<END_TASK>
<USER_TASK:>
Description:
def day_publications_card(date):
"""
Displays Publications that were being read on `date`.
`date` is a date object.
""" |
d = date.strftime(app_settings.DATE_FORMAT)
card_title = 'Reading on {}'.format(d)
return {
'card_title': card_title,
'publication_list': day_publications(date=date),
} |
<SYSTEM_TASK:>
Change Events with kind 'movie' to 'cinema'
<END_TASK>
<USER_TASK:>
Description:
def forwards(apps, schema_editor):
"""
Change Events with kind 'movie' to 'cinema'
and Events with kind 'play' to 'theatre'.
Purely for more consistency.
""" |
Event = apps.get_model('spectator_events', 'Event')
for ev in Event.objects.filter(kind='movie'):
ev.kind = 'cinema'
ev.save()
for ev in Event.objects.filter(kind='play'):
ev.kind = 'theatre'
ev.save() |
<SYSTEM_TASK:>
Get the environment variable or return exception.
<END_TASK>
<USER_TASK:>
Description:
def get_env_variable(var_name, default=None):
"""Get the environment variable or return exception.""" |
try:
return os.environ[var_name]
except KeyError:
if default is None:
error_msg = "Set the %s environment variable" % var_name
raise ImproperlyConfigured(error_msg)
else:
return default |
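A hedged usage sketch from a Django settings module; the variable names are placeholders:
SECRET_KEY = get_env_variable('DJANGO_SECRET_KEY')             # raises if unset
DEBUG = get_env_variable('DJANGO_DEBUG', default='') == '1'    # uses fallback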
<SYSTEM_TASK:>
Having added the new 'exhibition' Work type, we're going to assume that
<END_TASK>
<USER_TASK:>
Description:
def forwards(apps, schema_editor):
"""
Having added the new 'exhibition' Work type, we're going to assume that
every Event of type 'museum' should actually have one Exhibition attached.
So, we'll add one, with the same title as the Event.
And we'll move all Creators from the Event to the Exhibition.
""" |
Event = apps.get_model('spectator_events', 'Event')
Work = apps.get_model('spectator_events', 'Work')
WorkRole = apps.get_model('spectator_events', 'WorkRole')
WorkSelection = apps.get_model('spectator_events', 'WorkSelection')
for event in Event.objects.filter(kind='museum'):
# Create a new Work based on this Event's details.
work = Work.objects.create(
kind='exhibition',
title=event.title,
title_sort=event.title_sort
)
# This doesn't generate the slug field automatically because Django.
# So we'll have to do it manually. Graarhhh.
work.slug = generate_slug(work.pk)
work.save()
# Associate the new Work with the Event.
WorkSelection.objects.create(
event=event,
work=work
)
# Associate any Creators on the Event with the new Work.
for role in event.roles.all():
WorkRole.objects.create(
creator=role.creator,
work=work,
role_name=role.role_name,
role_order=role.role_order
)
# Remove Creators from the Event.
role.delete() |
<SYSTEM_TASK:>
The Creators who have been most-read, ordered by number of readings.
<END_TASK>
<USER_TASK:>
Description:
def by_readings(self, role_names=['', 'Author']):
"""
The Creators who have been most-read, ordered by number of readings.
By default it will only include Creators whose role was left empty,
or is 'Author'.
Each Creator will have a `num_readings` attribute.
""" |
if not spectator_apps.is_enabled('reading'):
raise ImproperlyConfigured("To use the CreatorManager.by_readings() method, 'spectator.reading' must be in INSTALLED_APPS.")
qs = self.get_queryset()
qs = qs.filter(publication_roles__role_name__in=role_names) \
.exclude(publications__reading__isnull=True) \
.annotate(num_readings=Count('publications__reading')) \
.order_by('-num_readings', 'name_sort')
return qs |
<SYSTEM_TASK:>
Get the Creators involved in the most Events.
<END_TASK>
<USER_TASK:>
Description:
def by_events(self, kind=None):
"""
Get the Creators involved in the most Events.
This only counts Creators directly involved in an Event.
i.e. if a Creator is the director of a movie Work, and an Event was
a viewing of that movie, that Event wouldn't count. Unless they were
also directly involved in the Event (e.g. speaking after the movie).
kind - If supplied, only Events with that `kind` value will be counted.
""" |
if not spectator_apps.is_enabled('events'):
raise ImproperlyConfigured("To use the CreatorManager.by_events() method, 'spectator.events' must be in INSTALLED_APPS.")
qs = self.get_queryset()
if kind is not None:
qs = qs.filter(events__kind=kind)
qs = qs.annotate(num_events=Count('events', distinct=True)) \
.order_by('-num_events', 'name_sort')
return qs |
<SYSTEM_TASK:>
Get the Creators involved in the most Works.
<END_TASK>
<USER_TASK:>
Description:
def by_works(self, kind=None, role_name=None):
"""
Get the Creators involved in the most Works.
kind - If supplied, only Works with that `kind` value will be counted.
role_name - If supplied, only Works on which the role is that will be counted.
e.g. To get all 'movie' Works on which the Creators had the role 'Director':
Creator.objects.by_works(kind='movie', role_name='Director')
""" |
if not spectator_apps.is_enabled('events'):
raise ImproperlyConfigured("To use the CreatorManager.by_works() method, 'spectator.events' must be in INSTALLED_APPS.")
qs = self.get_queryset()
filter_kwargs = {}
if kind is not None:
filter_kwargs['works__kind'] = kind
if role_name is not None:
filter_kwargs['work_roles__role_name'] = role_name
if filter_kwargs:
qs = qs.filter(**filter_kwargs)
qs = qs.annotate(num_works=Count('works', distinct=True)) \
.order_by('-num_works', 'name_sort')
return qs |
<SYSTEM_TASK:>
Clean argument to related object
<END_TASK>
<USER_TASK:>
Description:
def clean_options(self,
using_keytab=False, principal=None,
keytab_file=None, ccache_file=None,
password=None):
"""Clean argument to related object
:param bool using_keytab: refer to ``krbContext.__init__``.
:param str principal: refer to ``krbContext.__init__``.
:param str keytab_file: refer to ``krbContext.__init__``.
:param str ccache_file: refer to ``krbContext.__init__``.
:param str password: refer to ``krbContext.__init__``.
:return: a mapping containing cleaned names and values, which are used
internally.
:rtype: dict
:raises ValueError: principal is missing or given keytab file does not
exist, when initialize from a keytab.
""" |
cleaned = {}
if using_keytab:
if principal is None:
raise ValueError('Principal is required when using key table.')
princ_name = gssapi.names.Name(
principal, gssapi.names.NameType.kerberos_principal)
if keytab_file is None:
cleaned['keytab'] = DEFAULT_KEYTAB
elif not os.path.exists(keytab_file):
raise ValueError(
'Keytab file {0} does not exist.'.format(keytab_file))
else:
cleaned['keytab'] = keytab_file
else:
if principal is None:
principal = get_login()
princ_name = gssapi.names.Name(principal,
gssapi.names.NameType.user)
cleaned['using_keytab'] = using_keytab
cleaned['principal'] = princ_name
cleaned['ccache'] = ccache_file or DEFAULT_CCACHE
cleaned['password'] = password
return cleaned |
<SYSTEM_TASK:>
Initialize credential cache with keytab
<END_TASK>
<USER_TASK:>
Description:
def init_with_keytab(self):
"""Initialize credential cache with keytab""" |
creds_opts = {
'usage': 'initiate',
'name': self._cleaned_options['principal'],
}
store = {}
if self._cleaned_options['keytab'] != DEFAULT_KEYTAB:
store['client_keytab'] = self._cleaned_options['keytab']
if self._cleaned_options['ccache'] != DEFAULT_CCACHE:
store['ccache'] = self._cleaned_options['ccache']
if store:
creds_opts['store'] = store
creds = gssapi.creds.Credentials(**creds_opts)
try:
creds.lifetime
except gssapi.exceptions.ExpiredCredentialsError:
new_creds_opts = copy.deepcopy(creds_opts)
# Get new credential and put it into a temporary ccache
if 'store' in new_creds_opts:
new_creds_opts['store']['ccache'] = _get_temp_ccache()
else:
new_creds_opts['store'] = {'ccache': _get_temp_ccache()}
creds = gssapi.creds.Credentials(**new_creds_opts)
# Then, store new credential back to original specified ccache,
# whatever a given ccache file or the default one.
_store = None
# If the default ccache is used, no need to specify ccache in the store
# parameter passed to ``creds.store``.
if self._cleaned_options['ccache'] != DEFAULT_CCACHE:
_store = {'ccache': store['ccache']}
creds.store(usage='initiate', store=_store, overwrite=True) |
<SYSTEM_TASK:>
Initialize credential cache with password
<END_TASK>
<USER_TASK:>
Description:
def init_with_password(self):
"""Initialize credential cache with password
**Caution:** once you enter a password from the command line, or pass it
to the API directly, the given password is not always encrypted. Although
getting a credential with a password works, from a security point of view
it is strongly recommended **NOT** to use this in any formal production
environment. If you need to initialize a credential in an application-to-
application Kerberos authentication context, a keytab has to be used.
:raises IOError: when trying to prompt for a password from the command
line but no tty is available.
""" |
creds_opts = {
'usage': 'initiate',
'name': self._cleaned_options['principal'],
}
if self._cleaned_options['ccache'] != DEFAULT_CCACHE:
creds_opts['store'] = {'ccache': self._cleaned_options['ccache']}
cred = gssapi.creds.Credentials(**creds_opts)
try:
cred.lifetime
except gssapi.exceptions.ExpiredCredentialsError:
password = self._cleaned_options['password']
if not password:
if not sys.stdin.isatty():
raise IOError(
'krbContext is not running from a terminal. So, you '
'need to run kinit with your principal manually before'
' anything goes.')
# If there is no password specified via API call, prompt to
# enter one in order to continue to get credential. BUT, in
# some cases, blocking program and waiting for input of
# password is really bad, which may be only suitable for some
# simple use cases, for example, writing some scripts to test
# something that need Kerberos authentication. Anyway, whether
# it is really to enter a password from command line, it
# depends on concrete use cases totally.
password = getpass.getpass()
cred = gssapi.raw.acquire_cred_with_password(
self._cleaned_options['principal'], password)
ccache = self._cleaned_options['ccache']
if ccache == DEFAULT_CCACHE:
gssapi.raw.store_cred(cred.creds,
usage='initiate',
overwrite=True)
else:
gssapi.raw.store_cred_into({'ccache': ccache},
cred.creds,
usage='initiate',
overwrite=True) |
<SYSTEM_TASK:>
Generate a dictionary with template names and file paths.
<END_TASK>
<USER_TASK:>
Description:
def templates(self):
"""Generate a dictionary with template names and file paths.""" |
templates = {}
result = []
if self.entry_point_group_templates:
result = self.load_entry_point_group_templates(
self.entry_point_group_templates) or []
for template in result:
for name, path in template.items():
templates[name] = path
return templates |
<SYSTEM_TASK:>
Register mappings from a package under given alias.
<END_TASK>
<USER_TASK:>
Description:
def register_mappings(self, alias, package_name):
"""Register mappings from a package under given alias.
:param alias: The alias.
:param package_name: The package name.
""" |
# For backwards compatibility, we also allow for ES2 mappings to be
# placed at the root level of the specified package path, and not in
# the `<package-path>/v2` directory.
if ES_VERSION[0] == 2:
try:
resource_listdir(package_name, 'v2')
package_name += '.v2'
except (OSError, IOError) as ex:
if getattr(ex, 'errno', 0) != errno.ENOENT:
raise
warnings.warn(
"Having mappings in a path which doesn't specify the "
"Elasticsearch version is deprecated. Please move your "
"mappings to a subfolder named according to the "
"Elasticsearch version which your mappings are intended "
"for. (e.g. '{}/v2/{}')".format(
package_name, alias),
PendingDeprecationWarning)
else:
package_name = '{}.v{}'.format(package_name, ES_VERSION[0])
def _walk_dir(aliases, *parts):
root_name = build_index_name(self.app, *parts)
resource_name = os.path.join(*parts)
if root_name not in aliases:
self.number_of_indexes += 1
data = aliases.get(root_name, {})
for filename in resource_listdir(package_name, resource_name):
index_name = build_index_name(
self.app,
*(parts + (filename, ))
)
file_path = os.path.join(resource_name, filename)
if resource_isdir(package_name, file_path):
_walk_dir(data, *(parts + (filename, )))
continue
ext = os.path.splitext(filename)[1]
if ext not in {'.json', }:
continue
assert index_name not in data, 'Duplicate index'
data[index_name] = self.mappings[index_name] = \
resource_filename(
package_name, os.path.join(resource_name, filename))
self.number_of_indexes += 1
aliases[root_name] = data
# Start the recursion here:
_walk_dir(self.aliases, alias) |
<SYSTEM_TASK:>
Register templates from the provided directory.
<END_TASK>
<USER_TASK:>
Description:
def register_templates(self, directory):
"""Register templates from the provided directory.
:param directory: The templates directory.
""" |
try:
resource_listdir(directory, 'v{}'.format(ES_VERSION[0]))
directory = '{}/v{}'.format(directory, ES_VERSION[0])
except (OSError, IOError) as ex:
if getattr(ex, 'errno', 0) == errno.ENOENT:
raise OSError(
"Please move your templates to a subfolder named "
"according to the Elasticsearch version "
"which your templates are intended "
"for. (e.g. '{}.v{}')".format(directory,
ES_VERSION[0]))
result = {}
module_name, parts = directory.split('.')[0], directory.split('.')[1:]
parts = tuple(parts)
def _walk_dir(parts):
resource_name = os.path.join(*parts)
for filename in resource_listdir(module_name, resource_name):
template_name = build_index_name(
self.app,
*(parts[1:] + (filename, ))
)
file_path = os.path.join(resource_name, filename)
if resource_isdir(module_name, file_path):
_walk_dir((parts + (filename, )))
continue
ext = os.path.splitext(filename)[1]
if ext not in {'.json', }:
continue
result[template_name] = resource_filename(
module_name, os.path.join(resource_name, filename))
# Start the recursion here:
_walk_dir(parts)
return result |
<SYSTEM_TASK:>
Flush and refresh one or more indices.
<END_TASK>
<USER_TASK:>
Description:
def flush_and_refresh(self, index):
"""Flush and refresh one or more indices.
.. warning::
Do not call this method unless you know what you are doing. This
method is only intended to be called during tests.
""" |
self.client.indices.flush(wait_if_ongoing=True, index=index)
self.client.indices.refresh(index=index)
self.client.cluster.health(
wait_for_status='yellow', request_timeout=30)
return True |
<SYSTEM_TASK:>
Get version of Elasticsearch running on the cluster.
<END_TASK>
<USER_TASK:>
Description:
def cluster_version(self):
"""Get version of Elasticsearch running on the cluster.""" |
versionstr = self.client.info()['version']['number']
return [int(x) for x in versionstr.split('.')] |
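The parsing is a plain split of the dotted version string, for example:

versionstr = '7.10.2'  # as returned by client.info()['version']['number']
print([int(x) for x in versionstr.split('.')])  # [7, 10, 2]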
<SYSTEM_TASK:>
Get a filtered list of aliases based on configuration.
<END_TASK>
<USER_TASK:>
Description:
def active_aliases(self):
"""Get a filtered list of aliases based on configuration.
Returns aliases and their mappings that are defined in the
`SEARCH_MAPPINGS` config variable. If the `SEARCH_MAPPINGS` is set to
`None` (the default), all aliases are included.
""" |
whitelisted_aliases = self.app.config.get('SEARCH_MAPPINGS')
if whitelisted_aliases is None:
return self.aliases
else:
return {k: v for k, v in self.aliases.items()
if k in whitelisted_aliases} |
<SYSTEM_TASK:>
Yield tuple with created index name and responses from a client.
<END_TASK>
<USER_TASK:>
Description:
def create(self, ignore=None):
"""Yield tuple with created index name and responses from a client.""" |
ignore = ignore or []
def _create(tree_or_filename, alias=None):
"""Create indices and aliases by walking DFS."""
# Iterate over aliases:
for name, value in tree_or_filename.items():
if isinstance(value, dict):
for result in _create(value, alias=name):
yield result
else:
with open(value, 'r') as body:
yield name, self.client.indices.create(
index=name,
body=json.load(body),
ignore=ignore,
)
if alias:
yield alias, self.client.indices.put_alias(
index=list(_get_indices(tree_or_filename)),
name=alias,
ignore=ignore,
)
for result in _create(self.active_aliases):
yield result |
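A hypothetical driver for this generator, assuming a `search` state object like the one in the registration sketch above; ignore=[400] tolerates indexes that already exist:

for name, response in search.create(ignore=[400]):
    print(name, response)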
<SYSTEM_TASK:>
Yield tuple with registered template and response from client.
<END_TASK>
<USER_TASK:>
Description:
def put_templates(self, ignore=None):
"""Yield tuple with registered template and response from client.""" |
ignore = ignore or []
def _replace_prefix(template_path, body):
"""Replace index prefix in template request body."""
pattern = '__SEARCH_INDEX_PREFIX__'
prefix = self.app.config['SEARCH_INDEX_PREFIX'] or ''
if prefix:
            assert pattern in body, (
                "You are using the prefix `{0}`, "
                "but the template `{1}` does not contain the "
                "pattern `{2}`.".format(prefix, template_path, pattern))
return body.replace(pattern, prefix)
def _put_template(template):
"""Put template in search client."""
with open(self.templates[template], 'r') as fp:
body = fp.read()
replaced_body = _replace_prefix(self.templates[template], body)
return self.templates[template],\
current_search_client.indices.put_template(
name=template,
body=json.loads(replaced_body),
ignore=ignore,
)
for template in self.templates:
yield _put_template(template) |
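For illustration, here is how the placeholder substitution behaves on a hypothetical template body (the index pattern and prefix are assumptions):

import json

body = '{"index_patterns": ["__SEARCH_INDEX_PREFIX__records-*"]}'
replaced = body.replace('__SEARCH_INDEX_PREFIX__', 'myprefix-')
print(json.loads(replaced)['index_patterns'])  # ['myprefix-records-*']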
<SYSTEM_TASK:>
Yield tuple with deleted index name and responses from a client.
<END_TASK>
<USER_TASK:>
Description:
def delete(self, ignore=None):
"""Yield tuple with deleted index name and responses from a client.""" |
ignore = ignore or []
def _delete(tree_or_filename, alias=None):
"""Delete indexes and aliases by walking DFS."""
if alias:
yield alias, self.client.indices.delete_alias(
index=list(_get_indices(tree_or_filename)),
name=alias,
ignore=ignore,
)
# Iterate over aliases:
for name, value in tree_or_filename.items():
if isinstance(value, dict):
for result in _delete(value, alias=name):
yield result
else:
yield name, self.client.indices.delete(
index=name,
ignore=ignore,
)
for result in _delete(self.active_aliases):
yield result |
<SYSTEM_TASK:>
Start the poor_consumer.
<END_TASK>
<USER_TASK:>
Description:
def main():
"""Start the poor_consumer.""" |
try:
        opts, args = getopt.getopt(sys.argv[1:], "hv", ["help", "nack=",
                                   "servers=", "queues="])
except getopt.GetoptError as err:
print str(err)
usage()
sys.exit()
# defaults
nack = 0.0
verbose = False
servers = "localhost:7712,localhost:7711"
queues = "test"
for o, a in opts:
if o == "-v":
verbose = True
elif o in ("-h", "--help"):
usage()
sys.exit()
elif o in ("--nack"):
nack = float(a)
elif o in ("--servers"):
servers = a
elif o in ("--queues"):
queues = a
else:
assert False, "unhandled option"
    # prepare servers and queues for pydisque
servers = servers.split(",")
queues = queues.split(",")
c = Client(servers)
c.connect()
while True:
jobs = c.get_job(queues)
for queue_name, job_id, job in jobs:
rnd = random.random()
            # as this is a test processor, we don't do any validation on
            # the actual job body, so let's just pay attention to the ids
if rnd >= nack:
print ">>> received job:", job_id
c.ack_job(job_id)
else:
print ">>> bouncing job:", job_id
c.nack_job(job_id) |
<SYSTEM_TASK:>
Connect to one of the Disque nodes.
<END_TASK>
<USER_TASK:>
Description:
def connect(self):
"""
Connect to one of the Disque nodes.
You can get current connection with connected_node property
:returns: nothing
""" |
self.connected_node = None
for i, node in self.nodes.items():
host, port = i.split(':')
port = int(port)
redis_client = redis.Redis(host, port, **self.client_kw_args)
try:
ret = redis_client.execute_command('HELLO')
format_version, node_id = ret[0], ret[1]
others = ret[2:]
self.nodes[i] = Node(node_id, host, port, redis_client)
self.connected_node = self.nodes[i]
except redis.exceptions.ConnectionError:
pass
if not self.connected_node:
            raise ConnectionError("couldn't connect to any nodes")
logger.info("connected to node %s" % self.connected_node) |
<SYSTEM_TASK:>
Execute a command on the connected server.
<END_TASK>
<USER_TASK:>
Description:
def execute_command(self, *args, **kwargs):
"""Execute a command on the connected server.""" |
try:
return self.get_connection().execute_command(*args, **kwargs)
        except ConnectionError:
            logger.warning('trying to reconnect')
            self.connect()
            logger.warning('connected')
raise |
<SYSTEM_TASK:>
Add a job to a queue.
<END_TASK>
<USER_TASK:>
Description:
def add_job(self, queue_name, job, timeout=200, replicate=None, delay=None,
retry=None, ttl=None, maxlen=None, asynchronous=None):
"""
Add a job to a queue.
ADDJOB queue_name job <ms-timeout> [REPLICATE <count>] [DELAY <sec>]
[RETRY <sec>] [TTL <sec>] [MAXLEN <count>] [ASYNC]
:param queue_name: is the name of the queue, any string, basically.
:param job: is a string representing the job.
:param timeout: is the command timeout in milliseconds.
:param replicate: count is the number of nodes the job should be
replicated to.
:param delay: sec is the number of seconds that should elapse
before the job is queued by any server.
:param retry: sec period after which, if no ACK is received, the
job is put again into the queue for delivery. If RETRY is 0,
the job has an at-most-once delivery semantics.
:param ttl: sec is the max job life in seconds. After this time,
the job is deleted even if it was not successfully delivered.
:param maxlen: count specifies that if there are already count
messages queued for the specified queue name, the message is
refused and an error reported to the client.
:param asynchronous: asks the server to let the command return ASAP and
replicate the job to other nodes in the background. The job
gets queued ASAP, while normally the job is put into the queue
            only when the client gets a positive reply. The argument is
            named `asynchronous` because `async` is a reserved keyword
            as of Python 3.7.
:returns: job_id
""" |
command = ['ADDJOB', queue_name, job, timeout]
if replicate:
command += ['REPLICATE', replicate]
if delay:
command += ['DELAY', delay]
if retry is not None:
command += ['RETRY', retry]
if ttl:
command += ['TTL', ttl]
if maxlen:
command += ['MAXLEN', maxlen]
if asynchronous:
command += ['ASYNC']
# TODO(canardleteer): we need to handle "-PAUSE" messages more
# appropriately, for now it's up to the person using the library
# to handle a generic ResponseError on their own.
logger.debug("sending job - %s", command)
job_id = self.execute_command(*command)
logger.debug("sent job - %s", command)
logger.debug("job_id: %s " % job_id)
return job_id |
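A hypothetical producer call, reusing the client `c` from the connection sketch above; the queue name and payload are placeholders:

import json

job_id = c.add_job(
    'test',                     # queue name
    json.dumps({'order': 42}),  # the job body is an opaque string
    timeout=200,                # command timeout in milliseconds
    retry=30,                   # redeliver if not ACKed within 30 seconds
    ttl=3600,                   # give up on the job after an hour
)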
<SYSTEM_TASK:>
Return some number of jobs from specified queues.
<END_TASK>
<USER_TASK:>
Description:
def get_job(self, queues, timeout=None, count=None, nohang=False, withcounters=False):
"""
Return some number of jobs from specified queues.
GETJOB [NOHANG] [TIMEOUT <ms-timeout>] [COUNT <count>] [WITHCOUNTERS] FROM
queue1 queue2 ... queueN
:param queues: name of queues
:returns: list of tuple(job_id, queue_name, job), tuple(job_id, queue_name, job, nacks, additional_deliveries) or empty list
:rtype: list
""" |
assert queues
command = ['GETJOB']
if nohang:
command += ['NOHANG']
if timeout:
command += ['TIMEOUT', timeout]
if count:
command += ['COUNT', count]
if withcounters:
command += ['WITHCOUNTERS']
command += ['FROM'] + queues
results = self.execute_command(*command)
if not results:
return []
if withcounters:
return [(job_id, queue_name, job, nacks, additional_deliveries) for
job_id, queue_name, job, _, nacks, _, additional_deliveries in results]
else:
return [(job_id, queue_name, job) for
job_id, queue_name, job in results] |
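A hypothetical consumer loop reusing `c`; the tuple order follows the docstring above:

for job_id, queue_name, job in c.get_job(['test'], timeout=1000, count=5):
    print(queue_name, job_id, job)
    c.ack_job(job_id)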
<SYSTEM_TASK:>
Describe the job.
<END_TASK>
<USER_TASK:>
Description:
def show(self, job_id, return_dict=False):
"""
Describe the job.
:param job_id:
""" |
rtn = self.execute_command('SHOW', job_id)
if return_dict:
grouped = self._grouper(rtn, 2)
rtn = dict((a, b) for a, b in grouped)
return rtn |
<SYSTEM_TASK:>
Pause a queue.
<END_TASK>
<USER_TASK:>
Description:
def pause(self, queue_name, kw_in=None, kw_out=None, kw_all=None,
kw_none=None, kw_state=None, kw_bcast=None):
"""
Pause a queue.
Unfortunately, the PAUSE keywords are mostly reserved words in Python,
so I've been a little creative in the function variable names. Open
to suggestions to change it (canardleteer)
:param queue_name: The job queue we are modifying.
:param kw_in: pause the queue in input.
:param kw_out: pause the queue in output.
:param kw_all: pause the queue in input and output (same as specifying
both the in and out options).
:param kw_none: clear the paused state in input and output.
:param kw_state: just report the current queue state.
:param kw_bcast: send a PAUSE command to all the reachable nodes of
the cluster to set the same queue in the other nodes
to the same state.
""" |
command = ["PAUSE", queue_name]
if kw_in:
command += ["in"]
if kw_out:
command += ["out"]
if kw_all:
command += ["all"]
if kw_none:
command += ["none"]
if kw_state:
command += ["state"]
if kw_bcast:
command += ["bcast"]
return self.execute_command(*command) |
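A hypothetical pause/inspect/resume sequence reusing `c`:

c.pause('test', kw_in=True, kw_bcast=True)   # stop intake cluster-wide
print(c.pause('test', kw_state=True))        # report the current state
c.pause('test', kw_none=True)                # clear the pause again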
<SYSTEM_TASK:>
Iterate all the existing queues in the local node.
<END_TASK>
<USER_TASK:>
Description:
def qscan(self, cursor=0, count=None, busyloop=None, minlen=None,
maxlen=None, importrate=None):
"""
Iterate all the existing queues in the local node.
        :param count: A hint about how much work to do per iteration.
        :param busyloop: Block and return all the elements in a busy loop.
        :param minlen: Don't return elements with fewer than count jobs queued.
        :param maxlen: Don't return elements with more than count jobs queued.
        :param importrate: Only return elements with a job import rate
            (from other nodes) >= rate.
""" |
command = ["QSCAN", cursor]
if count:
command += ["COUNT", count]
if busyloop:
command += ["BUSYLOOP"]
if minlen:
command += ["MINLEN", minlen]
if maxlen:
command += ["MAXLEN", maxlen]
if importrate:
command += ["IMPORTRATE", importrate]
return self.execute_command(*command) |
<SYSTEM_TASK:>
Iterate all the existing jobs in the local node.
<END_TASK>
<USER_TASK:>
Description:
def jscan(self, cursor=0, count=None, busyloop=None, queue=None,
state=None, reply=None):
"""Iterate all the existing jobs in the local node.
        :param count: A hint about how much work to do per iteration.
:param busyloop: Block and return all the elements in a busy loop.
:param queue: Return only jobs in the specified queue.
:param state: Must be a list - Return jobs in the specified state.
Can be used multiple times for a logic OR.
:param reply: None or string {"all", "id"} - Job reply type. Type can
be all or id. Default is to report just the job ID. If all is
specified the full job state is returned like for the SHOW command.
""" |
command = ["JSCAN", cursor]
if count:
command += ["COUNT", count]
if busyloop:
command += ["BUSYLOOP"]
if queue:
command += ["QUEUE", queue]
        if isinstance(state, list):
for s in state:
command += ["STATE", s]
if reply:
command += ["REPLY", reply]
return self.execute_command(*command) |
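A hypothetical full scan reusing `c`; this assumes, as with other *SCAN-style commands, that each call returns a (next_cursor, items) pair:

cursor, jobs = 0, []
while True:
    cursor, batch = c.jscan(cursor=cursor, queue='test',
                            state=['queued'], reply='all')
    jobs.extend(batch)
    if int(cursor) == 0:
        break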
<SYSTEM_TASK:>
Build an index name from parts.
<END_TASK>
<USER_TASK:>
Description:
def build_index_name(app, *parts):
"""Build an index name from parts.
:param parts: Parts that should be combined to make an index name.
""" |
base_index = os.path.splitext(
'-'.join([part for part in parts if part])
)[0]
return prefix_index(app=app, index=base_index) |
<SYSTEM_TASK:>
Initialize registered aliases and mappings.
<END_TASK>
<USER_TASK:>
Description:
def init(force):
"""Initialize registered aliases and mappings.""" |
click.secho('Creating indexes...', fg='green', bold=True, file=sys.stderr)
with click.progressbar(
current_search.create(ignore=[400] if force else None),
length=current_search.number_of_indexes) as bar:
for name, response in bar:
bar.label = name
click.secho('Putting templates...', fg='green', bold=True, file=sys.stderr)
with click.progressbar(
current_search.put_templates(ignore=[400] if force else None),
length=len(current_search.templates.keys())) as bar:
for response in bar:
bar.label = response |
<SYSTEM_TASK:>
Delete index by its name.
<END_TASK>
<USER_TASK:>
Description:
def delete(index_name, force, verbose):
"""Delete index by its name.""" |
result = current_search_client.indices.delete(
index=index_name,
ignore=[400, 404] if force else None,
)
if verbose:
click.echo(json.dumps(result)) |
<SYSTEM_TASK:>
Return records by their identifiers.
<END_TASK>
<USER_TASK:>
Description:
def get_records(self, ids):
"""Return records by their identifiers.
        :param ids: A list of record identifiers.
:returns: A list of records.
""" |
return self.query(Ids(values=[str(id_) for id_ in ids])) |
<SYSTEM_TASK:>
Return faceted search instance with defaults set.
<END_TASK>
<USER_TASK:>
Description:
def faceted_search(cls, query=None, filters=None, search=None):
"""Return faceted search instance with defaults set.
:param query: Elastic DSL query object (``Q``).
:param filters: Dictionary with selected facet values.
:param search: An instance of ``Search`` class. (default: ``cls()``).
""" |
search_ = search or cls()
class RecordsFacetedSearch(FacetedSearch):
"""Pass defaults from ``cls.Meta`` object."""
index = prefix_index(app=current_app, index=search_._index[0])
doc_types = getattr(search_.Meta, 'doc_types', ['_all'])
fields = getattr(search_.Meta, 'fields', ('*', ))
facets = getattr(search_.Meta, 'facets', {})
def search(self):
"""Use ``search`` or ``cls()`` instead of default Search."""
# Later versions of `elasticsearch-dsl` (>=5.1.0) changed the
# Elasticsearch FacetedResponse class constructor signature.
if ES_VERSION[0] > 2:
return search_.response_class(FacetedResponse)
return search_.response_class(partial(FacetedResponse, self))
return RecordsFacetedSearch(query=query, filters=filters or {}) |
<SYSTEM_TASK:>
Add the preference param to the ES request and return a new Search.
<END_TASK>
<USER_TASK:>
Description:
def with_preference_param(self):
"""Add the preference param to the ES request and return a new Search.
        The preference param avoids the bouncing effect with multiple
        replicas, as described in the Elasticsearch documentation.
See: https://www.elastic.co/guide/en/elasticsearch/guide/current
/_search_options.html#_preference for more information.
""" |
user_hash = self._get_user_hash()
if user_hash:
return self.params(preference=user_hash)
return self |
<SYSTEM_TASK:>
Calculate a digest based on request's User-Agent and IP address.
<END_TASK>
<USER_TASK:>
Description:
def _get_user_hash(self):
"""Calculate a digest based on request's User-Agent and IP address.""" |
if request:
user_hash = '{ip}-{ua}'.format(ip=request.remote_addr,
ua=self._get_user_agent())
alg = hashlib.md5()
alg.update(user_hash.encode('utf8'))
return alg.hexdigest()
return None |
<SYSTEM_TASK:>
Beautify JSON string or file.
<END_TASK>
<USER_TASK:>
Description:
def beautify(filename=None, json_str=None):
"""Beautify JSON string or file.
Keyword arguments:
:param filename: use its contents as json string instead of
json_str param.
:param json_str: json string to be beautified.
""" |
if filename is not None:
with open(filename) as json_file:
json_str = json.load(json_file)
return json.dumps(json_str, indent=4, sort_keys=True) |
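A quick usage note: json_str is handed straight to json.dumps, so any already-parsed JSON value works here, not only a string:

print(beautify(json_str={'b': 1, 'a': [2, 3]}))
# {
#     "a": [
#         2,
#         3
#     ],
#     "b": 1
# }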
<SYSTEM_TASK:>
Replace occurrences of old_str with new_str, logging the line numbers where replacements are made
<END_TASK>
<USER_TASK:>
Description:
def replace(pretty, old_str, new_str):
""" Replace strings giving some info on where
the replacement was done
""" |
out_str = ''
line_number = 1
changes = 0
for line in pretty.splitlines(keepends=True):
new_line = line.replace(old_str, new_str)
if line.find(old_str) != -1:
logging.debug('%s', line_number)
logging.debug('< %s', line)
logging.debug('> %s', new_line)
changes += 1
out_str += new_line
line_number += 1
logging.info('Total changes(%s): %s', old_str, changes)
return out_str |
<SYSTEM_TASK:>
Wait for and then return a connected socket.
<END_TASK>
<USER_TASK:>
Description:
def receive_connection():
"""Wait for and then return a connected socket..
Opens a TCP connection on port 8080, and waits for a single client.
""" |
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server.bind(("localhost", 8080))
server.listen(1)
client = server.accept()[0]
server.close()
return client |
<SYSTEM_TASK:>
Send message to client and close the connection.
<END_TASK>
<USER_TASK:>
Description:
def send_message(client, message):
"""Send message to client and close the connection.""" |
print(message)
client.send("HTTP/1.1 200 OK\r\n\r\n{}".format(message).encode("utf-8"))
client.close() |
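These two helpers pair naturally; a minimal sketch (the request-line parsing is an assumption for illustration):

client = receive_connection()
request_line = client.recv(1024).decode("utf-8").splitlines()[0]
send_message(client, "Received: " + request_line)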