<SYSTEM_TASK:>
Checks the RabbitMQ connection
<END_TASK>
<USER_TASK:>
Description:
def check_mq_connection(self):
"""
Checks the RabbitMQ connection.
It prints to the screen whether or not the connection is working.
""" |
import pika
from zengine.client_queue import BLOCKING_MQ_PARAMS
from pika.exceptions import ProbableAuthenticationError, ConnectionClosed
try:
connection = pika.BlockingConnection(BLOCKING_MQ_PARAMS)
channel = connection.channel()
if channel.is_open:
print(__(u"{0}RabbitMQ is working{1}").format(CheckList.OKGREEN, CheckList.ENDC))
elif channel.is_closed or channel.is_closing:
print(__(u"{0}RabbitMQ is not working!{1}").format(CheckList.FAIL, CheckList.ENDC))
except ConnectionClosed as e:
print(__(u"{0}RabbitMQ is not working!{1}").format(CheckList.FAIL, CheckList.ENDC), e)
except ProbableAuthenticationError as e:
print(__(u"{0}RabbitMQ username and password wrong{1}").format(CheckList.FAIL,
CheckList.ENDC)) |
<SYSTEM_TASK:>
Prints the environment variables to the screen.
<END_TASK>
<USER_TASK:>
Description:
def check_encoding_and_env():
"""
Prints the relevant environment variables to the screen
so the user can check whether the correct values are set.
""" |
import sys
import os
if sys.getfilesystemencoding() in ['utf-8', 'UTF-8']:
print(__(u"{0}File system encoding correct{1}").format(CheckList.OKGREEN,
CheckList.ENDC))
else:
print(__(u"{0}File system encoding wrong!!{1}").format(CheckList.FAIL,
CheckList.ENDC))
check_env_list = ['RIAK_PROTOCOL', 'RIAK_SERVER', 'RIAK_PORT', 'REDIS_SERVER',
'DEFAULT_BUCKET_TYPE', 'PYOKO_SETTINGS',
'MQ_HOST', 'MQ_PORT', 'MQ_USER', 'MQ_VHOST',
]
env = os.environ
for k, v in env.items():
if k in check_env_list:
print(__(u"{0}{1} : {2}{3}").format(CheckList.BOLD, k, v, CheckList.ENDC)) |
<SYSTEM_TASK:>
Finds if the game is over.
<END_TASK>
<USER_TASK:>
Description:
def no_moves(position):
"""
Finds if the game is over.
:type: position: Board
:rtype: bool
""" |
return position.no_moves(color.white) \
or position.no_moves(color.black) |
<SYSTEM_TASK:>
Finds if particular King is checkmated.
<END_TASK>
<USER_TASK:>
Description:
def is_checkmate(position, input_color):
"""
Finds if particular King is checkmated.
:type: position: Board
:type: input_color: Color
:rtype: bool
""" |
return position.no_moves(input_color) and \
position.get_king(input_color).in_check(position) |
<SYSTEM_TASK:>
Handles pagination of object listings.
<END_TASK>
<USER_TASK:>
Description:
def _paginate(self, current_page, query_set, per_page=10):
"""
Handles pagination of object listings.
Args:
current_page (int):
Current page number
query_set (:class:`QuerySet<pyoko:pyoko.db.queryset.QuerySet>`):
Object listing queryset.
per_page (int):
Objects per page.
Returns:
QuerySet object, pagination data dict as a tuple
""" |
total_objects = query_set.count()
total_pages = int(total_objects / per_page or 1)
# add orphans to last page
current_per_page = per_page + (
total_objects % per_page if current_page == total_pages else 0)
pagination_data = dict(page=current_page,
total_pages=total_pages,
total_objects=total_objects,
per_page=current_per_page)
query_set = query_set.set_params(rows=current_per_page, start=(current_page - 1) * per_page)
return query_set, pagination_data |
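To make the orphan handling above concrete, here is a small standalone sketch of the same arithmetic (pure Python, no pyoko involved; the numbers are made up):

.. code-block:: python

    def paginate_counts(total_objects, current_page, per_page=10):
        total_pages = int(total_objects / per_page or 1)
        # orphans are folded into the last page instead of spilling onto an extra page
        current_per_page = per_page + (
            total_objects % per_page if current_page == total_pages else 0)
        start = (current_page - 1) * per_page
        return total_pages, current_per_page, start

    # 23 objects at 10 per page -> 2 pages; page 2 carries 13 objects (10 + 3 orphans)
    assert paginate_counts(23, 1) == (2, 10, 0)
    assert paginate_counts(23, 2) == (2, 13, 10)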
<SYSTEM_TASK:>
Creates a message for the given channel.
<END_TASK>
<USER_TASK:>
Description:
def create_message(current):
"""
Creates a message for the given channel.
.. code-block:: python
# request:
{
'view':'_zops_create_message',
'message': {
'channel': key, # of channel
'body': string, # message text.,
'type': int, # zengine.messaging.model.MSG_TYPES,
'attachments': [{
'description': string, # can be blank,
'name': string, # file name with extension,
'content': string, # base64 encoded file content
}]}
# response:
{
'status': 'Created',
'code': 201,
'msg_key': key, # key of the message object,
}
""" |
msg = current.input['message']
msg_obj = Channel.add_message(msg['channel'], body=msg['body'], typ=msg['type'],
sender=current.user,
title=msg['title'], receiver=msg['receiver'] or None)
current.output = {
'msg_key': msg_obj.key,
'status': 'Created',
'code': 201
}
if 'attachments' in msg:
for atch in msg['attachments']:
typ = current._dedect_file_type(atch['name'], atch['content'])
Attachment(channel_id=msg['channel'], msg=msg_obj, name=atch['name'],
file=atch['content'], description=atch['description'], typ=typ).save() |
<SYSTEM_TASK:>
Initial display of channel content.
<END_TASK>
<USER_TASK:>
Description:
def show_channel(current, waited=False):
"""
Initial display of channel content.
Returns channel description, members, no of members, last 20 messages etc.
.. code-block:: python
# request:
{
'view':'_zops_show_channel',
'key': key,
}
# response:
{
'channel_key': key,
'description': string,
'no_of_members': int,
'member_list': [
{'name': string,
'is_online': bool,
'avatar_url': string,
}],
'name': string,
'last_messages': [MSG_DICT]
'status': 'OK',
'code': 200
}
""" |
ch = Channel(current).objects.get(current.input['key'])
sbs = ch.get_subscription_for_user(current.user_id)
current.output = {'key': current.input['key'],
'description': ch.description,
'name': sbs.name,
'actions': sbs.get_actions(),
'avatar_url': ch.get_avatar(current.user),
'no_of_members': len(ch.subscriber_set),
'member_list': [{'name': sb.user.full_name,
'is_online': sb.user.is_online(),
'avatar_url': sb.user.get_avatar_url()
} for sb in ch.subscriber_set.objects.all()],
'last_messages': [],
'status': 'OK',
'code': 200
}
for msg in ch.get_last_messages():
current.output['last_messages'].insert(0, msg.serialize(current.user)) |
<SYSTEM_TASK:>
Get old messages for a channel. 20 messages per request
<END_TASK>
<USER_TASK:>
Description:
def channel_history(current):
"""
Get old messages for a channel. 20 messages per request
.. code-block:: python
# request:
{
'view':'_zops_channel_history',
'channel_key': key,
'timestamp': datetime, # timestamp data of oldest shown message
}
# response:
{
'messages': [MSG_DICT, ],
'status': 'OK',
'code': 200
}
""" |
current.output = {
'status': 'OK',
'code': 200,
'messages': []
}
for msg in list(Message.objects.filter(channel_id=current.input['channel_key'],
updated_at__lte=current.input['timestamp'])[:20]):
current.output['messages'].insert(0, msg.serialize(current.user))
# FIXME: looks like pyoko's __lt is broken
# TODO: convert lte to lt and remove this block, when __lt filter fixed
if current.output['messages']:
current.output['messages'].pop(-1) |
<SYSTEM_TASK:>
Push timestamp of latest message of an ACTIVE channel.
<END_TASK>
<USER_TASK:>
Description:
def report_last_seen_message(current):
"""
Push timestamp of latest message of an ACTIVE channel.
This view should be called with the timestamp of the latest message;
- When the user opens (clicks on) a channel.
- Periodically (eg: setInterval for 15 secs) while the user stays in a channel.
.. code-block:: python
# request:
{
'view':'_zops_last_seen_msg',
'channel_key': key,
'key': key,
'timestamp': datetime,
}
# response:
{
'status': 'OK',
'code': 200,
}
""" |
sbs = Subscriber(current).objects.filter(channel_id=current.input['channel_key'],
user_id=current.user_id)[0]
sbs.last_seen_msg_time = current.input['timestamp']
sbs.save()
current.output = {
'status': 'OK',
'code': 200} |
<SYSTEM_TASK:>
List channel memberships of current user
<END_TASK>
<USER_TASK:>
Description:
def list_channels(current):
"""
List channel memberships of current user
.. code-block:: python
# request:
{
'view':'_zops_list_channels',
}
# response:
{
'channels': [
{'name': string, # name of channel
'key': key, # key of channel
'unread': int, # unread message count
'type': int, # channel type,
# 15: public channels (chat room/broadcast channel distinction
comes from "read_only" flag)
# 10: direct channels
# 5: one and only private channel which is "Notifications"
'read_only': boolean,
# true if this is a read-only subscription to a broadcast channel
# false if it's a public chat room
'actions':[('action name', 'view name'),]
},]
}
""" |
current.output = {
'status': 'OK',
'code': 200,
'channels': []}
for sbs in current.user.subscriptions.objects.filter(is_visible=True):
try:
current.output['channels'].append(sbs.get_channel_listing())
except ObjectDoesNotExist:
# FIXME: This should not happen,
log.exception("UNPAIRED DIRECT EXCHANGES!!!!")
sbs.delete() |
<SYSTEM_TASK:>
Number of unread messages for current user
<END_TASK>
<USER_TASK:>
Description:
def unread_count(current):
"""
Number of unread messages for current user
.. code-block:: python
# request:
{
'view':'_zops_unread_count',
}
# response:
{
'status': 'OK',
'code': 200,
'notifications': int,
'messages': int,
}
""" |
unread_ntf = 0
unread_msg = 0
for sbs in current.user.subscriptions.objects.filter(is_visible=True):
try:
if sbs.channel.key == current.user.prv_exchange:
unread_ntf += sbs.unread_count()
else:
unread_msg += sbs.unread_count()
except ObjectDoesNotExist:
# FIXME: This should not happen,
log.exception("MULTIPLE PRV EXCHANGES!!!!")
sbs.delete()
current.output = {
'status': 'OK',
'code': 200,
'notifications': unread_ntf,
'messages': unread_msg
} |
<SYSTEM_TASK:>
Returns last N notifications for current user
<END_TASK>
<USER_TASK:>
Description:
def get_notifications(current):
"""
Returns last N notifications for current user
.. code-block:: python
# request:
{
'view':'_zops_unread_messages',
'amount': int, # Optional, defaults to 8
}
# response:
{
'status': 'OK',
'code': 200,
'notifications': [{'title':string,
'body': string,
'channel_key': key,
'type': int,
'url': string, # could be an in-app JS URL prefixed with "#" or
# full blown URL prefixed with "http"
'message_key': key,
'timestamp': datetime},],
}
""" |
current.output = {
'status': 'OK',
'code': 200,
'notifications': [],
}
amount = current.input.get('amount', 8)
try:
notif_sbs = current.user.subscriptions.objects.get(channel_id=current.user.prv_exchange)
except MultipleObjectsReturned:
# FIXME: This should not happen,
log.exception("MULTIPLE PRV EXCHANGES!!!!")
sbs = current.user.subscriptions.objects.filter(channel_id=current.user.prv_exchange)
sbs[0].delete()
notif_sbs = sbs[1]
for msg in notif_sbs.channel.message_set.objects.all()[:amount]:
current.output['notifications'].insert(0, {
'title': msg.msg_title,
'body': msg.body,
'type': msg.typ,
'url': msg.url,
'channel_key': msg.channel.key,
'message_key': msg.key,
'timestamp': msg.updated_at}) |
<SYSTEM_TASK:>
Create a public channel. Can be a broadcast channel or normal chat room.
<END_TASK>
<USER_TASK:>
Description:
def create_channel(current):
"""
Create a public channel. Can be a broadcast channel or normal chat room.
Chat room and broadcast distinction will be made at user subscription phase.
.. code-block:: python
# request:
{
'view':'_zops_create_channel',
'name': string,
'description': string,
}
# response:
{
'description': string,
'name': string,
'no_of_members': int,
'member_list': [
{'name': string,
'is_online': bool,
'avatar_url': string,
}],
'last_messages': [MSG_DICT]
'status': 'Created',
'code': 201,
'key': key, # of just created channel
}
""" |
channel = Channel(name=current.input['name'],
description=current.input['description'],
owner=current.user,
typ=15).save()
with BlockSave(Subscriber):
Subscriber.objects.get_or_create(user=channel.owner,
channel=channel,
can_manage=True,
can_leave=False)
current.input['key'] = channel.key
show_channel(current)
current.output.update({
'status': 'Created',
'code': 201
}) |
<SYSTEM_TASK:>
Subscribe users of a given unit to given channel
<END_TASK>
<USER_TASK:>
Description:
def add_unit_to_channel(current):
"""
Subscribe users of a given unit to given channel
JSON API:
.. code-block:: python
# request:
{
'view':'_zops_add_unit_to_channel',
'unit_key': key,
'channel_key': key,
'read_only': boolean, # true if this is a Broadcast channel,
# false if it's a normal chat room
}
# response:
{
'existing': [key,], # existing members
'newly_added': [key,], # newly added members
'status': 'Created',
'code': 201
}
""" |
read_only = current.input['read_only']
newly_added, existing = [], []
for member_key in UnitModel.get_user_keys(current, current.input['unit_key']):
sb, new = Subscriber(current).objects.get_or_create(user_id=member_key,
read_only=read_only,
channel_id=current.input['channel_key'])
if new:
newly_added.append(member_key)
else:
existing.append(member_key)
current.output = {
'existing': existing,
'newly_added': newly_added,
'status': 'Created',
'code': 201
} |
<SYSTEM_TASK:>
Search users for adding to a public room
<END_TASK>
<USER_TASK:>
Description:
def search_user(current):
"""
Search users for adding to a public room
or creating one to one direct messaging
.. code-block:: python
# request:
{
'view':'_zops_search_user',
'query': string,
}
# response:
{
'results': [('full_name', 'key', 'avatar_url'), ],
'status': 'OK',
'code': 200
}
""" |
current.output = {
'results': [],
'status': 'OK',
'code': 200
}
qs = UserModel(current).objects.exclude(key=current.user_id).search_on(
*settings.MESSAGING_USER_SEARCH_FIELDS,
contains=current.input['query'])
# FIXME: somehow exclude(key=current.user_id) not working with search_on()
for user in qs:
if user.key != current.user_id:
current.output['results'].append((user.full_name, user.key, user.get_avatar_url())) |
<SYSTEM_TASK:>
Search on units for subscribing its users to a channel
<END_TASK>
<USER_TASK:>
Description:
def search_unit(current):
"""
Search on units for subscribing its users to a channel
.. code-block:: python
# request:
{
'view':'_zops_search_unit',
'query': string,
}
# response:
{
'results': [('name', 'key'), ],
'status': 'OK',
'code': 200
}
""" |
current.output = {
'results': [],
'status': 'OK',
'code': 200
}
for user in UnitModel(current).objects.search_on(*settings.MESSAGING_UNIT_SEARCH_FIELDS,
contains=current.input['query']):
current.output['results'].append((user.name, user.key)) |
<SYSTEM_TASK:>
Create a One-To-One channel between current and selected user.
<END_TASK>
<USER_TASK:>
Description:
def create_direct_channel(current):
"""
Create a One-To-One channel between current and selected user.
.. code-block:: python
# request:
{
'view':'_zops_create_direct_channel',
'user_key': key,
}
# response:
{
'description': string,
'no_of_members': int,
'member_list': [
{'name': string,
'is_online': bool,
'avatar_url': string,
}],
'last_messages': [MSG_DICT]
'status': 'Created',
'code': 201,
'channel_key': key, # of just created channel
'name': string, # name of subscribed channel
}
""" |
channel, sub_name = Channel.get_or_create_direct_channel(current.user_id,
current.input['user_key'])
current.input['key'] = channel.key
show_channel(current)
current.output.update({
'status': 'Created',
'code': 201
}) |
<SYSTEM_TASK:>
Search in messages. If "channel_key" given, search will be limited to that channel,
<END_TASK>
<USER_TASK:>
Description:
def find_message(current):
"""
Search in messages. If "channel_key" given, search will be limited to that channel,
otherwise search will be performed on all of user's subscribed channels.
.. code-block:: python
# request:
{
'view':'_zops_search_unit',
'channel_key': key,
'query': string,
'page': int,
}
# response:
{
'results': [MSG_DICT, ],
'pagination': {
'page': int, # current page
'total_pages': int,
'total_objects': int,
'per_page': int, # object per page
},
'status': 'OK',
'code': 200
}
""" |
current.output = {
'results': [],
'status': 'OK',
'code': 200
}
query_set = Message(current).objects.search_on(['msg_title', 'body', 'url'],
contains=current.input['query'])
if current.input['channel_key']:
query_set = query_set.filter(channel_id=current.input['channel_key'])
else:
subscribed_channels = Subscriber.objects.filter(user_id=current.user_id).values_list(
"channel_id", flatten=True)
query_set = query_set.filter(channel_id__in=subscribed_channels)
query_set, pagination_data = _paginate(current_page=current.input['page'], query_set=query_set)
current.output['pagination'] = pagination_data
for msg in query_set:
current.output['results'].append(msg.serialize(current.user)) |
<SYSTEM_TASK:>
Delete a channel
<END_TASK>
<USER_TASK:>
Description:
def delete_channel(current):
"""
Delete a channel
.. code-block:: python
# request:
{
'view':'_zops_delete_channel',
'channel_key': key,
}
# response:
{
'status': 'OK',
'code': 200
}
""" |
ch_key = current.input['channel_key']
ch = Channel(current).objects.get(owner_id=current.user_id, key=ch_key)
ch.delete()
Subscriber.objects.filter(channel_id=ch_key).delete()
Message.objects.filter(channel_id=ch_key).delete()
current.output = {'status': 'Deleted', 'code': 200} |
<SYSTEM_TASK:>
Update channel name or description
<END_TASK>
<USER_TASK:>
Description:
def edit_channel(current):
"""
Update channel name or description
.. code-block:: python
# request:
{
'view':'_zops_edit_channel',
'channel_key': key,
'name': string,
'description': string,
}
# response:
{
'status': 'OK',
'code': 200
}
""" |
ch = Channel(current).objects.get(owner_id=current.user_id,
key=current.input['channel_key'])
ch.name = current.input['name']
ch.description = current.input['description']
ch.save()
for sbs in ch.subscriber_set.objects.all():
sbs.name = ch.name
sbs.save()
current.output = {'status': 'OK', 'code': 200} |
<SYSTEM_TASK:>
Pin a channel to top of channel list
<END_TASK>
<USER_TASK:>
Description:
def pin_channel(current):
"""
Pin a channel to top of channel list
.. code-block:: python
# request:
{
'view':'_zops_pin_channel',
'channel_key': key,
}
# response:
{
'status': 'OK',
'code': 200
}
""" |
try:
Subscriber(current).objects.filter(user_id=current.user_id,
channel_id=current.input['channel_key']).update(
pinned=True)
current.output = {'status': 'OK', 'code': 200}
except ObjectDoesNotExist:
raise HTTPError(404, "") |
<SYSTEM_TASK:>
Edit a message the user owns.
<END_TASK>
<USER_TASK:>
Description:
def edit_message(current):
"""
Edit a message the user owns.
.. code-block:: python
# request:
{
'view':'_zops_edit_message',
'message': {
'body': string, # message text
'key': key
}
}
# response:
{
'status': string, # 'OK' for success
'code': int, # 200 for success
}
""" |
current.output = {'status': 'OK', 'code': 200}
in_msg = current.input['message']
try:
msg = Message(current).objects.get(sender_id=current.user_id, key=in_msg['key'])
msg.body = in_msg['body']
msg.save()
except ObjectDoesNotExist:
raise HTTPError(404, "") |
<SYSTEM_TASK:>
Flag inappropriate messages
<END_TASK>
<USER_TASK:>
Description:
def flag_message(current):
"""
Flag inappropriate messages
.. code-block:: python
# request:
{
'view':'_zops_flag_message',
'message_key': key,
}
# response:
{
'status': 'Created',
'code': 201,
}
""" |
current.output = {'status': 'Created', 'code': 201}
FlaggedMessage.objects.get_or_create(user_id=current.user_id,
message_id=current.input['key']) |
<SYSTEM_TASK:>
Remove the flag of a message
<END_TASK>
<USER_TASK:>
Description:
def unflag_message(current):
"""
Remove the flag of a message
.. code-block:: python
# request:
{
'view':'_zops_flag_message',
'key': key,
}
# response:
{
'status': 'OK',
'code': 200,
}
""" |
current.output = {'status': 'OK', 'code': 200}
FlaggedMessage(current).objects.filter(user_id=current.user_id,
message_id=current.input['key']).delete() |
<SYSTEM_TASK:>
Returns applicable actions for current user for given message key
<END_TASK>
<USER_TASK:>
Description:
def get_message_actions(current):
"""
Returns applicable actions for current user for given message key
.. code-block:: python
# request:
{
'view':'_zops_get_message_actions',
'key': key,
}
# response:
{
'actions':[('name_string', 'cmd_string'),]
'status': string, # 'OK' for success
'code': int, # 200 for success
}
""" |
current.output = {'status': 'OK',
'code': 200,
'actions': Message.objects.get(
current.input['key']).get_actions_for(current.user)} |
<SYSTEM_TASK:>
Remove a message from favorites
<END_TASK>
<USER_TASK:>
Description:
def remove_from_favorites(current):
"""
Remove a message from favorites
.. code-block:: python
# request:
{
'view':'_zops_remove_from_favorites',
'key': key,
}
# response:
{
'status': 'OK',
'code': 200
}
""" |
try:
current.output = {'status': 'OK', 'code': 200}
Favorite(current).objects.get(user_id=current.user_id,
key=current.input['key']).delete()
except ObjectDoesNotExist:
raise HTTPError(404, "") |
<SYSTEM_TASK:>
List user's favorites. If "channel_key" is given, only favorites belonging to that channel will be returned.
<END_TASK>
<USER_TASK:>
Description:
def list_favorites(current):
"""
List user's favorites. If "channel_key" is given, only favorites belonging to that channel will be returned.
.. code-block:: python
# request:
{
'view':'_zops_list_favorites',
'channel_key': key,
}
# response:
{
'status': 'OK',
'code': 200,
'favorites':[{'key': key,
'channel_key': key,
'message_key': key,
'message_summary': string, # max 60 char
'channel_name': string,
},]
}
""" |
current.output = {'status': 'OK', 'code': 200, 'favorites': []}
query_set = Favorite(current).objects.filter(user_id=current.user_id)
if current.input['channel_key']:
query_set = query_set.filter(channel_id=current.input['channel_key'])
current.output['favorites'] = [{
'key': fav.key,
'channel_key': fav.channel.key,
'message_key': fav.message.key,
'message_summary': fav.summary,
'channel_name': fav.channel_name
} for fav in query_set] |
<SYSTEM_TASK:>
Creates a direct messaging channel between two users
<END_TASK>
<USER_TASK:>
Description:
def get_or_create_direct_channel(cls, initiator_key, receiver_key):
"""
Creates a direct messaging channel between two users
Args:
initiator_key: Key of the user who wants to make first contact
receiver_key: Key of the other party
Returns:
(Channel, receiver_name)
""" |
existing = cls.objects.OR().filter(
code_name='%s_%s' % (initiator_key, receiver_key)).filter(
code_name='%s_%s' % (receiver_key, initiator_key))
receiver_name = UserModel.objects.get(receiver_key).full_name
if existing:
channel = existing[0]
else:
channel_name = '%s_%s' % (initiator_key, receiver_key)
channel = cls(is_direct=True, code_name=channel_name, typ=10).blocking_save()
with BlockSave(Subscriber):
Subscriber.objects.get_or_create(channel=channel,
user_id=initiator_key,
name=receiver_name)
Subscriber.objects.get_or_create(channel=channel,
user_id=receiver_key,
name=UserModel.objects.get(initiator_key).full_name)
return channel, receiver_name |
<SYSTEM_TASK:>
Creates MQ exchange for this channel
<END_TASK>
<USER_TASK:>
Description:
def create_exchange(self):
"""
Creates MQ exchange for this channel
Needs to be defined only once.
""" |
mq_channel = self._connect_mq()
mq_channel.exchange_declare(exchange=self.code_name,
exchange_type='fanout',
durable=True) |
<SYSTEM_TASK:>
Deletes MQ exchange for this channel
<END_TASK>
<USER_TASK:>
Description:
def delete_exchange(self):
"""
Deletes MQ exchange for this channel
Needs to be called only once.
""" |
mq_channel = self._connect_mq()
mq_channel.exchange_delete(exchange=self.code_name) |
<SYSTEM_TASK:>
serialized form for channel listing
<END_TASK>
<USER_TASK:>
Description:
def get_channel_listing(self):
"""
serialized form for channel listing
""" |
return {'name': self.name,
'key': self.channel.key,
'type': self.channel.typ,
'read_only': self.read_only,
'is_online': self.is_online(),
'actions': self.get_actions(),
'unread': self.unread_count()} |
<SYSTEM_TASK:>
Creates user's private exchange
<END_TASK>
<USER_TASK:>
Description:
def create_exchange(self):
"""
Creates user's private exchange
Actually the user's private exchange needs to be declared only once,
and this should happen when the user is first created.
But since declaring it has little performance cost,
to be safe we always call it before binding to the channel we are currently subscribing to.
""" |
channel = self._connect_mq()
channel.exchange_declare(exchange=self.user.prv_exchange,
exchange_type='fanout',
durable=True) |
<SYSTEM_TASK:>
Serializes message for given user.
<END_TASK>
<USER_TASK:>
Description:
def serialize(self, user=None):
"""
Serializes message for given user.
Note:
Should be called before the first save(). Otherwise "is_update" will get the wrong value.
Args:
user: User object
Returns:
Dict. JSON serialization ready dictionary object
""" |
return {
'content': self.body,
'type': self.typ,
'updated_at': self.updated_at,
'timestamp': self.updated_at,
'is_update': not hasattr(self, 'unsaved'),
'attachments': [attachment.serialize() for attachment in self.attachment_set],
'title': self.msg_title,
'url': self.url,
'sender_name': self.sender.full_name,
'sender_key': self.sender.key,
'channel_key': self.channel.key,
'cmd': 'message',
'avatar_url': self.sender.avatar,
'key': self.key,
} |
<SYSTEM_TASK:>
Re-publishes updated message
<END_TASK>
<USER_TASK:>
Description:
def _republish(self):
"""
Re-publishes updated message
""" |
mq_channel = self.channel._connect_mq()
mq_channel.basic_publish(exchange=self.channel.key, routing_key='',
body=json.dumps(self.serialize())) |
<SYSTEM_TASK:>
Provide a reasonable default crawl name using the user name and date
<END_TASK>
<USER_TASK:>
Description:
def defaultCrawlId():
"""
Provide a reasonable default crawl name using the user name and date
""" |
timestamp = datetime.now().isoformat().replace(':', '_')
user = getuser()
return '_'.join(('crawl', user, timestamp)) |
<SYSTEM_TASK:>
Call the Nutch Server, do some error checking, and return the response.
<END_TASK>
<USER_TASK:>
Description:
def call(self, verb, servicePath, data=None, headers=None, forceText=False, sendJson=True):
"""Call the Nutch Server, do some error checking, and return the response.
:param verb: One of nutch.RequestVerbs
:param servicePath: path component of URL to append to endpoint, e.g. '/config'
:param data: Data to attach to this request
:param headers: headers to attach to this request, default are JsonAcceptHeader
:param forceText: don't trust the response headers and just get the text
:param sendJson: Whether to treat attached data as JSON or not
""" |
default_data = {} if sendJson else ""
data = data if data else default_data
headers = headers if headers else JsonAcceptHeader.copy()
if not sendJson:
headers.update(TextSendHeader)
if verb not in RequestVerbs:
die('Server call verb must be one of %s' % str(RequestVerbs.keys()))
if Verbose:
echo2("%s Endpoint:" % verb.upper(), servicePath)
echo2("%s Request data:" % verb.upper(), data)
echo2("%s Request headers:" % verb.upper(), headers)
verbFn = RequestVerbs[verb]
if sendJson:
resp = verbFn(self.serverEndpoint + servicePath, json=data, headers=headers)
else:
resp = verbFn(self.serverEndpoint + servicePath, data=data, headers=headers)
if Verbose:
echo2("Response headers:", resp.headers)
echo2("Response status:", resp.status_code)
if resp.status_code != 200:
if self.raiseErrors:
error = NutchException("Unexpected server response: %d" % resp.status_code)
error.status_code = resp.status_code
raise error
else:
warn('Nutch server returned status:', resp.status_code)
if forceText or 'content-type' not in resp.headers or resp.headers['content-type'] == 'text/plain':
if Verbose:
echo2("Response text:", resp.text)
return resp.text
content_type = resp.headers['content-type']
if content_type == 'application/json' and not forceText:
if Verbose:
echo2("Response JSON:", resp.json())
return resp.json()
else:
die('Did not understand server response: %s' % resp.headers) |
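A minimal usage sketch of call() above; the verb, the '/config' path, and the error handling follow the code shown here, but the server object name is illustrative:

.. code-block:: python

    # Hypothetical usage; `server` is a Server instance bound to a running
    # Nutch REST endpoint, with raiseErrors=True.
    try:
        configs = server.call('get', '/config')              # parsed JSON response
        raw = server.call('get', '/config', forceText=True)  # same call, raw text
    except NutchException as exc:
        print('Nutch server returned HTTP status %s' % exc.status_code)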
<SYSTEM_TASK:>
Return list of jobs at this endpoint.
<END_TASK>
<USER_TASK:>
Description:
def list(self, allJobs=False):
"""
Return list of jobs at this endpoint.
Call list(allJobs=True) to see all jobs, not just the ones managed by this Client
""" |
jobs = self.server.call('get', '/job')
return [Job(job['id'], self.server) for job in jobs if allJobs or self._job_owned(job)] |
<SYSTEM_TASK:>
Given a completed job, start the next job in the round, or return None
<END_TASK>
<USER_TASK:>
Description:
def _nextJob(self, job, nextRound=True):
"""
Given a completed job, start the next job in the round, or return None
:param nextRound: whether to start jobs from the next round if the current round is completed.
:return: the newly started Job, or None if no job was started
""" |
jobInfo = job.info()
assert jobInfo['state'] == 'FINISHED'
roundEnd = False
if jobInfo['type'] == 'INJECT':
nextCommand = 'GENERATE'
elif jobInfo['type'] == 'GENERATE':
nextCommand = 'FETCH'
elif jobInfo['type'] == 'FETCH':
nextCommand = 'PARSE'
elif jobInfo['type'] == 'PARSE':
nextCommand = 'UPDATEDB'
elif jobInfo['type'] == 'UPDATEDB':
nextCommand = 'INVERTLINKS'
elif jobInfo['type'] == 'INVERTLINKS':
nextCommand = 'DEDUP'
elif jobInfo['type'] == 'DEDUP':
if self.enable_index:
nextCommand = 'INDEX'
else:
roundEnd = True
elif jobInfo['type'] == 'INDEX':
roundEnd = True
else:
raise NutchException("Unrecognized job type {}".format(jobInfo['type']))
if roundEnd:
if nextRound and self.currentRound < self.totalRounds:
nextCommand = 'GENERATE'
self.currentRound += 1
else:
return None
return self.jobClient.create(nextCommand) |
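The if/elif chain above encodes a fixed pipeline; as an illustration only (not the library's code), the same round ordering can be read off a lookup table:

.. code-block:: python

    # Sketch: INJECT -> GENERATE -> FETCH -> PARSE -> UPDATEDB ->
    # INVERTLINKS -> DEDUP -> (INDEX), expressed as a mapping.
    NEXT_COMMAND = {
        'INJECT': 'GENERATE',
        'GENERATE': 'FETCH',
        'FETCH': 'PARSE',
        'PARSE': 'UPDATEDB',
        'UPDATEDB': 'INVERTLINKS',
        'INVERTLINKS': 'DEDUP',
        'DEDUP': 'INDEX',   # only when indexing is enabled, otherwise the round ends
        'INDEX': None,      # None marks the end of a round
    }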
<SYSTEM_TASK:>
Check the status of the current job, activate the next job if it's finished, and return the active job
<END_TASK>
<USER_TASK:>
Description:
def progress(self, nextRound=True):
"""
Check the status of the current job, activate the next job if it's finished, and return the active job
If the current job has failed, a NutchCrawlException will be raised with no jobs attached.
:param nextRound: whether to start jobs from the next round if the current job/round is completed.
:return: the currently running Job, or None if no jobs are running.
""" |
currentJob = self.currentJob
if currentJob is None:
return currentJob
jobInfo = currentJob.info()
if jobInfo['state'] == 'RUNNING':
return currentJob
elif jobInfo['state'] == 'FINISHED':
nextJob = self._nextJob(currentJob, nextRound)
self.currentJob = nextJob
return nextJob
else:
error = NutchCrawlException("Unexpected job state: {}".format(jobInfo['state']))
error.current_job = currentJob
raise error |
<SYSTEM_TASK:>
Execute all jobs in the current round and return when they have finished.
<END_TASK>
<USER_TASK:>
Description:
def nextRound(self):
"""
Execute all jobs in the current round and return when they have finished.
If a job fails, a NutchCrawlException will be raised, with all completed jobs from this round attached
to the exception.
:return: a list of all completed Jobs
""" |
finishedJobs = []
if self.currentJob is None:
self.currentJob = self.jobClient.create('GENERATE')
activeJob = self.progress(nextRound=False)
while activeJob:
oldJob = activeJob
activeJob = self.progress(nextRound=False) # updates self.currentJob
if oldJob and oldJob != activeJob:
finishedJobs.append(oldJob)
sleep(self.sleepTime)
self.currentRound += 1
return finishedJobs |
<SYSTEM_TASK:>
Execute all queued rounds and return when they have finished.
<END_TASK>
<USER_TASK:>
Description:
def waitAll(self):
"""
Execute all queued rounds and return when they have finished.
If a job fails, a NutchCrawlException will be raised, with all completed jobs attached
to the exception
:return: a list of jobs completed for each round, organized by round (list-of-lists)
""" |
finishedRounds = [self.nextRound()]
while self.currentRound < self.totalRounds:
finishedRounds.append(self.nextRound())
return finishedRounds |
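A hedged sketch of driving a crawl by hand with progress() instead of waitAll(); `crawl` stands for an already-constructed crawl client like the one above:

.. code-block:: python

    from time import sleep

    # Hypothetical driving loop; assumes the crawl already has a running job
    # (e.g. created by nextRound() elsewhere). Poll until every queued round
    # has finished, at which point progress() returns None.
    job = crawl.progress()
    while job is not None:
        sleep(crawl.sleepTime)
        job = crawl.progress()  # advances to the next job/round when one finishes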
<SYSTEM_TASK:>
Create a JobClient for listing and creating jobs.
<END_TASK>
<USER_TASK:>
Description:
def Jobs(self, crawlId=None):
"""
Create a JobClient for listing and creating jobs.
The JobClient inherits the confId from the Nutch client.
:param crawlId: crawlIds to use for this client. If not provided, will be generated
by nutch.defaultCrawlId()
:return: a JobClient
""" |
crawlId = crawlId if crawlId else defaultCrawlId()
return JobClient(self.server, crawlId, self.confId) |
<SYSTEM_TASK:>
Get an attribute defined by this session
<END_TASK>
<USER_TASK:>
Description:
def get(self, attr, default=None):
"""Get an attribute defined by this session""" |
attrs = self.body.get('attributes') or {}
return attrs.get(attr, default) |
<SYSTEM_TASK:>
if the data can't be found in the cache then it will be fetched from the db,
<END_TASK>
<USER_TASK:>
Description:
def get_all(self, cat):
"""
if the data can't be found in the cache then it will be fetched from the db,
parsed and stored to cache for each lang_code.
:param cat: cat of catalog data
:return:
""" |
return self._get_from_local_cache(cat) or self._get_from_cache(cat) or self._get_from_db(cat) |
<SYSTEM_TASK:>
get from redis, cache locally then return
<END_TASK>
<USER_TASK:>
Description:
def _fill_get_item_cache(self, catalog, key):
"""
get from redis, cache locally then return
:param catalog: catalog name
:param key:
:return:
""" |
lang = self._get_lang()
keylist = self.get_all(catalog)
self.ITEM_CACHE[lang][catalog] = dict([(i['value'], i['name']) for i in keylist])
return self.ITEM_CACHE[lang][catalog].get(key) |
<SYSTEM_TASK:>
Utility method to quickly get a server up and running.
<END_TASK>
<USER_TASK:>
Description:
def run(self, host, port, debug=True, validate_requests=True):
"""Utility method to quickly get a server up and running.
:param debug: turns on Werkzeug debugger, code reloading, and full
logging.
:param validate_requests: whether or not to ensure that requests are
sent by Amazon. This can be useful for manually testing the server.
""" |
if debug:
# Turn on all alexandra log output
logging.basicConfig(level=logging.DEBUG)
app = self.create_wsgi_app(validate_requests)
run_simple(host, port, app, use_reloader=debug, use_debugger=debug) |
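A hedged example of booting the development server with the method above; `app` stands for an alexandra Application instance defined elsewhere:

.. code-block:: python

    # Hypothetical usage; validate_requests=False is convenient only for
    # local manual testing, never for a published skill.
    if __name__ == '__main__':
        app.run('127.0.0.1', 8080, debug=True, validate_requests=False)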
<SYSTEM_TASK:>
Given a parsed JSON request object, call the correct Intent, Launch,
<END_TASK>
<USER_TASK:>
Description:
def dispatch_request(self, body):
"""Given a parsed JSON request object, call the correct Intent, Launch,
or SessionEnded function.
This function is called after request parsing and validation and will
raise a `ValueError` if an unknown request type comes in.
:param body: JSON object loaded from incoming request's POST data.
""" |
req_type = body.get('request', {}).get('type')
session_obj = body.get('session')
session = Session(session_obj) if session_obj else None
if req_type == 'LaunchRequest':
return self.launch_fn(session)
elif req_type == 'IntentRequest':
intent = body['request']['intent']['name']
intent_fn = self.intent_map.get(intent, self.unknown_intent_fn)
slots = {
slot['name']: slot.get('value')
for _, slot in
body['request']['intent'].get('slots', {}).items()
}
arity = intent_fn.__code__.co_argcount
if arity == 2:
return intent_fn(slots, session)
return intent_fn()
elif req_type == 'SessionEndedRequest':
return self.session_end_fn()
log.error('invalid request type: %s', req_type)
raise ValueError('bad request: %s' % body) |
<SYSTEM_TASK:>
Decorator to register a handler for the given intent.
<END_TASK>
<USER_TASK:>
Description:
def intent(self, intent_name):
"""Decorator to register a handler for the given intent.
The decorated function can either take 0 or 2 arguments. If two are
specified, it will be provided a dictionary of `{slot_name: value}` and
a :py:class:`alexandra.session.Session` instance.
If no session was provided in the request, the session object will be
`None`. ::
@alexa_app.intent('FooBarBaz')
def foo_bar_baz_intent(slots, session):
pass
@alexa_app.intent('NoArgs')
def noargs_intent():
pass
""" |
# nested decorator so we can have params.
def _decorator(func):
arity = func.__code__.co_argcount
if arity not in [0, 2]:
raise ValueError("expected 0 or 2 argument function")
self.intent_map[intent_name] = func
return func
return _decorator |
<SYSTEM_TASK:>
encrypt password if not already encrypted
<END_TASK>
<USER_TASK:>
Description:
def encrypt_password(self):
""" encrypt password if not already encrypted """ |
if self.password and not self.password.startswith('$pbkdf2'):
self.set_password(self.password) |
<SYSTEM_TASK:>
Send arbitrary cmd and data to client
<END_TASK>
<USER_TASK:>
Description:
def send_client_cmd(self, data, cmd=None, via_queue=None):
"""
Send arbitrary cmd and data to client
If a queue name is passed via the "via_queue" parameter,
that queue will be used instead of the user's private exchange.
Args:
data: dict
cmd: string
via_queue: queue name,
""" |
mq_channel = self._connect_mq()
if cmd:
data['cmd'] = cmd
if via_queue:
mq_channel.basic_publish(exchange='',
routing_key=via_queue,
body=json.dumps(data))
else:
mq_channel.basic_publish(exchange=self.prv_exchange,
routing_key='',
body=json.dumps(data)) |
<SYSTEM_TASK:>
Assigning the workflow to yourself.
<END_TASK>
<USER_TASK:>
Description:
def assign_yourself(self):
"""
Assigning the workflow to yourself.
The selected job is checked to see whether a role is already assigned to it.
If no role is assigned, the current user takes the job
and a message is displayed saying the operation was successful.
If a role is already assigned, no operation is performed
and an explanatory message is displayed on the screen.
.. code-block:: python
# request:
{
'task_inv_key': string,
}
""" |
task_invitation = TaskInvitation.objects.get(self.task_invitation_key)
wfi = task_invitation.instance
if not wfi.current_actor.exist:
wfi.current_actor = self.current.role
wfi.save()
[inv.delete() for inv in TaskInvitation.objects.filter(instance=wfi) if
not inv == task_invitation]
title = _(u"Successful")
msg = _(u"You have successfully assigned the job to yourself.")
else:
title = _(u"Unsuccessful")
msg = _(u"Unfortunately, this job is already taken by someone else.")
self.current.msg_box(title=title, msg=msg) |
<SYSTEM_TASK:>
Assigns the workflow instance and the task invitation to a selected role.
<END_TASK>
<USER_TASK:>
Description:
def send_workflow(self):
"""
Assigns the workflow instance and the task invitation to a selected role.
""" |
task_invitation = TaskInvitation.objects.get(self.task_invitation_key)
wfi = task_invitation.instance
select_role = self.input['form']['select_role']
if wfi.current_actor == self.current.role:
task_invitation.role = RoleModel.objects.get(select_role)
wfi.current_actor = RoleModel.objects.get(select_role)
wfi.save()
task_invitation.save()
[inv.delete() for inv in TaskInvitation.objects.filter(instance=wfi) if
not inv == task_invitation]
title = _(u"Successful")
msg = _(u"The workflow was assigned to someone else with success.")
else:
title = _(u"Unsuccessful")
msg = _(u"This workflow does not belong to you, you cannot assign it to someone else.")
self.current.msg_box(title=title, msg=msg) |
<SYSTEM_TASK:>
The start and finish dates of the workflow are updated.
<END_TASK>
<USER_TASK:>
Description:
def save_date(self):
"""
The start and finish dates of the task invitation and
the workflow instance are updated, extending the workflow time.
""" |
task_invitation = TaskInvitation.objects.get(self.task_invitation_key)
wfi = task_invitation.instance
if wfi.current_actor.exist and wfi.current_actor == self.current.role:
dt_start = datetime.strptime(self.input['form']['start_date'], "%d.%m.%Y")
dt_finish = datetime.strptime(self.input['form']['finish_date'], "%d.%m.%Y")
task_invitation.start_date = dt_start
task_invitation.finish_date = dt_finish
task_invitation.save()
wfi.start_date = dt_start
wfi.finish_date = dt_finish
wfi.save()
title = _(u"Successful")
msg = _(u"You've extended the workflow time.")
else:
title = _(u"Unsuccessful")
msg = _(u"This workflow does not belong to you.")
self.current.msg_box(title=title, msg=msg) |
<SYSTEM_TASK:>
If there is a role assigned to the workflow and
<END_TASK>
<USER_TASK:>
Description:
def suspend(self):
"""
If a role is assigned to the workflow and
it is the same as the current user's role, the user can drop the workflow.
Otherwise, nothing can be done.
.. code-block:: python
# request:
{
'task_inv_key': string,
}
""" |
task_invitation = TaskInvitation.objects.get(self.task_invitation_key)
wfi = task_invitation.instance
if wfi.current_actor.exist and wfi.current_actor == self.current.role:
for m in RoleModel.objects.filter(abstract_role=self.current.role.abstract_role,
unit=self.current.role.unit):
if m != self.current.role:
task_invitation.key = ''
task_invitation.role = m
task_invitation.save()
wfi.current_actor = RoleModel()
wfi.save()
title = _(u"Successful")
msg = _(u"You left the workflow.")
else:
title = _(u"Unsuccessful")
msg = _(u"Unfortunately, this workflow does not belong to you or is already idle.")
self.current.msg_box(title=title, msg=msg) |
<SYSTEM_TASK:>
Finds out if the piece is on the home row.
<END_TASK>
<USER_TASK:>
Description:
def on_home_row(self, location=None):
"""
Finds out if the piece is on the home row.
:return: bool for whether piece is on home row or not
""" |
location = location or self.location
return (self.color == color.white and location.rank == 1) or \
(self.color == color.black and location.rank == 6) |
<SYSTEM_TASK:>
Finds if a move from the current location would result in promotion
<END_TASK>
<USER_TASK:>
Description:
def would_move_be_promotion(self, location=None):
"""
Finds if a move from the current location would result in promotion
:type: location: Location
:rtype: bool
""" |
location = location or self.location
return (location.rank == 1 and self.color == color.black) or \
(location.rank == 6 and self.color == color.white) |
<SYSTEM_TASK:>
Finds square directly in front of Pawn
<END_TASK>
<USER_TASK:>
Description:
def square_in_front(self, location=None):
"""
Finds square directly in front of Pawn
:type: location: Location
:rtype: Location
""" |
location = location or self.location
return location.shift_up() if self.color == color.white else location.shift_down() |
<SYSTEM_TASK:>
Finds possible moves one step and two steps in front
<END_TASK>
<USER_TASK:>
Description:
def forward_moves(self, position):
"""
Finds possible moves one step and two steps in front
of Pawn.
:type: position: Board
:rtype: list
""" |
if position.is_square_empty(self.square_in_front(self.location)):
"""
If square in front is empty add the move
"""
if self.would_move_be_promotion():
for move in self.create_promotion_moves(notation_const.PROMOTE):
yield move
else:
yield self.create_move(end_loc=self.square_in_front(self.location),
status=notation_const.MOVEMENT)
if self.on_home_row() and \
position.is_square_empty(self.two_squares_in_front(self.location)):
"""
If pawn is on home row and two squares in front of the pawn is empty
add the move
"""
yield self.create_move(
end_loc=self.square_in_front(self.square_in_front(self.location)),
status=notation_const.MOVEMENT
) |
<SYSTEM_TASK:>
Adds specified diagonal as a capture move if it is one
<END_TASK>
<USER_TASK:>
Description:
def _one_diagonal_capture_square(self, capture_square, position):
"""
Adds specified diagonal as a capture move if it is one
""" |
if self.contains_opposite_color_piece(capture_square, position):
if self.would_move_be_promotion():
for move in self.create_promotion_moves(status=notation_const.CAPTURE_AND_PROMOTE,
location=capture_square):
yield move
else:
yield self.create_move(end_loc=capture_square,
status=notation_const.CAPTURE) |
<SYSTEM_TASK:>
Finds out all possible capture moves
<END_TASK>
<USER_TASK:>
Description:
def capture_moves(self, position):
"""
Finds out all possible capture moves
:rtype: list
""" |
try:
right_diagonal = self.square_in_front(self.location.shift_right())
for move in self._one_diagonal_capture_square(right_diagonal, position):
yield move
except IndexError:
pass
try:
left_diagonal = self.square_in_front(self.location.shift_left())
for move in self._one_diagonal_capture_square(left_diagonal, position):
yield move
except IndexError:
pass |
<SYSTEM_TASK:>
Finds out if pawn is on enemy center rank.
<END_TASK>
<USER_TASK:>
Description:
def on_en_passant_valid_location(self):
"""
Finds out if pawn is on enemy center rank.
:rtype: bool
""" |
return (self.color == color.white and self.location.rank == 4) or \
(self.color == color.black and self.location.rank == 3) |
<SYSTEM_TASK:>
Finds if an opponent's pawn is next to this pawn
<END_TASK>
<USER_TASK:>
Description:
def _is_en_passant_valid(self, opponent_pawn_location, position):
"""
Finds if an opponent's pawn is next to this pawn
:rtype: bool
""" |
try:
pawn = position.piece_at_square(opponent_pawn_location)
return pawn is not None and \
isinstance(pawn, Pawn) and \
pawn.color != self.color and \
position.piece_at_square(opponent_pawn_location).just_moved_two_steps
except IndexError:
return False |
<SYSTEM_TASK:>
Yields en_passant moves in given direction if it is legal.
<END_TASK>
<USER_TASK:>
Description:
def add_one_en_passant_move(self, direction, position):
"""
Yields en_passant moves in given direction if it is legal.
:type: direction: function
:type: position: Board
:rtype: gen
""" |
try:
if self._is_en_passant_valid(direction(self.location), position):
yield self.create_move(
end_loc=self.square_in_front(direction(self.location)),
status=notation_const.EN_PASSANT
)
except IndexError:
pass |
<SYSTEM_TASK:>
Build a dict containing a valid response to an Alexa request.
<END_TASK>
<USER_TASK:>
Description:
def respond(text=None, ssml=None, attributes=None, reprompt_text=None,
reprompt_ssml=None, end_session=True):
""" Build a dict containing a valid response to an Alexa request.
If speech output is desired, either of `text` or `ssml` should
be specified.
:param text: Plain text speech output to be said by Alexa device.
:param ssml: Speech output in SSML form.
:param attributes: Dictionary of attributes to store in the session.
:param end_session: Should the session be terminated after this response?
:param reprompt_text, reprompt_ssml: Works the same as
`text`/`ssml`, but instead sets the reprompting speech output.
""" |
obj = {
'version': '1.0',
'response': {
'outputSpeech': {'type': 'PlainText', 'text': ''},
'shouldEndSession': end_session
},
'sessionAttributes': attributes or {}
}
if text:
obj['response']['outputSpeech'] = {'type': 'PlainText', 'text': text}
elif ssml:
obj['response']['outputSpeech'] = {'type': 'SSML', 'ssml': ssml}
reprompt_output = None
if reprompt_text:
reprompt_output = {'type': 'PlainText', 'text': reprompt_text}
elif reprompt_ssml:
reprompt_output = {'type': 'SSML', 'ssml': reprompt_ssml}
if reprompt_output:
obj['response']['reprompt'] = {'outputSpeech': reprompt_output}
return obj |
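For instance, a handler might build a session-keeping, reprompting response with the helper above (the values are illustrative):

.. code-block:: python

    # Sketch of building responses with respond().
    answer = respond(text='The door is locked.',
                     attributes={'door_state': 'locked'},
                     reprompt_text='Anything else?',
                     end_session=False)

    # SSML variant
    ssml_answer = respond(ssml='<speak>Hello <break time="1s"/> world</speak>')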
<SYSTEM_TASK:>
Ensure the request's timestamp doesn't fall outside of the
<END_TASK>
<USER_TASK:>
Description:
def validate_request_timestamp(req_body, max_diff=150):
"""Ensure the request's timestamp doesn't fall outside of the
app's specified tolerance.
Returns True if this request is valid, False otherwise.
:param req_body: JSON object parsed out of the raw POST data of a request.
:param max_diff: Maximum allowable difference in seconds between request
timestamp and system clock. Amazon requires <= 150 seconds for
published skills.
""" |
time_str = req_body.get('request', {}).get('timestamp')
if not time_str:
log.error('timestamp not present %s', req_body)
return False
req_ts = datetime.strptime(time_str, "%Y-%m-%dT%H:%M:%SZ")
diff = (datetime.utcnow() - req_ts).total_seconds()
if abs(diff) > max_diff:
log.error('timestamp difference too high: %d sec', diff)
return False
return True |
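A quick illustration of the check above with a fabricated request body (the timestamp format matches the strptime pattern in the code):

.. code-block:: python

    from datetime import datetime

    # A body stamped "now" passes; a body with no timestamp is rejected.
    body = {'request': {'timestamp': datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')}}
    assert validate_request_timestamp(body)
    assert not validate_request_timestamp({'request': {}})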
<SYSTEM_TASK:>
Ensure that the certificate and signature specified in the
<END_TASK>
<USER_TASK:>
Description:
def validate_request_certificate(headers, data):
"""Ensure that the certificate and signature specified in the
request headers are truly from Amazon and correctly verify.
Returns True if certificate verification succeeds, False otherwise.
:param headers: Dictionary (or sufficiently dictionary-like) map of request
headers.
:param data: Raw POST data attached to this request.
""" |
# Make sure we have the appropriate headers.
if 'SignatureCertChainUrl' not in headers or \
'Signature' not in headers:
log.error('invalid request headers')
return False
cert_url = headers['SignatureCertChainUrl']
sig = base64.b64decode(headers['Signature'])
cert = _get_certificate(cert_url)
if not cert:
return False
try:
# ... wtf kind of API decision is this
crypto.verify(cert, sig, data, 'sha1')
return True
except:
log.error('invalid request signature')
return False |
<SYSTEM_TASK:>
Download and validate a specified Amazon PEM file.
<END_TASK>
<USER_TASK:>
Description:
def _get_certificate(cert_url):
"""Download and validate a specified Amazon PEM file.""" |
global _cache
if cert_url in _cache:
cert = _cache[cert_url]
if cert.has_expired():
_cache = {}
else:
return cert
url = urlparse(cert_url)
host = url.netloc.lower()
path = posixpath.normpath(url.path)
# Sanity check location so we don't get some random person's cert.
if url.scheme != 'https' or \
host not in ['s3.amazonaws.com', 's3.amazonaws.com:443'] or \
not path.startswith('/echo.api/'):
log.error('invalid cert location %s', cert_url)
return
resp = urlopen(cert_url)
if resp.getcode() != 200:
log.error('failed to download certificate')
return
cert = crypto.load_certificate(crypto.FILETYPE_PEM, resp.read())
if cert.has_expired() or cert.get_subject().CN != 'echo-api.amazon.com':
log.error('certificate expired or invalid')
return
_cache[cert_url] = cert
return cert |
<SYSTEM_TASK:>
Check if version is already applied in the database.
<END_TASK>
<USER_TASK:>
Description:
def is_processed(self, db_versions):
"""Check if version is already applied in the database.
:param db_versions:
""" |
return self.number in (v.number for v in db_versions if v.date_done) |
<SYSTEM_TASK:>
Check if version is a no operation version.
<END_TASK>
<USER_TASK:>
Description:
def is_noop(self):
"""Check if version is a no operation version.
""" |
has_operations = [mode.pre_operations or mode.post_operations
for mode in self._version_modes.values()]
has_upgrade_addons = [mode.upgrade_addons or mode.remove_addons
for mode in self._version_modes.values()]
noop = not any((has_upgrade_addons, has_operations))
return noop |
<SYSTEM_TASK:>
Return a VersionMode for a mode name.
<END_TASK>
<USER_TASK:>
Description:
def _get_version_mode(self, mode=None):
"""Return a VersionMode for a mode name.
When the mode is None, we are working with the 'base' mode.
""" |
version_mode = self._version_modes.get(mode)
if not version_mode:
version_mode = self._version_modes[mode] = VersionMode(name=mode)
return version_mode |
<SYSTEM_TASK:>
Add an operation to the version
<END_TASK>
<USER_TASK:>
Description:
def add_operation(self, operation_type, operation, mode=None):
"""Add an operation to the version
:param mode: Name of the mode in which the operation is executed
:type mode: str
:param operation_type: one of 'pre', 'post'
:type operation_type: str
:param operation: the operation to add
:type operation: :class:`marabunta.model.Operation`
""" |
version_mode = self._get_version_mode(mode=mode)
if operation_type == 'pre':
version_mode.add_pre(operation)
elif operation_type == 'post':
version_mode.add_post(operation)
else:
raise ConfigurationError(
u"Type of operation must be 'pre' or 'post', got %s" %
(operation_type,)
) |
<SYSTEM_TASK:>
Add a backup operation to the version.
<END_TASK>
<USER_TASK:>
Description:
def add_backup_operation(self, backup, mode=None):
"""Add a backup operation to the version.
:param backup: To either add or skip the backup
:type backup: Boolean
:param mode: Name of the mode in which the operation is executed
For now, backups are mode-independent
:type mode: String
""" |
try:
if self.options.backup:
self.options.backup.ignore_if_operation().execute()
except OperationError:
self.backup = backup |
<SYSTEM_TASK:>
Return pre-operations only for the mode asked
<END_TASK>
<USER_TASK:>
Description:
def pre_operations(self, mode=None):
""" Return pre-operations only for the mode asked """ |
version_mode = self._get_version_mode(mode=mode)
return version_mode.pre_operations |
<SYSTEM_TASK:>
Return post-operations only for the mode asked
<END_TASK>
<USER_TASK:>
Description:
def post_operations(self, mode=None):
""" Return post-operations only for the mode asked """ |
version_mode = self._get_version_mode(mode=mode)
return version_mode.post_operations |
<SYSTEM_TASK:>
Return merged set of main addons and mode's addons
<END_TASK>
<USER_TASK:>
Description:
def upgrade_addons_operation(self, addons_state, mode=None):
""" Return merged set of main addons and mode's addons """ |
installed = set(a.name for a in addons_state
if a.state in ('installed', 'to upgrade'))
base_mode = self._get_version_mode()
addons_list = base_mode.upgrade_addons.copy()
if mode:
add_mode = self._get_version_mode(mode=mode)
addons_list |= add_mode.upgrade_addons
to_install = addons_list - installed
to_upgrade = installed & addons_list
return UpgradeAddonsOperation(self.options, to_install, to_upgrade) |
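The set arithmetic above splits the version's addon list against what is already installed; here is a tiny standalone illustration with made-up addon names:

.. code-block:: python

    installed = {'base', 'sale', 'stock'}
    addons_list = {'sale', 'stock', 'new_reporting'}

    to_install = addons_list - installed   # {'new_reporting'}  not yet installed
    to_upgrade = installed & addons_list   # {'sale', 'stock'}  installed and listed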
<SYSTEM_TASK:>
remove explicit hydrogens if possible
<END_TASK>
<USER_TASK:>
Description:
def implicify_hydrogens(self):
"""
remove explicit hydrogens if possible
:return: number of removed hydrogens
""" |
total = 0
for ml in (self.__reagents, self.__reactants, self.__products):
for m in ml:
if hasattr(m, 'implicify_hydrogens'):
total += m.implicify_hydrogens()
if total:
self.flush_cache()
return total |
<SYSTEM_TASK:>
get CGR of reaction
<END_TASK>
<USER_TASK:>
Description:
def compose(self):
"""
get CGR of reaction
reagents will be presented as unchanged molecules
:return: CGRContainer
""" |
rr = self.__reagents + self.__reactants
if rr:
if not all(isinstance(x, (MoleculeContainer, CGRContainer)) for x in rr):
raise TypeError('Queries not composable')
r = reduce(or_, rr)
else:
r = MoleculeContainer()
if self.__products:
if not all(isinstance(x, (MoleculeContainer, CGRContainer)) for x in self.__products):
raise TypeError('Queries not composable')
p = reduce(or_, self.__products)
else:
p = MoleculeContainer()
return r ^ p |
<SYSTEM_TASK:>
fix coordinates of molecules in reaction
<END_TASK>
<USER_TASK:>
Description:
def fix_positions(self):
"""
fix coordinates of molecules in reaction
""" |
shift_x = 0
for m in self.__reactants:
max_x = self.__fix_positions(m, shift_x, 0)
shift_x = max_x + 1
arrow_min = shift_x
if self.__reagents:
for m in self.__reagents:
max_x = self.__fix_positions(m, shift_x, 1.5)
shift_x = max_x + 1
else:
shift_x += 3
arrow_max = shift_x - 1
for m in self.__products:
max_x = self.__fix_positions(m, shift_x, 0)
shift_x = max_x + 1
self._arrow = (arrow_min, arrow_max)
self.flush_cache() |
<SYSTEM_TASK:>
Permissions of the user.
<END_TASK>
<USER_TASK:>
Description:
def get_permissions(self):
"""
Permissions of the user.
Returns:
List of Permission objects.
""" |
user_role = self.last_login_role() if self.last_login_role_key else self.role_set[0].role
return user_role.get_permissions() |
<SYSTEM_TASK:>
Adds a permission with given name.
<END_TASK>
<USER_TASK:>
Description:
def add_permission_by_name(self, code, save=False):
"""
Adds a permission with given name.
Args:
code (str): Code name of the permission.
save (bool): If False, only returns the matching permissions without adding them.
""" |
if not save:
return ["%s | %s" % (p.name, p.code) for p in
Permission.objects.filter(code__contains=code)]
for p in Permission.objects.filter(code__contains=code):
if p not in self.Permissions:
self.Permissions(permission=p)
if p:
self.save() |
<SYSTEM_TASK:>
sends a message to user of this role's private mq exchange
<END_TASK>
<USER_TASK:>
Description:
def send_notification(self, title, message, typ=1, url=None, sender=None):
"""
sends a message to user of this role's private mq exchange
""" |
self.user.send_notification(title=title, message=message, typ=typ, url=url,
sender=sender) |
<SYSTEM_TASK:>
Finds if move from current location would be a promotion
<END_TASK>
<USER_TASK:>
Description:
def would_move_be_promotion(self):
"""
Finds if move from current location would be a promotion
""" |
return (self._end_loc.rank == 0 and not self.color) or \
(self._end_loc.rank == 7 and self.color) |
<SYSTEM_TASK:>
Connect receiver to sender for signal.
<END_TASK>
<USER_TASK:>
Description:
def connect(self, receiver, sender=None, weak=True, dispatch_uid=None):
"""
Connect receiver to sender for signal.
Arguments:
receiver
A function or an instance method which is to receive signals.
Receivers must be hashable objects.
If weak is True, then receiver must be weak referenceable.
Receivers must be able to accept keyword arguments.
If a receiver is connected with a dispatch_uid argument, it
will not be added if another receiver was already connected
with that dispatch_uid.
sender
The sender to which the receiver should respond. Must either be
of type Signal, or None to receive events from any sender.
weak
Whether to use weak references to the receiver. By default, the
module will attempt to use weak references to the receiver
objects. If this parameter is false, then strong references will
be used.
dispatch_uid
An identifier used to uniquely identify a particular instance of
a receiver. This will usually be a string, though it may be
anything hashable.
""" |
if dispatch_uid:
lookup_key = (dispatch_uid, _make_id(sender))
else:
lookup_key = (_make_id(receiver), _make_id(sender))
if weak:
ref = weakref.ref
receiver_object = receiver
# Check for bound methods
if hasattr(receiver, '__self__') and hasattr(receiver, '__func__'):
ref = WeakMethod
receiver_object = receiver.__self__
if six.PY3:
receiver = ref(receiver)
weakref.finalize(receiver_object, self._remove_receiver)
else:
receiver = ref(receiver, self._remove_receiver)
with self.lock:
self._clear_dead_receivers()
for r_key, _ in self.receivers:
if r_key == lookup_key:
break
else:
self.receivers.append((lookup_key, receiver))
self.sender_receivers_cache.clear() |
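A small sketch of wiring up a receiver (hedged: `post_save` stands for some Signal instance defined elsewhere; the receiver function is made up):

def log_save(sender, **kwargs):
    # receivers must be hashable and accept keyword arguments
    print("save signal from", sender)

# keep a strong reference and deduplicate repeated connects via dispatch_uid
post_save.connect(log_save, weak=False, dispatch_uid="log_save")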
<SYSTEM_TASK:>
Disconnect receiver from sender for signal.
<END_TASK>
<USER_TASK:>
Description:
def disconnect(self, receiver=None, sender=None, dispatch_uid=None):
"""
Disconnect receiver from sender for signal.
        If weak references are used, disconnect need not be called. The receiver
        will be removed from dispatch automatically.
        Arguments:
            receiver
                The registered receiver to disconnect. May be None if
                dispatch_uid is specified.
            sender
                The registered sender to disconnect.
            dispatch_uid
                The unique identifier of the receiver to disconnect.
""" |
if dispatch_uid:
lookup_key = (dispatch_uid, _make_id(sender))
else:
lookup_key = (_make_id(receiver), _make_id(sender))
disconnected = False
with self.lock:
self._clear_dead_receivers()
for index in range(len(self.receivers)):
(r_key, _) = self.receivers[index]
if r_key == lookup_key:
disconnected = True
del self.receivers[index]
break
self.sender_receivers_cache.clear()
return disconnected |
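And the matching tear-down for the hypothetical receiver connected above:

# returns True if a receiver registered under this dispatch_uid was removed
was_removed = post_save.disconnect(dispatch_uid="log_save")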
<SYSTEM_TASK:>
Perform a migration according to config.
<END_TASK>
<USER_TASK:>
Description:
def migrate(config):
"""Perform a migration according to config.
:param config: The configuration to be applied
:type config: Config
""" |
webapp = WebApp(config.web_host, config.web_port,
custom_maintenance_file=config.web_custom_html)
webserver = WebServer(webapp)
webserver.daemon = True
webserver.start()
migration_parser = YamlParser.parse_from_file(config.migration_file)
migration = migration_parser.parse()
database = Database(config)
with database.connect() as lock_connection:
application_lock = ApplicationLock(lock_connection)
application_lock.start()
while not application_lock.acquired:
time.sleep(0.5)
else:
if application_lock.replica:
# when a replica could finally acquire a lock, it
# means that the concurrent process has finished the
# migration or that it failed to run it.
# In both cases after the lock is released, this process will
# verify if it has still to do something (if the other process
# failed mainly).
application_lock.stop = True
application_lock.join()
# we are not in the replica or the lock is released: go on for the
# migration
try:
table = MigrationTable(database)
runner = Runner(config, migration, database, table)
runner.perform()
finally:
application_lock.stop = True
application_lock.join() |
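A hedged driving sketch: the config object below is a stand-in that exposes only the attributes read directly in this function; the real Config also carries the database connection settings consumed by Database(config).

class StubConfig(object):
    # illustrative values only
    web_host = "0.0.0.0"
    web_port = 8069
    web_custom_html = None
    migration_file = "migration.yml"
    # ... plus whatever Database(config) expects (host, port, credentials, ...)

migrate(StubConfig())  # serves the maintenance page while the migration runs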
<SYSTEM_TASK:>
Generates permissions for all CrudView based class methods.
<END_TASK>
<USER_TASK:>
Description:
def get_permissions(cls):
"""
Generates permissions for all CrudView based class methods.
Returns:
            List of permission code strings in the form "<registry key>.<view method name>".
""" |
perms = []
for kls_name, kls in cls.registry.items():
for method_name in cls.__dict__.keys():
if method_name.endswith('_view'):
perms.append("%s.%s" % (kls_name, method_name))
return perms |
<SYSTEM_TASK:>
we need to create basic permissions
<END_TASK>
<USER_TASK:>
Description:
def _get_object_menu_models():
"""
we need to create basic permissions
    only for CRUD enabled models, so this collects and returns their names
""" |
from pyoko.conf import settings
enabled_models = []
for entry in settings.OBJECT_MENU.values():
for mdl in entry:
if 'wf' not in mdl:
enabled_models.append(mdl['name'])
return enabled_models |
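For reference, a settings shape this helper would accept (the values are illustrative); only the plain model entries end up in the returned list:

OBJECT_MENU = {
    'other': [
        {'name': 'Invoice'},                                   # CRUD model -> included
        {'name': 'ApproveInvoice', 'wf': 'approve_invoice'},   # workflow entry -> skipped
    ],
}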
<SYSTEM_TASK:>
create a custom permission
<END_TASK>
<USER_TASK:>
Description:
def add(cls, code_name, name='', description=''):
"""
create a custom permission
""" |
if code_name not in cls.registry:
cls.registry[code_name] = (code_name, name or code_name, description)
return code_name |
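Usage sketch (hedged: `CustomPermission` is a placeholder name for whatever class exposes this registry; the code name is made up):

# registering the same code twice keeps the first definition
code = CustomPermission.add("report.export", name="Export reports",
                            description="Allows exporting report data")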
<SYSTEM_TASK:>
get self to other mapping
<END_TASK>
<USER_TASK:>
Description:
def get_mapping(self, other):
"""
get self to other mapping
""" |
m = next(self._matcher(other).isomorphisms_iter(), None)
if m:
return {v: k for k, v in m.items()} |
<SYSTEM_TASK:>
get self to other substructure mapping
<END_TASK>
<USER_TASK:>
Description:
def get_substructure_mapping(self, other, limit=1):
"""
get self to other substructure mapping
        :param limit: number of matches to return. If 0, return an iterator over all possible mappings;
            if 1, return a single dict (or None when there is no match); if > 1, return a list of dicts
""" |
i = self._matcher(other).subgraph_isomorphisms_iter()
if limit == 1:
m = next(i, None)
if m:
return {v: k for k, v in m.items()}
return
elif limit == 0:
return ({v: k for k, v in m.items()} for m in i)
return [{v: k for k, v in m.items()} for m in islice(i, limit)] |
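A usage sketch (hedged: `mol` and `query` stand for molecule containers from a CGRtools-style library, with `query` being the smaller pattern searched for inside `mol`):

# best single match, or None when the pattern does not occur
mapping = mol.get_substructure_mapping(query)

# lazily walk every possible match
for m in mol.get_substructure_mapping(query, limit=0):
    print(m)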
<SYSTEM_TASK:>
Shifts in direction provided by ``Direction`` enum.
<END_TASK>
<USER_TASK:>
Description:
def shift(self, direction):
"""
Shifts in direction provided by ``Direction`` enum.
:type: direction: Direction
:rtype: Location
""" |
try:
if direction == Direction.UP:
return self.shift_up()
elif direction == Direction.DOWN:
return self.shift_down()
elif direction == Direction.RIGHT:
return self.shift_right()
elif direction == Direction.LEFT:
return self.shift_left()
else:
raise IndexError("Invalid direction {}".format(direction))
except IndexError as e:
raise IndexError(e) |
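A tiny illustration (hedged: it assumes the Location constructor takes (rank, file), as the shift_* helpers below do, and that Direction is the enum referenced above):

start = Location(1, 4)              # rank 1, file 4
north = start.shift(Direction.UP)   # Location(2, 4)
west = start.shift(Direction.LEFT)  # Location(1, 3)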
<SYSTEM_TASK:>
Finds Location shifted up by 1
<END_TASK>
<USER_TASK:>
Description:
def shift_up(self, times=1):
"""
Finds Location shifted up by 1
:rtype: Location
""" |
try:
return Location(self._rank + times, self._file)
except IndexError as e:
raise IndexError(e) |
<SYSTEM_TASK:>
Finds Location shifted down by 1
<END_TASK>
<USER_TASK:>
Description:
def shift_down(self, times=1):
"""
Finds Location shifted down by 1
:rtype: Location
""" |
try:
return Location(self._rank - times, self._file)
except IndexError as e:
raise IndexError(e) |
<SYSTEM_TASK:>
Finds Location shifted right by 1
<END_TASK>
<USER_TASK:>
Description:
def shift_right(self, times=1):
"""
Finds Location shifted right by 1
:rtype: Location
""" |
try:
return Location(self._rank, self._file + times)
except IndexError as e:
raise IndexError(e) |
<SYSTEM_TASK:>
Finds Location shifted left by 1
<END_TASK>
<USER_TASK:>
Description:
def shift_left(self, times=1):
"""
Finds Location shifted left by 1
:rtype: Location
""" |
try:
return Location(self._rank, self._file - times)
except IndexError as e:
raise IndexError(e) |
<SYSTEM_TASK:>
Finds Location shifted up right by 1
<END_TASK>
<USER_TASK:>
Description:
def shift_up_right(self, times=1):
"""
Finds Location shifted up right by 1
:rtype: Location
""" |
try:
return Location(self._rank + times, self._file + times)
except IndexError as e:
raise IndexError(e) |
<SYSTEM_TASK:>
Finds Location shifted up left by 1
<END_TASK>
<USER_TASK:>
Description:
def shift_up_left(self, times=1):
"""
Finds Location shifted up left by 1
:rtype: Location
""" |
try:
return Location(self._rank + times, self._file - times)
except IndexError as e:
raise IndexError(e) |
<SYSTEM_TASK:>
Finds Location shifted down right by 1
<END_TASK>
<USER_TASK:>
Description:
def shift_down_right(self, times=1):
"""
Finds Location shifted down right by 1
:rtype: Location
""" |
try:
return Location(self._rank - times, self._file + times)
except IndexError as e:
raise IndexError(e) |
<SYSTEM_TASK:>
Finds Location shifted down left by 1
<END_TASK>
<USER_TASK:>
Description:
def shift_down_left(self, times=1):
"""
Finds Location shifted down left by 1
:rtype: Location
""" |
try:
return Location(self._rank - times, self._file - times)
except IndexError as e:
raise IndexError(e) |
<SYSTEM_TASK:>
standardize functional groups
<END_TASK>
<USER_TASK:>
Description:
def standardize(self):
"""
standardize functional groups
:return: number of found groups
""" |
self.reset_query_marks()
seen = set()
total = 0
for n, atom in self.atoms():
if n in seen:
continue
for k, center in central.items():
if center != atom:
continue
shell = tuple((bond, self._node[m]) for m, bond in self._adj[n].items())
for shell_query, shell_patch, atom_patch in query_patch[k]:
if shell_query != shell:
continue
total += 1
for attr_name, attr_value in atom_patch.items():
setattr(atom, attr_name, attr_value)
for (bond_patch, atom_patch), (bond, atom) in zip(shell_patch, shell):
bond.update(bond_patch)
for attr_name, attr_value in atom_patch.items():
setattr(atom, attr_name, attr_value)
seen.add(n)
seen.update(self._adj[n])
break
else:
continue
break
if total:
self.flush_cache()
return total |
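A usage sketch (hedged: `mol` stands for a molecule container exposing this method):

found = mol.standardize()   # rewrite recognised functional groups in place
print("groups standardized:", found)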