_id | title | partition | text | language | meta_information
---|---|---|---|---|---
q277200
|
Auth.change_password
|
test
|
def change_password(self, old_password, new_password):
"""Change password."""
try:
user = this.user
except self.user_model.DoesNotExist:
self.auth_failed()
user = auth.authenticate(
username=user.get_username(),
password=self.get_password(old_password),
)
if user is None:
self.auth_failed()
else:
user.set_password(self.get_password(new_password))
user.save()
password_changed.send(
sender=__name__,
request=this.request,
user=user,
)
return {"passwordChanged": True}
|
python
|
{
"resource": ""
}
|
q277201
|
Auth.forgot_password
|
test
|
def forgot_password(self, params):
"""Request password reset email."""
username = self.get_username(params)
try:
user = self.user_model.objects.get(**{
self.user_model.USERNAME_FIELD: username,
})
except self.user_model.DoesNotExist:
self.auth_failed()
minutes_valid = HASH_MINUTES_VALID[HashPurpose.PASSWORD_RESET]
token = get_user_token(
user=user, purpose=HashPurpose.PASSWORD_RESET,
minutes_valid=minutes_valid,
)
forgot_password.send(
sender=__name__,
user=user,
token=token,
request=this.request,
expiry_date=calc_expiry_time(minutes_valid),
)
|
python
|
{
"resource": ""
}
|
q277202
|
Auth.reset_password
|
test
|
def reset_password(self, token, new_password):
"""Reset password using a token received in email then logs user in."""
user = self.validated_user(
token, purpose=HashPurpose.PASSWORD_RESET,
minutes_valid=HASH_MINUTES_VALID[HashPurpose.PASSWORD_RESET],
)
user.set_password(new_password)
user.save()
self.do_login(user)
return {"userId": this.user_ddp_id}
|
python
|
{
"resource": ""
}
|
q277203
|
dict_merge
|
test
|
def dict_merge(lft, rgt):
"""
Recursive dict merge.
Recursively merges dicts: not just a simple lft['key'] = rgt['key']. If
both lft and rgt have a key whose value is a dict, then dict_merge is
called on both values and the result is stored in the returned dictionary.
"""
if not isinstance(rgt, dict):
return rgt
result = deepcopy(lft)
for key, val in rgt.iteritems():
if key in result and isinstance(result[key], dict):
result[key] = dict_merge(result[key], val)
else:
result[key] = deepcopy(val)
return result
|
python
|
{
"resource": ""
}
|
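For context, a quick demonstration of the merge semantics above. This is a minimal sketch that swaps Python 2's iteritems() for Python 3's items(); the behaviour is otherwise identical.
from copy import deepcopy

def dict_merge(lft, rgt):
    """Recursive dict merge (Python 3 sketch of the function above)."""
    if not isinstance(rgt, dict):
        return rgt
    result = deepcopy(lft)
    for key, val in rgt.items():
        if key in result and isinstance(result[key], dict):
            result[key] = dict_merge(result[key], val)
        else:
            result[key] = deepcopy(val)
    return result

# nested dicts are merged key by key rather than overwritten wholesale:
print(dict_merge(
    {'db': {'host': 'localhost', 'port': 5432}, 'debug': False},
    {'db': {'port': 5433}},
))
# -> {'db': {'host': 'localhost', 'port': 5433}, 'debug': False}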
q277204
|
read
|
test
|
def read(path, default=None, encoding='utf8'):
"""Read encoded contents from specified path or return default."""
if not path:
return default
try:
with io.open(path, mode='r', encoding=encoding) as contents:
return contents.read()
except IOError:
if default is not None:
return default
raise
|
python
|
{
"resource": ""
}
|
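To see the fall-back behaviour of read() in isolation, here is a self-contained check (the missing path below is hypothetical):
import io

def read(path, default=None, encoding='utf8'):
    """Same logic as the function above."""
    if not path:
        return default
    try:
        with io.open(path, mode='r', encoding=encoding) as contents:
            return contents.read()
    except IOError:
        if default is not None:
            return default
        raise

print(read(None, default='fallback'))              # empty path -> 'fallback'
print(read('/no/such/file', default='fallback'))   # IOError -> 'fallback'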
q277205
|
get_meteor_id
|
test
|
def get_meteor_id(obj_or_model, obj_pk=None):
"""Return an Alea ID for the given object."""
if obj_or_model is None:
return None
# Django model._meta is now public API -> pylint: disable=W0212
meta = obj_or_model._meta
model = meta.model
if model is ObjectMapping:
# this doesn't make sense - raise TypeError
raise TypeError("Can't map ObjectMapping instances through self.")
# try getting value of AleaIdField straight from instance if possible
if isinstance(obj_or_model, model):
# obj_or_model is an instance, not a model.
if isinstance(meta.pk, AleaIdField):
return obj_or_model.pk
if obj_pk is None:
# fall back to primary key, but coerce as string type for lookup.
obj_pk = str(obj_or_model.pk)
alea_unique_fields = [
field
for field in meta.local_fields
if isinstance(field, AleaIdField) and field.unique
]
if len(alea_unique_fields) == 1:
# found an AleaIdField with unique=True, assume it's got the value.
aid = alea_unique_fields[0].attname
if isinstance(obj_or_model, model):
val = getattr(obj_or_model, aid)
elif obj_pk is None:
val = None
else:
val = model.objects.values_list(aid, flat=True).get(
pk=obj_pk,
)
if val:
return val
if obj_pk is None:
# bail out if args are (model, pk) but pk is None.
return None
# fallback to using AleaIdField from ObjectMapping model.
content_type = ContentType.objects.get_for_model(model)
try:
return ObjectMapping.objects.values_list(
'meteor_id', flat=True,
).get(
content_type=content_type,
object_id=obj_pk,
)
except ObjectDoesNotExist:
return ObjectMapping.objects.create(
content_type=content_type,
object_id=obj_pk,
meteor_id=meteor_random_id('/collection/%s' % meta),
).meteor_id
|
python
|
{
"resource": ""
}
|
q277206
|
get_meteor_ids
|
test
|
def get_meteor_ids(model, object_ids):
"""Return Alea ID mapping for all given ids of specified model."""
# Django model._meta is now public API -> pylint: disable=W0212
meta = model._meta
result = collections.OrderedDict(
(str(obj_pk), None)
for obj_pk
in object_ids
)
if isinstance(meta.pk, AleaIdField):
# primary_key is an AleaIdField, use it.
return collections.OrderedDict(
(obj_pk, obj_pk) for obj_pk in object_ids
)
alea_unique_fields = [
field
for field in meta.local_fields
if isinstance(field, AleaIdField) and field.unique and not field.null
]
if len(alea_unique_fields) == 1:
aid = alea_unique_fields[0].name
query = model.objects.filter(
pk__in=object_ids,
).values_list('pk', aid)
else:
content_type = ContentType.objects.get_for_model(model)
query = ObjectMapping.objects.filter(
content_type=content_type,
object_id__in=list(result)
).values_list('object_id', 'meteor_id')
for obj_pk, meteor_id in query:
result[str(obj_pk)] = meteor_id
for obj_pk, meteor_id in result.items():
if meteor_id is None:
result[obj_pk] = get_meteor_id(model, obj_pk)
return result
|
python
|
{
"resource": ""
}
|
q277207
|
get_object_id
|
test
|
def get_object_id(model, meteor_id):
"""Return an object ID for the given meteor_id."""
if meteor_id is None:
return None
# Django model._meta is now public API -> pylint: disable=W0212
meta = model._meta
if model is ObjectMapping:
# this doesn't make sense - raise TypeError
raise TypeError("Can't map ObjectMapping instances through self.")
if isinstance(meta.pk, AleaIdField):
# meteor_id is the primary key
return meteor_id
alea_unique_fields = [
field
for field in meta.local_fields
if isinstance(field, AleaIdField) and field.unique
]
if len(alea_unique_fields) == 1:
# found an AleaIdField with unique=True, assume it's got the value.
val = model.objects.values_list(
'pk', flat=True,
).get(**{
alea_unique_fields[0].attname: meteor_id,
})
if val:
return val
content_type = ContentType.objects.get_for_model(model)
return ObjectMapping.objects.filter(
content_type=content_type,
meteor_id=meteor_id,
).values_list('object_id', flat=True).get()
|
python
|
{
"resource": ""
}
|
q277208
|
get_object_ids
|
test
|
def get_object_ids(model, meteor_ids):
"""Return all object IDs for the given meteor_ids."""
if model is ObjectMapping:
# this doesn't make sense - raise TypeError
raise TypeError("Can't map ObjectMapping instances through self.")
# Django model._meta is now public API -> pylint: disable=W0212
meta = model._meta
alea_unique_fields = [
field
for field in meta.local_fields
if isinstance(field, AleaIdField) and field.unique and not field.null
]
result = collections.OrderedDict(
(str(meteor_id), None)
for meteor_id
in meteor_ids
)
if len(alea_unique_fields) == 1:
aid = alea_unique_fields[0].name
query = model.objects.filter(**{
'%s__in' % aid: meteor_ids,
}).values_list(aid, 'pk')
else:
content_type = ContentType.objects.get_for_model(model)
query = ObjectMapping.objects.filter(
content_type=content_type,
meteor_id__in=meteor_ids,
).values_list('meteor_id', 'object_id')
for meteor_id, object_id in query:
result[meteor_id] = object_id
return result
|
python
|
{
"resource": ""
}
|
q277209
|
get_object
|
test
|
def get_object(model, meteor_id, *args, **kwargs):
"""Return an object for the given meteor_id."""
# Django model._meta is now public API -> pylint: disable=W0212
meta = model._meta
if isinstance(meta.pk, AleaIdField):
# meteor_id is the primary key
return model.objects.filter(*args, **kwargs).get(pk=meteor_id)
alea_unique_fields = [
field
for field in meta.local_fields
if isinstance(field, AleaIdField) and field.unique and not field.null
]
if len(alea_unique_fields) == 1:
return model.objects.filter(*args, **kwargs).get(**{
alea_unique_fields[0].name: meteor_id,
})
return model.objects.filter(*args, **kwargs).get(
pk=get_object_id(model, meteor_id),
)
|
python
|
{
"resource": ""
}
|
q277210
|
set_default_forwards
|
test
|
def set_default_forwards(app_name, operation, apps, schema_editor):
"""Set default value for AleaIdField."""
model = apps.get_model(app_name, operation.model_name)
for obj_pk in model.objects.values_list('pk', flat=True):
model.objects.filter(pk=obj_pk).update(**{
operation.name: get_meteor_id(model, obj_pk),
})
|
python
|
{
"resource": ""
}
|
q277211
|
set_default_reverse
|
test
|
def set_default_reverse(app_name, operation, apps, schema_editor):
"""Unset default value for AleaIdField."""
model = apps.get_model(app_name, operation.model_name)
for obj_pk in model.objects.values_list('pk', flat=True):
get_meteor_id(model, obj_pk)
|
python
|
{
"resource": ""
}
|
q277212
|
TruncateOperation.truncate
|
test
|
def truncate(self, app_label, schema_editor, models):
"""Truncate tables."""
for model_name in models:
model = '%s_%s' % (app_label, model_name)
schema_editor.execute(
'TRUNCATE TABLE %s RESTART IDENTITY CASCADE' % (
model.lower(),
),
)
|
python
|
{
"resource": ""
}
|
q277213
|
TruncateOperation.database_forwards
|
test
|
def database_forwards(self, app_label, schema_editor, from_state, to_state):
"""Use schema_editor to apply any forward changes."""
self.truncate(app_label, schema_editor, self.truncate_forwards)
|
python
|
{
"resource": ""
}
|
q277214
|
TruncateOperation.database_backwards
|
test
|
def database_backwards(self, app_label, schema_editor, from_state, to_state):
"""Use schema_editor to apply any reverse changes."""
self.truncate(app_label, schema_editor, self.truncate_backwards)
|
python
|
{
"resource": ""
}
|
q277215
|
build_meteor.initialize_options
|
test
|
def initialize_options(self):
"""Set command option defaults."""
setuptools.command.build_py.build_py.initialize_options(self)
self.meteor = 'meteor'
self.meteor_debug = False
self.build_lib = None
self.package_dir = None
self.meteor_builds = []
self.no_prune_npm = None
self.inplace = True
|
python
|
{
"resource": ""
}
|
q277216
|
build_meteor.finalize_options
|
test
|
def finalize_options(self):
"""Update command options."""
# Get all the information we need to install pure Python modules
# from the umbrella 'install' command -- build (source) directory,
# install (target) directory, and whether to compile .py files.
self.set_undefined_options(
'build',
('build_lib', 'build_lib'),
)
self.set_undefined_options(
'build_py',
('package_dir', 'package_dir'),
)
setuptools.command.build_py.build_py.finalize_options(self)
|
python
|
{
"resource": ""
}
|
q277217
|
build_meteor.run
|
test
|
def run(self):
"""Peform build."""
for (package, source, target, extra_args) in self.meteor_builds:
src_dir = self.get_package_dir(package)
# convert UNIX-style paths to directory names
project_dir = self.path_to_dir(src_dir, source)
target_dir = self.path_to_dir(src_dir, target)
output_dir = self.path_to_dir(
os.path.abspath(SETUP_DIR if self.inplace else self.build_lib),
target_dir,
)
# construct command line.
cmdline = [self.meteor, 'build', '--directory', output_dir]
no_prune_npm = self.no_prune_npm
if extra_args[:1] == ['--no-prune-npm']:
no_prune_npm = True
extra_args[:1] = []
if self.meteor_debug and '--debug' not in cmdline:
cmdline.append('--debug')
cmdline.extend(extra_args)
# execute command
log.info(
'building meteor app %r (%s)', project_dir, ' '.join(cmdline),
)
subprocess.check_call(cmdline, cwd=project_dir)
if not no_prune_npm:
# django-ddp doesn't use bundle/programs/server/npm cruft
npm_build_dir = os.path.join(
output_dir, 'bundle', 'programs', 'server', 'npm',
)
log.info('pruning meteor npm build %r', npm_build_dir)
shutil.rmtree(npm_build_dir)
|
python
|
{
"resource": ""
}
|
q277218
|
build_meteor.path_to_dir
|
test
|
def path_to_dir(*path_args):
"""Convert a UNIX-style path into platform specific directory spec."""
return os.path.join(
*list(path_args[:-1]) + path_args[-1].split(posixpath.sep)
)
|
python
|
{
"resource": ""
}
|
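A small demonstration of the conversion performed by path_to_dir: only the final argument is treated as a UNIX-style path and split on '/', earlier arguments pass through unchanged.
import os
import posixpath

def path_to_dir(*path_args):
    """Convert a UNIX-style path into platform specific directory spec."""
    return os.path.join(
        *list(path_args[:-1]) + path_args[-1].split(posixpath.sep)
    )

print(path_to_dir('/build', 'bundle/programs/server'))
# POSIX:   /build/bundle/programs/server
# Windows: /build\bundle\programs\server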
q277219
|
Alea.seed
|
test
|
def seed(self, values):
"""Seed internal state from supplied values."""
if not values:
# Meteor uses epoch seconds as the seed if no args supplied, we use
# a much more secure seed by default to avoid hash collisions.
seed_ids = [int, str, random, self, values, self.__class__]
random.shuffle(seed_ids)
values = list(map(id, seed_ids)) + [time.time(), os.urandom(512)]
mash = Mash()
self.c = 1
self.s0 = mash(' ')
self.s1 = mash(' ')
self.s2 = mash(' ')
for val in values:
self.s0 -= mash(val)
if self.s0 < 0:
self.s0 += 1
self.s1 -= mash(val)
if self.s1 < 0:
self.s1 += 1
self.s2 -= mash(val)
if self.s2 < 0:
self.s2 += 1
|
python
|
{
"resource": ""
}
|
q277220
|
Alea.state
|
test
|
def state(self):
"""Return internal state, useful for testing."""
return {'c': self.c, 's0': self.s0, 's1': self.s1, 's2': self.s2}
|
python
|
{
"resource": ""
}
|
q277221
|
Alea.random_string
|
test
|
def random_string(self, length, alphabet):
"""Return string of `length` elements chosen from `alphabet`."""
return ''.join(
self.choice(alphabet) for n in range(length)
)
|
python
|
{
"resource": ""
}
|
q277222
|
api_endpoint
|
test
|
def api_endpoint(path_or_func=None, decorate=True):
"""
Decorator to mark a method as an API endpoint for later registration.
Args:
path_or_func: either the function to be decorated or its API path.
decorate (bool): Apply API_ENDPOINT_DECORATORS if True (default).
Returns:
Callable: Decorated function (with optionally applied decorators).
Examples:
>>> from dddp.api import APIMixin, api_endpoint
>>> class Counter(APIMixin):
... value = 0
...
... # default API path matches function name 'increment'.
... @api_endpoint
... def increment(self, amount):
... '''Increment counter value by `amount`.'''
... self.value += amount
... return self.value
...
# explicitly set API path to 'Decrement'.
... @api_endpoint('Decrement')
... def decrement(self, amount):
... '''Decrement counter value by `amount`.'''
... self.value -= amount
... return self.value
"""
def maybe_decorated(func):
"""Apply API_ENDPOINT_DECORATORS to func."""
if decorate:
for decorator in API_ENDPOINT_DECORATORS:
func = decorator()(func)
return func
if callable(path_or_func):
path_or_func.api_path = path_or_func.__name__
return maybe_decorated(path_or_func)
else:
def _api_endpoint(func):
"""Decorator inner."""
if path_or_func is None:
func.api_path = func.__name__
else:
func.api_path = path_or_func
return maybe_decorated(func)
return _api_endpoint
|
python
|
{
"resource": ""
}
|
q277223
|
api_endpoints
|
test
|
def api_endpoints(obj):
"""Iterator over all API endpoint names and callbacks."""
for name in dir(obj):
attr = getattr(obj, name)
api_path = getattr(attr, 'api_path', None)
if api_path:
yield (
'%s%s' % (obj.api_path_prefix, api_path),
attr,
)
for api_provider in obj.api_providers:
for api_path, attr in api_endpoints(api_provider):
yield (api_path, attr)
|
python
|
{
"resource": ""
}
|
q277224
|
APIMixin.clear_api_path_map_cache
|
test
|
def clear_api_path_map_cache(self):
"""Clear out cache for api_path_map."""
self._api_path_cache = None
for api_provider in self.api_providers:
if six.get_method_self(
api_provider.clear_api_path_map_cache,
) is not None:
api_provider.clear_api_path_map_cache()
|
python
|
{
"resource": ""
}
|
q277225
|
dprint
|
test
|
def dprint(name, val):
"""Debug print name and val."""
from pprint import pformat
print(
'% 5s: %s' % (
name,
'\n '.join(
pformat(
val, indent=4, width=75,
).split('\n')
),
),
)
|
python
|
{
"resource": ""
}
|
q277226
|
validate_kwargs
|
test
|
def validate_kwargs(func, kwargs):
"""Validate arguments to be supplied to func."""
func_name = func.__name__
argspec = inspect.getargspec(func)
all_args = argspec.args[:]
defaults = list(argspec.defaults or [])
# ignore implicit 'self' argument
if inspect.ismethod(func) and all_args[:1] == ['self']:
all_args[:1] = []
# don't require arguments that have defaults
if defaults:
required = all_args[:-len(defaults)]
else:
required = all_args[:]
# translate 'foo_' to avoid reserved names like 'id'
trans = {
arg: arg.endswith('_') and arg[:-1] or arg
for arg
in all_args
}
for key in list(kwargs):
key_adj = '%s_' % key
if key_adj in all_args:
kwargs[key_adj] = kwargs.pop(key)
# figure out what we're missing
supplied = sorted(kwargs)
missing = [
trans.get(arg, arg) for arg in required
if arg not in supplied
]
if missing:
raise MeteorError(
400,
func.err,
'Missing required arguments to %s: %s' % (
func_name,
' '.join(missing),
),
)
# figure out what is extra
extra = [
arg for arg in supplied
if arg not in all_args
]
if extra:
raise MeteorError(
400,
func.err,
'Unknown arguments to %s: %s' % (func_name, ' '.join(extra)),
)
|
python
|
{
"resource": ""
}
|
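The same required/extra checks can be sketched with Python 3's inspect.signature; this hypothetical helper omits the trailing-underscore translation and MeteorError wrapping of the function above.
import inspect

def check_call_args(func, kwargs):
    """Minimal sketch: report missing required and unknown arguments."""
    params = inspect.signature(func).parameters
    required = [
        name for name, param in params.items()
        if param.default is inspect.Parameter.empty
        and param.kind in (param.POSITIONAL_OR_KEYWORD, param.KEYWORD_ONLY)
    ]
    missing = [name for name in required if name not in kwargs]
    extra = [name for name in kwargs if name not in params]
    if missing:
        raise TypeError('Missing required arguments: %s' % ' '.join(missing))
    if extra:
        raise TypeError('Unknown arguments: %s' % ' '.join(extra))

def recv_sub(id_, name, params):
    pass

check_call_args(recv_sub, {'id_': '1', 'name': 'posts', 'params': []})  # ok
# check_call_args(recv_sub, {'id_': '1'})  # TypeError: Missing required arguments: name params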
q277227
|
DDPWebSocketApplication.on_open
|
test
|
def on_open(self):
"""Handle new websocket connection."""
this.request = WSGIRequest(self.ws.environ)
this.ws = self
this.send = self.send
this.reply = self.reply
self.logger = self.ws.logger
self.remote_ids = collections.defaultdict(set)
# `_tx_buffer` collects outgoing messages which must be sent in order
self._tx_buffer = {}
# track the head of the queue (buffer) and the next msg to be sent
self._tx_buffer_id_gen = itertools.cycle(irange(sys.maxint))
self._tx_next_id_gen = itertools.cycle(irange(sys.maxint))
# start by waiting for the very first message
self._tx_next_id = next(self._tx_next_id_gen)
this.remote_addr = self.remote_addr = \
'{0[REMOTE_ADDR]}:{0[REMOTE_PORT]}'.format(
self.ws.environ,
)
this.subs = {}
safe_call(self.logger.info, '+ %s OPEN', self)
self.send('o')
self.send('a["{\\"server_id\\":\\"0\\"}"]')
|
python
|
{
"resource": ""
}
|
q277228
|
DDPWebSocketApplication.on_close
|
test
|
def on_close(self, *args, **kwargs):
"""Handle closing of websocket connection."""
if self.connection is not None:
del self.pgworker.connections[self.connection.pk]
self.connection.delete()
self.connection = None
signals.request_finished.send(sender=self.__class__)
safe_call(self.logger.info, '- %s %s', self, args or 'CLOSE')
|
python
|
{
"resource": ""
}
|
q277229
|
DDPWebSocketApplication.on_message
|
test
|
def on_message(self, message):
"""Process a message received from remote."""
if self.ws.closed:
return None
try:
safe_call(self.logger.debug, '< %s %r', self, message)
# process individual messages
for data in self.ddp_frames_from_message(message):
self.process_ddp(data)
# emit request_finished signal to close DB connections
signals.request_finished.send(sender=self.__class__)
except geventwebsocket.WebSocketError:
self.ws.close()
|
python
|
{
"resource": ""
}
|
q277230
|
DDPWebSocketApplication.ddp_frames_from_message
|
test
|
def ddp_frames_from_message(self, message):
"""Yield DDP messages from a raw WebSocket message."""
# parse message set
try:
msgs = ejson.loads(message)
except ValueError:
self.reply(
'error', error=400, reason='Data is not valid EJSON',
)
raise StopIteration
if not isinstance(msgs, list):
self.reply(
'error', error=400, reason='Invalid EJSON messages',
)
raise StopIteration
# process individual messages
while msgs:
# pop raw message from the list
raw = msgs.pop(0)
# parse message payload
try:
data = ejson.loads(raw)
except (TypeError, ValueError):
data = None
if not isinstance(data, dict):
self.reply(
'error', error=400,
reason='Invalid SockJS DDP payload',
offendingMessage=raw,
)
yield data
if msgs:
# yield to other greenlets before processing next msg
gevent.sleep()
|
python
|
{
"resource": ""
}
|
q277231
|
DDPWebSocketApplication.process_ddp
|
test
|
def process_ddp(self, data):
"""Process a single DDP message."""
msg_id = data.get('id', None)
try:
msg = data.pop('msg')
except KeyError:
self.reply(
'error', reason='Bad request',
offendingMessage=data,
)
return
try:
# dispatch message
self.dispatch(msg, data)
except Exception as err: # pylint: disable=broad-except
# This should be the only protocol exception handler
kwargs = {
'msg': {'method': 'result'}.get(msg, 'error'),
}
if msg_id is not None:
kwargs['id'] = msg_id
if isinstance(err, MeteorError):
error = err.as_dict()
else:
error = {
'error': 500,
'reason': 'Internal server error',
}
if kwargs['msg'] == 'error':
kwargs.update(error)
else:
kwargs['error'] = error
if not isinstance(err, MeteorError):
# not a client error, should always be logged.
stack, _ = safe_call(
self.logger.error, '%r %r', msg, data, exc_info=1,
)
if stack is not None:
# something went wrong while logging the error, revert to
# writing a stack trace to stderr.
traceback.print_exc(file=sys.stderr)
sys.stderr.write(
'Additionally, while handling the above error the '
'following error was encountered:\n'
)
sys.stderr.write(stack)
elif settings.DEBUG:
print('ERROR: %s' % err)
dprint('msg', msg)
dprint('data', data)
error.setdefault('details', traceback.format_exc())
# print stack trace for client errors when DEBUG is True.
print(error['details'])
self.reply(**kwargs)
if msg_id and msg == 'method':
self.reply('updated', methods=[msg_id])
|
python
|
{
"resource": ""
}
|
q277232
|
DDPWebSocketApplication.dispatch
|
test
|
def dispatch(self, msg, kwargs):
"""Dispatch msg to appropriate recv_foo handler."""
# enforce calling 'connect' first
if self.connection is None and msg != 'connect':
self.reply('error', reason='Must connect first')
return
if msg == 'method':
if (
'method' not in kwargs
) or (
'id' not in kwargs
):
self.reply(
'error', error=400, reason='Malformed method invocation',
)
return
# lookup method handler
try:
handler = getattr(self, 'recv_%s' % msg)
except (AttributeError, UnicodeEncodeError):
raise MeteorError(404, 'Method not found')
# validate handler arguments
validate_kwargs(handler, kwargs)
# dispatch to handler
handler(**kwargs)
|
python
|
{
"resource": ""
}
|
q277233
|
DDPWebSocketApplication.recv_connect
|
test
|
def recv_connect(self, version=None, support=None, session=None):
"""DDP connect handler."""
del session # Meteor doesn't even use this!
if self.connection is not None:
raise MeteorError(
400, 'Session already established.',
self.connection.connection_id,
)
elif None in (version, support) or version not in self.versions:
self.reply('failed', version=self.versions[0])
elif version not in support:
raise MeteorError(400, 'Client version/support mismatch.')
else:
from dddp.models import Connection
cur = connection.cursor()
cur.execute('SELECT pg_backend_pid()')
(backend_pid,) = cur.fetchone()
this.version = version
this.support = support
self.connection = Connection.objects.create(
server_addr='%d:%s' % (
backend_pid,
self.ws.handler.socket.getsockname(),
),
remote_addr=self.remote_addr,
version=version,
)
self.pgworker.connections[self.connection.pk] = self
atexit.register(self.on_close, 'Shutting down.')
self.reply('connected', session=self.connection.connection_id)
|
python
|
{
"resource": ""
}
|
q277234
|
DDPWebSocketApplication.recv_ping
|
test
|
def recv_ping(self, id_=None):
"""DDP ping handler."""
if id_ is None:
self.reply('pong')
else:
self.reply('pong', id=id_)
|
python
|
{
"resource": ""
}
|
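For reference, the heartbeat frames handled by recv_ping look like this per the DDP protocol (the id is optional and echoed back only when the client supplied one):
# client -> server
{"msg": "ping", "id": "42"}
# server -> client
{"msg": "pong", "id": "42"}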
q277235
|
DDPWebSocketApplication.recv_sub
|
test
|
def recv_sub(self, id_, name, params):
"""DDP sub handler."""
self.api.sub(id_, name, *params)
|
python
|
{
"resource": ""
}
|
q277236
|
DDPWebSocketApplication.recv_unsub
|
test
|
def recv_unsub(self, id_=None):
"""DDP unsub handler."""
if id_:
self.api.unsub(id_)
else:
self.reply('nosub')
|
python
|
{
"resource": ""
}
|
q277237
|
DDPWebSocketApplication.recv_method
|
test
|
def recv_method(self, method, params, id_, randomSeed=None):
"""DDP method handler."""
if randomSeed is not None:
this.random_streams.random_seed = randomSeed
this.alea_random = alea.Alea(randomSeed)
self.api.method(method, params, id_)
self.reply('updated', methods=[id_])
|
python
|
{
"resource": ""
}
|
q277238
|
ddpp_sockjs_info
|
test
|
def ddpp_sockjs_info(environ, start_response):
"""Inform client that WebSocket service is available."""
import random
import ejson
start_response(
'200 OK',
[
('Content-Type', 'application/json; charset=UTF-8'),
] + common_headers(environ),
)
yield ejson.dumps(collections.OrderedDict([
('websocket', True),
('origins', [
'*:*',
]),
('cookie_needed', False),
('entropy', random.getrandbits(32)),
]))
|
python
|
{
"resource": ""
}
|
q277239
|
serve
|
test
|
def serve(listen, verbosity=1, debug_port=0, **ssl_args):
"""Spawn greenlets for handling websockets and PostgreSQL calls."""
launcher = DDPLauncher(debug=verbosity == 3, verbosity=verbosity)
if debug_port:
launcher.servers.append(
launcher.get_backdoor_server('localhost:%d' % debug_port)
)
launcher.add_web_servers(listen, **ssl_args)
# die gracefully with SIGINT or SIGQUIT
sigmap = {
val: name
for name, val
in vars(signal).items()
if name.startswith('SIG')
}
def sighandler(signum=None, frame=None):
"""Signal handler"""
launcher.logger.info(
'Received signal %s in frame %r',
sigmap.get(signum, signum),
frame,
)
launcher.stop()
for signum in [signal.SIGINT, signal.SIGQUIT]:
gevent.signal(signum, sighandler)
launcher.run()
|
python
|
{
"resource": ""
}
|
q277240
|
main
|
test
|
def main():
"""Main entry point for `dddp` command."""
parser = argparse.ArgumentParser(description=__doc__)
django = parser.add_argument_group('Django Options')
django.add_argument(
'--verbosity', '-v', metavar='VERBOSITY', dest='verbosity', type=int,
default=1,
)
django.add_argument(
'--debug-port', metavar='DEBUG_PORT', dest='debug_port', type=int,
default=0,
)
django.add_argument(
'--settings', metavar='SETTINGS', dest='settings',
help="The Python path to a settings module, e.g. "
"\"myproject.settings.main\". If this isn't provided, the "
"DJANGO_SETTINGS_MODULE environment variable will be used.",
)
http = parser.add_argument_group('HTTP Options')
http.add_argument(
'listen', metavar='address[:port]', nargs='*', type=addr,
help='Listening address for HTTP(s) server.',
)
ssl = parser.add_argument_group('SSL Options')
ssl.add_argument('--ssl-version', metavar='SSL_VERSION', dest='ssl_version',
help="SSL version to use (see stdlib ssl module's) [3]",
choices=['1', '2', '3'], default='3')
ssl.add_argument('--certfile', metavar='FILE', dest='certfile',
help="SSL certificate file [None]")
ssl.add_argument('--ciphers', metavar='CIPHERS', dest='ciphers',
help="Ciphers to use (see stdlib ssl module's) [TLSv1]")
ssl.add_argument('--ca-certs', metavar='FILE', dest='ca_certs',
help="CA certificates file [None]")
ssl.add_argument('--keyfile', metavar='FILE', dest='keyfile',
help="SSL key file [None]")
namespace = parser.parse_args()
if namespace.settings:
os.environ['DJANGO_SETTINGS_MODULE'] = namespace.settings
serve(
namespace.listen or [Addr('localhost', 8000)],
debug_port=namespace.debug_port,
keyfile=namespace.keyfile,
certfile=namespace.certfile,
verbosity=namespace.verbosity,
)
|
python
|
{
"resource": ""
}
|
q277241
|
DDPLauncher.print
|
test
|
def print(self, msg, *args, **kwargs):
"""Print formatted msg if verbosity set at 1 or above."""
if self.verbosity >= 1:
print(msg, *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q277242
|
DDPLauncher.stop
|
test
|
def stop(self):
"""Stop all green threads."""
self.logger.debug('PostgresGreenlet stop')
self._stop_event.set()
# ask all threads to stop.
for server in self.servers + [DDPLauncher.pgworker]:
self.logger.debug('Stopping %s', server)
server.stop()
# wait for all threads to stop.
gevent.joinall(self.threads + [DDPLauncher.pgworker])
self.threads = []
|
python
|
{
"resource": ""
}
|
q277243
|
DDPLauncher.run
|
test
|
def run(self):
"""Run DDP greenlets."""
self.logger.debug('PostgresGreenlet run')
self.start()
self._stop_event.wait()
# wait for all threads to stop.
gevent.joinall(self.threads + [DDPLauncher.pgworker])
self.threads = []
|
python
|
{
"resource": ""
}
|
q277244
|
PostgresGreenlet._run
|
test
|
def _run(self): # pylint: disable=method-hidden
"""Spawn sub tasks, wait for stop signal."""
conn_params = self.connection.get_connection_params()
# See http://initd.org/psycopg/docs/module.html#psycopg2.connect and
# http://www.postgresql.org/docs/current/static/libpq-connect.html
# section 31.1.2 (Parameter Key Words) for details on available params.
conn_params.update(
async=True,
application_name='{} pid={} django-ddp'.format(
socket.gethostname(), # hostname
os.getpid(), # PID
)[:64], # 64 characters for default PostgreSQL build config
)
conn = None
while conn is None:
try:
conn = psycopg2.connect(**conn_params)
except psycopg2.OperationalError as err:
# Some variants of the psycopg2 driver for Django add extra
# params that aren't meant to be passed directly to
# `psycopg2.connect()` -- issue a warning and try again.
msg = ('%s' % err).strip()
msg_prefix = 'invalid connection option "'
if not msg.startswith(msg_prefix):
# *waves hand* this is not the error you are looking for.
raise
key = msg[len(msg_prefix):-1]
self.logger.warning(
'Ignoring unknown settings.DATABASES[%r] option: %s=%r',
self.connection.alias,
key, conn_params.pop(key),
)
self.poll(conn)  # wait for connection to start
import logging
logging.getLogger('dddp').info('=> Started PostgresGreenlet.')
cur = conn.cursor()
cur.execute('LISTEN "ddp";')
while not self._stop_event.is_set():
try:
self.select_greenlet = gevent.spawn(
gevent.select.select,
[conn], [], [], timeout=None,
)
self.select_greenlet.get()
except gevent.GreenletExit:
self._stop_event.set()
finally:
self.select_greenlet = None
self.poll(conn)
self.poll(conn)
cur.close()
self.poll(conn)
conn.close()
|
python
|
{
"resource": ""
}
|
q277245
|
PostgresGreenlet.poll
|
test
|
def poll(self, conn):
"""Poll DB socket and process async tasks."""
while 1:
state = conn.poll()
if state == psycopg2.extensions.POLL_OK:
while conn.notifies:
notify = conn.notifies.pop()
self.logger.info(
"Got NOTIFY (pid=%d, payload=%r)",
notify.pid, notify.payload,
)
# read the header and check seq/fin.
hdr, chunk = notify.payload.split('|', 1)
# print('RECEIVE: %s' % hdr)
header = ejson.loads(hdr)
uuid = header['uuid']
size, chunks = self.chunks.setdefault(uuid, [0, {}])
if header['fin']:
size = self.chunks[uuid][0] = header['seq']
# stash the chunk
chunks[header['seq']] = chunk
if len(chunks) != size:
# haven't got all the chunks yet
continue # process next NOTIFY in loop
# got the last chunk -> process it.
data = ''.join(
chunk for _, chunk in sorted(chunks.items())
)
del self.chunks[uuid] # don't forget to cleanup!
data = ejson.loads(data)
sender = data.pop('_sender', None)
tx_id = data.pop('_tx_id', None)
for connection_id in data.pop('_connection_ids'):
try:
websocket = self.connections[connection_id]
except KeyError:
continue # connection not in this process
if connection_id == sender:
websocket.send(data, tx_id=tx_id)
else:
websocket.send(data)
break
elif state == psycopg2.extensions.POLL_WRITE:
gevent.select.select([], [conn.fileno()], [])
elif state == psycopg2.extensions.POLL_READ:
gevent.select.select([conn.fileno()], [], [])
else:
self.logger.warn('POLL_ERR: %s', state)
|
python
|
{
"resource": ""
}
|
q277246
|
greenify
|
test
|
def greenify():
"""Patch threading and psycopg2 modules for green threads."""
# don't greenify twice.
if _GREEN:
return
_GREEN[True] = True
from gevent.monkey import patch_all, saved
if ('threading' in sys.modules) and ('threading' not in saved):
import warnings
warnings.warn('threading module loaded before patching!')
patch_all()
try:
# Use psycopg2 by default
import psycopg2
del psycopg2
except ImportError:
# Fallback to psycopg2cffi if required (eg: pypy)
from psycopg2cffi import compat
compat.register()
from psycogreen.gevent import patch_psycopg
patch_psycopg()
|
python
|
{
"resource": ""
}
|
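Ordering matters when calling greenify: the warning above fires if threading was imported before patch_all(). A hypothetical entry point (assuming greenify is exposed at the dddp package level) would be:
# patch before anything pulls in threading or psycopg2
import dddp
dddp.greenify()

# only now is it safe to import Django and friends
import django
django.setup()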
q277247
|
meteor_random_id
|
test
|
def meteor_random_id(name=None, length=17):
"""Generate a new ID, optionally using namespace of given `name`."""
if name is None:
stream = THREAD_LOCAL.alea_random
else:
stream = THREAD_LOCAL.random_streams[name]
return stream.random_string(length, METEOR_ID_CHARS)
|
python
|
{
"resource": ""
}
|
q277248
|
autodiscover
|
test
|
def autodiscover():
"""Import all `ddp` submodules from `settings.INSTALLED_APPS`."""
from django.utils.module_loading import autodiscover_modules
from dddp.api import API
autodiscover_modules('ddp', register_to=API)
return API
|
python
|
{
"resource": ""
}
|
q277249
|
MeteorError.as_dict
|
test
|
def as_dict(self, **kwargs):
"""Return an error dict for self.args and kwargs."""
error, reason, details, err_kwargs = self.args
result = {
key: val
for key, val in {
'error': error, 'reason': reason, 'details': details,
}.items()
if val is not None
}
result.update(err_kwargs)
result.update(kwargs)
return result
|
python
|
{
"resource": ""
}
|
q277250
|
ThreadLocal.get
|
test
|
def get(self, name, factory, *factory_args, **factory_kwargs):
"""Get attribute, creating if required using specified factory."""
update_thread_local = getattr(factory, 'update_thread_local', True)
if (not update_thread_local) or (name not in self.__dict__):
obj = factory(*factory_args, **factory_kwargs)
if update_thread_local:
setattr(self, name, obj)
return obj
return getattr(self, name)
|
python
|
{
"resource": ""
}
|
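The lazy-factory pattern above works unchanged on top of threading.local; a runnable sketch:
import threading

class ThreadLocal(threading.local):
    def get(self, name, factory, *factory_args, **factory_kwargs):
        """Get attribute, creating if required using specified factory."""
        update_thread_local = getattr(factory, 'update_thread_local', True)
        if (not update_thread_local) or (name not in self.__dict__):
            obj = factory(*factory_args, **factory_kwargs)
            if update_thread_local:
                setattr(self, name, obj)
            return obj
        return getattr(self, name)

tls = ThreadLocal()
cache = tls.get('cache', dict)           # first call creates and stores {}
assert tls.get('cache', dict) is cache   # later calls return the cached object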
q277251
|
DDPHandler.emit
|
test
|
def emit(self, record):
"""Emit a formatted log record via DDP."""
if getattr(this, 'subs', {}).get(LOGS_NAME, False):
self.format(record)
this.send({
'msg': ADDED,
'collection': LOGS_NAME,
'id': meteor_random_id('/collection/%s' % LOGS_NAME),
'fields': {
attr: {
# typecasting methods for specific attributes
'args': lambda args: [repr(arg) for arg in args],
'created': datetime.datetime.fromtimestamp,
'exc_info': stacklines_or_none,
}.get(
attr,
lambda val: val # default typecasting method
)(getattr(record, attr, None))
for attr in (
'args',
'asctime',
'created',
'exc_info',
'filename',
'funcName',
'levelname',
'levelno',
'lineno',
'module',
'msecs',
'message',
'name',
'pathname',
'process',
'processName',
'relativeCreated',
'thread',
'threadName',
)
},
})
|
python
|
{
"resource": ""
}
|
q277252
|
negotiation_middleware
|
test
|
def negotiation_middleware(
renderers=DEFAULTS['RENDERERS'],
negotiator=DEFAULTS['NEGOTIATOR'],
force_negotiation=DEFAULTS['FORCE_NEGOTIATION']
):
"""Middleware which selects a renderer for a given request then renders
a handler's data to a `aiohttp.web.Response`.
"""
@asyncio.coroutine
def factory(app, handler):
@asyncio.coroutine
def middleware(request):
content_type, renderer = negotiator(
request,
renderers,
force_negotiation,
)
request['selected_media_type'] = content_type
response = yield from handler(request)
if getattr(response, 'data', None):
# Render data with the selected renderer
if asyncio.iscoroutinefunction(renderer):
render_result = yield from renderer(request, response.data)
else:
render_result = renderer(request, response.data)
else:
render_result = response
if isinstance(render_result, web.Response):
return render_result
if getattr(response, 'data', None):
response.body = render_result
response.content_type = content_type
return response
return middleware
return factory
|
python
|
{
"resource": ""
}
|
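A minimal wiring sketch for the middleware above. The library ships its own data-carrying Response class; the DataResponse subclass here is a hypothetical stand-in so the example stays self-contained.
from aiohttp import web

class DataResponse(web.Response):
    """Hypothetical response carrying unrendered data."""
    def __init__(self, data, **kwargs):
        super().__init__(**kwargs)
        self.data = data

async def hello(request):
    # the middleware renders .data with the negotiated renderer
    return DataResponse({'hello': 'world'})

app = web.Application(middlewares=[negotiation_middleware()])
app.router.add_get('/', hello)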
q277253
|
add_route_context
|
test
|
def add_route_context(
app: web.Application, module=None, url_prefix: str=None, name_prefix: str=None
):
"""Context manager which yields a function for adding multiple routes from a given module.
Example:
.. code-block:: python
# myapp/articles/views.py
async def list_articles(request):
return web.Response(b'article list...')
async def create_article(request):
return web.Response(b'created article...')
.. code-block:: python
# myapp/app.py
from myapp.articles import views
with add_route_context(app, url_prefix='/api/', name_prefix='articles') as route:
route('GET', '/articles/', views.list_articles)
route('POST', '/articles/', views.create_article)
app.router['articles.list_articles'].url() # /api/articles/
If you prefer, you can also pass module and handler names as strings.
.. code-block:: python
with add_route_context(app, module='myapp.articles.views',
url_prefix='/api/', name_prefix='articles') as route:
route('GET', '/articles/', 'list_articles')
route('POST', '/articles/', 'create_article')
:param app: Application to add routes to.
:param module: Import path to module (str) or module object which contains the handlers.
:param url_prefix: Prefix to prepend to all route paths.
:param name_prefix: Prefix to prepend to all route names.
"""
if isinstance(module, (str, bytes)):
module = importlib.import_module(module)
def add_route(method, path, handler, name=None):
"""
:param str method: HTTP method.
:param str path: Path for the route.
:param handler: A handler function or a name of a handler function contained
in `module`.
:param str name: Name for the route. If `None`, defaults to the handler's
function name.
"""
if isinstance(handler, (str, bytes)):
if not module:
raise ValueError(
'Must pass module to add_route_context if passing handler name strings.'
)
name = name or handler
handler = getattr(module, handler)
else:
name = name or handler.__name__
path = make_path(path, url_prefix)
name = '.'.join((name_prefix, name)) if name_prefix else name
return app.router.add_route(method, path, handler, name=name)
yield add_route
|
python
|
{
"resource": ""
}
|
q277254
|
ResourceRouter.add_resource_object
|
test
|
def add_resource_object(self, path: str, resource, methods: tuple=tuple(), names: Mapping=None):
"""Add routes by an resource instance's methods.
:param path: route path. Should be started with slash (``'/'``).
:param resource: A "resource" instance. May be an instance of a plain object.
:param methods: Methods (strings) to register.
:param names: Dictionary of ``name`` overrides.
"""
names = names or {}
if methods:
method_names = methods
else:
method_names = self.HTTP_METHOD_NAMES
for method_name in method_names:
handler = getattr(resource, method_name, None)
if handler:
name = names.get(method_name, self.get_default_handler_name(resource, method_name))
self.add_route(method_name.upper(), path, handler, name=name)
|
python
|
{
"resource": ""
}
|
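Usage sketch for add_resource_object, assuming `router` is an instance of this ResourceRouter class: only methods named after HTTP verbs are registered, everything else on the resource is ignored.
from aiohttp import web

class ArticleResource:
    """Hypothetical plain-object resource."""
    async def get(self, request):
        return web.Response(text='article list')
    async def post(self, request):
        return web.Response(text='created')

# router = ResourceRouter()  # assumed instance of the class above
router.add_resource_object('/articles/', ArticleResource())
# registers GET and POST handlers at /articles/ with default route names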
q277255
|
run
|
test
|
def run(app: web.Application, **kwargs):
"""Run an `aiohttp.web.Application` using gunicorn.
:param app: The app to run.
:param str app_uri: Import path to `app`. Takes the form
``$(MODULE_NAME):$(VARIABLE_NAME)``.
The module name can be a full dotted path.
The variable name refers to the `aiohttp.web.Application` instance.
This argument is required if ``reload=True``.
:param str host: Hostname to listen on.
:param int port: Port of the server.
:param bool reload: Whether to reload the server on a code change.
If not set, will take the same value as ``app.debug``.
**EXPERIMENTAL**.
:param \*\*kwargs: Extra configuration options to set on the
``GunicornApp's`` config object.
"""
runner = Runner(app, **kwargs)
runner.run()
|
python
|
{
"resource": ""
}
|
q277256
|
GCMDevice.send_message
|
test
|
def send_message(self, message, **kwargs):
"""
Sends a push notification to this device via GCM
"""
from ..libs.gcm import gcm_send_message
data = kwargs.pop("extra", {})
if message is not None:
data["message"] = message
return gcm_send_message(registration_id=self.registration_id,
data=data, **kwargs)
|
python
|
{
"resource": ""
}
|
q277257
|
apns_send_bulk_message
|
test
|
def apns_send_bulk_message(registration_ids, alert, **kwargs):
"""
Sends an APNS notification to one or more registration_ids.
The registration_ids argument needs to be a list.
Note that if set, alert should always be a string. If it is not set,
it won't be included in the notification. You will need to pass None
explicitly for silent notifications.
"""
with closing(_apns_create_socket_to_push(**kwargs)) as socket:
for identifier, registration_id in enumerate(registration_ids):
_apns_send(registration_id, alert, identifier=identifier, socket=socket, **kwargs)
_apns_check_errors(socket)
|
python
|
{
"resource": ""
}
|
q277258
|
apns_fetch_inactive_ids
|
test
|
def apns_fetch_inactive_ids():
"""
Queries the APNS server for id's that are no longer active since
the last fetch
"""
with closing(_apns_create_socket_to_feedback()) as socket:
inactive_ids = []
for _, registration_id in _apns_receive_feedback(socket):
inactive_ids.append(codecs.encode(registration_id, 'hex_codec'))
return inactive_ids
|
python
|
{
"resource": ""
}
|
q277259
|
gcm_send_message
|
test
|
def gcm_send_message(registration_id, data, encoding='utf-8', **kwargs):
"""
Standalone method to send a single gcm notification
"""
messenger = GCMMessenger(registration_id, data, encoding=encoding, **kwargs)
return messenger.send_plain()
|
python
|
{
"resource": ""
}
|
q277260
|
gcm_send_bulk_message
|
test
|
def gcm_send_bulk_message(registration_ids, data, encoding='utf-8', **kwargs):
"""
Standalone method to send bulk gcm notifications
"""
messenger = GCMMessenger(registration_ids, data, encoding=encoding, **kwargs)
return messenger.send_bulk()
|
python
|
{
"resource": ""
}
|
q277261
|
GCMMessenger.send_json
|
test
|
def send_json(self, ids=None):
"""
Sends a json GCM message
"""
items = ids or self._registration_id
values = {"registration_ids": items}
if self._data is not None:
values["data"] = self._data
for key, val in self._kwargs.items():
if val:
values[key] = val
data = json.dumps(values, separators=(",", ":"), sort_keys=True).encode(
self.encoding)
result = json.loads(self._send(data, "application/json"))
if ("failure" in result) and (result["failure"]):
unregistered = []
throw_error = False
for index, error in enumerate(result.get("results", [])):
error = error.get("error", "")
if error in ("NotRegistered", "InvalidRegistration"):
unregistered.append(items[index])
elif error != "":
throw_error = True
self.deactivate_unregistered_devices(unregistered)
if throw_error:
raise GCMPushError(result)
return result
|
python
|
{
"resource": ""
}
|
q277262
|
GCMMessenger._send
|
test
|
def _send(self, data, content_type):
"""
Sends a GCM message with the given content type
"""
headers = {
"Content-Type": content_type,
"Authorization": "key=%s" % (self.api_key),
"Content-Length": str(len(data))
}
request = Request(self.api_url, data, headers)
return urlopen(request).read().decode(self.encoding)
|
python
|
{
"resource": ""
}
|
q277263
|
get_model
|
test
|
def get_model(module_location):
"""
Return the class found at the given dotted module location.
"""
if not isinstance(module_location, (str, unicode)):
    raise ValueError("The value provided should be either a str or "
                     "unicode instance; the value '%s' provided was %s "
                     "instead." % (module_location, type(module_location)))
try:
name_split = module_location.split(".")
class_name = name_split.pop(-1)
if not len(name_split):
raise ValueError("The value should provide the module location "\
"joined by '.' e.g. for model named 'test' in "
"/app/module.py, The value should be 'app.module.test'")
module_location = ".".join(name_split)
module = importlib.import_module(module_location)
cls = getattr(module, class_name)
return cls
except AttributeError:
pass
|
python
|
{
"resource": ""
}
|
q277264
|
fast_forward_selection
|
test
|
def fast_forward_selection(scenarios, number_of_reduced_scenarios, probability=None):
"""Fast forward selection algorithm
Parameters
----------
scenarios : numpy.array
Contain the input scenarios.
The columns representing the individual scenarios
The rows are the vector of values in each scenario
number_of_reduced_scenarios : int
final number of scenarios that
the reduced scenarios contain.
If number_of_reduced_scenarios is equal to or greater than the number of input scenarios,
then the original input scenario set is returned as the reduced set
probability : numpy.array (default=None)
probability is a numpy.array with length equal to number of scenarios.
if probability is not defined, all scenarios get equal probabilities
Returns
-------
reduced_scenarios : numpy.array
reduced set of scenarios
reduced_probability : numpy.array
probability of reduced set of scenarios
reduced_scenario_set : list
scenario numbers of reduced set of scenarios
Example
-------
Scenario reduction can be performed as shown below::
>>> import numpy as np
>>> import random
>>> scenarios = np.array([[random.randint(500,1000) for i in range(0,24)],
>>> [random.randint(500,1000) for i in range(0,24)],
>>> [random.randint(500,1000) for i in range(0,24)],
>>> [random.randint(500,1000) for i in range(0,24)],
>>> [random.randint(500,1000) for i in range(0,24)],
>>> [random.randint(500,1000) for i in range(0,24)],
>>> [random.randint(500,1000) for i in range(0,24)],
>>> [random.randint(500,1000) for i in range(0,24)],
>>> [random.randint(500,1000) for i in range(0,24)],
>>> [random.randint(500,1000) for i in range(0,24)]])
>>> import psst.scenario
>>> reduced_scenarios, reduced_probability, reduced_scenario_numbers = psst.scenario.fast_forward_selection(scenarios, 2, probability)
"""
print("Running fast forward selection algorithm")
number_of_scenarios = scenarios.shape[1]
logger.debug("Input number of scenarios = %d", number_of_scenarios)
# if probability is not defined assign equal probability to all scenarios
if probability is None:
probability = np.array([1/number_of_scenarios for i in range(0, number_of_scenarios)])
# initialize z, c and J
z = np.array([np.inf for i in range(0, number_of_scenarios)])
c = np.zeros((number_of_scenarios, number_of_scenarios))
J = range(0, number_of_scenarios)
# no reduction necessary
if number_of_reduced_scenarios >= number_of_scenarios:
return(scenarios, probability, J)
for scenario_k in range(0, number_of_scenarios):
for scenario_u in range(0, number_of_scenarios):
c[scenario_k, scenario_u] = distance(scenarios[:, scenario_k], scenarios[:, scenario_u])
for scenario_u in range(0, number_of_scenarios):
summation = 0
for scenario_k in range(0, number_of_scenarios):
if scenario_k != scenario_u:
summation = summation + probability[scenario_k]*c[scenario_k, scenario_u]
z[scenario_u] = summation
U = [np.argmin(z)]
for u in U:
J.remove(u)
for _ in range(0, number_of_scenarios - number_of_reduced_scenarios - 1):
print("Running {}".format(_))
for scenario_u in J:
for scenario_k in J:
lowest_value = np.inf
for scenario_number in U:
lowest_value = min(c[scenario_k, scenario_u], c[scenario_k, scenario_number])
c[scenario_k, scenario_u] = lowest_value
for scenario_u in J:
summation = 0
for scenario_k in J:
if scenario_k not in U:
summation = summation + probability[scenario_k]*c[scenario_k, scenario_u]
z[scenario_u] = summation
u_i = np.argmin([item if i in J else np.inf for i, item in enumerate(z)])
J.remove(u_i)
U.append(u_i)
reduced_scenario_set = U
reduced_probability = []
reduced_probability = copy.deepcopy(probability)
for deleted_scenario_number in J:
lowest_value = np.inf
# find closest scenario_number
for scenario_j in reduced_scenario_set:
if c[deleted_scenario_number, scenario_j] < lowest_value:
closest_scenario_number = scenario_j
lowest_value = c[deleted_scenario_number, scenario_j]
reduced_probability[closest_scenario_number] = reduced_probability[closest_scenario_number] + reduced_probability[deleted_scenario_number]
reduced_scenarios = copy.deepcopy(scenarios[:, reduced_scenario_set])
reduced_probability = reduced_probability[reduced_scenario_set]
return reduced_scenarios, reduced_probability, reduced_scenario_set
|
python
|
{
"resource": ""
}
|
q277265
|
search
|
test
|
def search(term=None, phrase=None, limit=DEFAULT_SEARCH_LIMIT,
api_key=GIPHY_PUBLIC_KEY, strict=False, rating=None):
"""
Shorthand for creating a Giphy api wrapper with the given api key
and then calling the search method. Note that this will return a generator
"""
return Giphy(api_key=api_key, strict=strict).search(
term=term, phrase=phrase, limit=limit, rating=rating)
|
python
|
{
"resource": ""
}
|
q277266
|
translate
|
test
|
def translate(term=None, phrase=None, api_key=GIPHY_PUBLIC_KEY, strict=False,
rating=None):
"""
Shorthand for creating a Giphy api wrapper with the given api key
and then calling the translate method.
"""
return Giphy(api_key=api_key, strict=strict).translate(
term=term, phrase=phrase, rating=rating)
|
python
|
{
"resource": ""
}
|
q277267
|
trending
|
test
|
def trending(limit=DEFAULT_SEARCH_LIMIT, api_key=GIPHY_PUBLIC_KEY,
strict=False, rating=None):
"""
Shorthand for creating a Giphy api wrapper with the given api key
and then calling the trending method. Note that this will return
a generator
"""
return Giphy(api_key=api_key, strict=strict).trending(
limit=limit, rating=rating)
|
python
|
{
"resource": ""
}
|
q277268
|
gif
|
test
|
def gif(gif_id, api_key=GIPHY_PUBLIC_KEY, strict=False):
"""
Shorthand for creating a Giphy api wrapper with the given api key
and then calling the gif method.
"""
return Giphy(api_key=api_key, strict=strict).gif(gif_id)
|
python
|
{
"resource": ""
}
|
q277269
|
screensaver
|
test
|
def screensaver(tag=None, api_key=GIPHY_PUBLIC_KEY, strict=False):
"""
Shorthand for creating a Giphy api wrapper with the given api key
and then calling the screensaver method.
"""
return Giphy(api_key=api_key, strict=strict).screensaver(tag=tag)
|
python
|
{
"resource": ""
}
|
q277270
|
upload
|
test
|
def upload(tags, file_path, username=None, api_key=GIPHY_PUBLIC_KEY,
strict=False):
"""
Shorthand for creating a Giphy api wrapper with the given api key
and then calling the upload method.
"""
return Giphy(api_key=api_key, strict=strict).upload(
tags, file_path, username)
|
python
|
{
"resource": ""
}
|
q277271
|
GiphyImage._normalized
|
test
|
def _normalized(self, data):
"""
Does a normalization of sorts on image type data so that values
that should be integers are converted from strings
"""
int_keys = ('frames', 'width', 'height', 'size')
for key in int_keys:
if key not in data:
continue
try:
data[key] = int(data[key])
except ValueError:
pass # Ignored
return data
|
python
|
{
"resource": ""
}
|
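The coercion performed by _normalized, shown in isolation on sample image data; non-numeric values are left as-is:
data = {'width': '200', 'height': '200', 'size': '102400', 'frames': 'n/a'}
for key in ('frames', 'width', 'height', 'size'):
    if key not in data:
        continue
    try:
        data[key] = int(data[key])
    except ValueError:
        pass  # ignored, value stays a string
print(data)  # -> {'width': 200, 'height': 200, 'size': 102400, 'frames': 'n/a'}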
q277272
|
Giphy._fetch
|
test
|
def _fetch(self, endpoint_name, **params):
"""
Wrapper for making an api request from giphy
"""
params['api_key'] = self.api_key
resp = requests.get(self._endpoint(endpoint_name), params=params)
resp.raise_for_status()
data = resp.json()
self._check_or_raise(data.get('meta', {}))
return data
|
python
|
{
"resource": ""
}
|
q277273
|
Giphy.translate
|
test
|
def translate(self, term=None, phrase=None, strict=False, rating=None):
"""
Retrieve a single image that represents a translation of a term or
phrase into an animated gif. Punctuation is ignored. By default, this
will perform a `term` translation. If you want to translate by phrase,
use the `phrase` keyword argument.
:param term: Search term or terms
:type term: string
:param phrase: Search phrase
:type phrase: string
:param strict: Whether an exception should be raised when no results
:type strict: boolean
:param rating: limit results to those rated (y,g, pg, pg-13 or r).
:type rating: string
"""
assert any((term, phrase)), 'You must supply a term or phrase to search'
# Phrases should have dashes and not spaces
if phrase:
phrase = phrase.replace(' ', '-')
params = {'s': (term or phrase)}
if rating:
params.update({'rating': rating})
resp = self._fetch('translate', **params)
if resp['data']:
return GiphyImage(resp['data'])
elif strict or self.strict:
raise GiphyApiException(
"Term/Phrase '%s' could not be translated into a GIF" %
(term or phrase))
|
python
|
{
"resource": ""
}
|
q277274
|
Giphy.trending
|
test
|
def trending(self, rating=None, limit=DEFAULT_SEARCH_LIMIT):
"""
Retrieve GIFs currently trending online. The data returned mirrors
that used to create The Hot 100 list of GIFs on Giphy.
:param rating: limit results to those rated (y,g, pg, pg-13 or r).
:type rating: string
:param limit: Maximum number of results to yield
:type limit: int
"""
results_yielded = 0 # Count how many things we yield
page, per_page = 0, 25
params = {'rating': rating} if rating else {}
fetch = partial(self._fetch, 'trending', **params)
# Generate results until we 1) run out of pages 2) reach a limit
while True:
data = fetch(offset=page, limit=per_page)
page += per_page
# Guard for empty results
if not data['data']:
raise StopIteration
for item in data['data']:
results_yielded += 1
yield GiphyImage(item)
if limit is not None and results_yielded >= limit:
raise StopIteration
# Check yielded limit and whether or not there are more items
if (page >= data['pagination']['total_count'] or
(limit is not None and results_yielded >= limit)):
raise StopIteration
|
python
|
{
"resource": ""
}
|
q277275
|
Giphy.gif
|
test
|
def gif(self, gif_id, strict=False):
"""
Retrieves a specific gif from giphy based on unique id
:param gif_id: Unique giphy gif ID
:type gif_id: string
:param strict: Whether an exception should be raised when no results
:type strict: boolean
"""
resp = self._fetch(gif_id)
if resp['data']:
return GiphyImage(resp['data'])
elif strict or self.strict:
raise GiphyApiException(
"GIF with ID '%s' could not be found" % gif_id)
|
python
|
{
"resource": ""
}
|
q277276
|
Giphy.upload
|
test
|
def upload(self, tags, file_path, username=None):
"""
Uploads a gif from the filesystem to Giphy.
:param tags: Tags to apply to the uploaded image
:type tags: list
:param file_path: Path at which the image can be found
:type file_path: string
:param username: Your channel username if not using public API key
"""
params = {
'api_key': self.api_key,
'tags': ','.join(tags)
}
if username is not None:
params['username'] = username
with open(file_path, 'rb') as f:
resp = requests.post(
GIPHY_UPLOAD_ENDPOINT, params=params, files={'file': f})
resp.raise_for_status()
data = resp.json()
self._check_or_raise(data.get('meta', {}))
return self.gif(data['data']['id'])
|
python
|
{
"resource": ""
}
|
q277277
|
Api._access_control
|
test
|
def _access_control(self, access_control, my_media_group=None):
"""
Prepares the extension element for access control
Extension element is the optional parameter for the YouTubeVideoEntry
We use extension element to modify access control settings
Returns:
tuple of extension elements
"""
# Access control
extension = None
if access_control is AccessControl.Private:
# WARNING: this part of code is not tested
# set video as private
if my_media_group:
my_media_group.private = gdata.media.Private()
elif access_control is AccessControl.Unlisted:
# set video as unlisted
from gdata.media import YOUTUBE_NAMESPACE
from atom import ExtensionElement
kwargs = {
"namespace": YOUTUBE_NAMESPACE,
"attributes": {'action': 'list', 'permission': 'denied'},
}
extension = ([ExtensionElement('accessControl', **kwargs)])
return extension
|
python
|
{
"resource": ""
}
|
q277278
|
Api.authenticate
|
test
|
def authenticate(self, email=None, password=None, source=None):
"""
Authenticates the user and sets the GData Auth token.
All params are optional; if not set, the values from settings are used,
and if no settings are found, AttributeError is raised.
Params are email, password and source. Source is the app id.
Raises:
gdata.service.exceptions.BadAuthentication
"""
from gdata.service import BadAuthentication
# Auth parameters
Api.yt_service.email = email if email else settings.YOUTUBE_AUTH_EMAIL
Api.yt_service.password = password if password else settings.YOUTUBE_AUTH_PASSWORD
Api.yt_service.source = source if source else settings.YOUTUBE_CLIENT_ID
try:
Api.yt_service.ProgrammaticLogin()
self.authenticated = True
except BadAuthentication:
raise ApiError(_("Incorrect username or password"))
|
python
|
{
"resource": ""
}
|
q277279
|
Api.upload
|
test
|
def upload(self, title, description="", keywords="", developer_tags=None, access_control=AccessControl.Public):
"""
Browser based upload
Creates the video entry and meta data to initiate a browser upload
Authentication is needed
Params:
title: string
description: string
keywords: comma separated string
developer_tags: tuple
Return:
dict contains post_url and youtube_token. i.e { 'post_url': post_url, 'youtube_token': youtube_token }
Raises:
ApiError: on no authentication
"""
# Raise ApiError if not authenticated
if not self.authenticated:
raise ApiError(_("Authentication is required"))
# create media group
my_media_group = gdata.media.Group(
title=gdata.media.Title(text=title),
description=gdata.media.Description(description_type='plain',
text=description),
keywords=gdata.media.Keywords(text=keywords),
category=[gdata.media.Category(
text='Autos',
scheme='http://gdata.youtube.com/schemas/2007/categories.cat',
label='Autos')],
#player = None
)
# Access Control
extension = self._access_control(access_control, my_media_group)
# create video entry
video_entry = gdata.youtube.YouTubeVideoEntry(
media=my_media_group, extension_elements=extension)
# add developer tags
if developer_tags:
video_entry.AddDeveloperTags(developer_tags)
# upload meta data only
response = Api.yt_service.GetFormUploadToken(video_entry)
# parse response tuple and use the variables to build a form
post_url = response[0]
youtube_token = response[1]
return {'post_url': post_url, 'youtube_token': youtube_token}
|
python
|
{
"resource": ""
}
|
q277280
|
Api.check_upload_status
|
test
|
def check_upload_status(self, video_id):
"""
Checks the video upload status
Newly uploaded videos may be in the processing state
Authentication is required
Returns:
True if video is available
        otherwise a dict containing upload_state and a detailed message,
i.e. {"upload_state": "processing", "detailed_message": ""}
"""
# Raise ApiError if not authenticated
if not self.authenticated:
raise ApiError(_("Authentication is required"))
entry = self.fetch_video(video_id)
upload_status = Api.yt_service.CheckUploadStatus(entry)
if upload_status is not None:
video_upload_state = upload_status[0]
detailed_message = upload_status[1]
return {"upload_state": video_upload_state, "detailed_message": detailed_message}
else:
return True
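
# A minimal usage sketch (hypothetical helper, not part of the original
# module); "VIDEO_ID" is a placeholder for a real video id:
def _example_check_status():
    api = Api()
    api.authenticate()
    status = api.check_upload_status("VIDEO_ID")
    if status is not True:
        # still processing, failed or rejected
        return status["upload_state"], status["detailed_message"]
    return "available"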
|
python
|
{
"resource": ""
}
|
q277281
|
Api.update_video
|
test
|
def update_video(self, video_id, title="", description="", keywords="", access_control=AccessControl.Unlisted):
"""
Updates the video
Authentication is required
Params:
        video_id: the id of the video to update
        title: string
        description: string
        keywords: string (currently ignored; see the commented-out block below)
Returns:
a video entry on success
None otherwise
"""
# Raise ApiError if not authenticated
if not self.authenticated:
raise ApiError(_("Authentication is required"))
entry = self.fetch_video(video_id)
# Set Access Control
extension = self._access_control(access_control)
if extension:
entry.extension_elements = extension
if title:
entry.media.title.text = title
if description:
entry.media.description.text = description
#if keywords:
# entry.media.keywords.text = keywords
success = Api.yt_service.UpdateVideoEntry(entry)
return success
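
# A minimal usage sketch (hypothetical helper, not part of the original
# module); "VIDEO_ID" is a placeholder. Note that the keywords argument is
# currently ignored by update_video (see the commented-out block above).
def _example_update():
    api = Api()
    api.authenticate()
    return api.update_video("VIDEO_ID", title="New title",
                            description="New description",
                            access_control=AccessControl.Public)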
|
python
|
{
"resource": ""
}
|
q277282
|
Api.delete_video
|
test
|
def delete_video(self, video_id):
"""
Deletes the video
Authentication is required
Params:
        video_id: the id of the video to delete
    Returns:
        True if successful
    Raises:
OperationError: on unsuccessful deletion
"""
# Raise ApiError if not authenticated
if not self.authenticated:
raise ApiError(_("Authentication is required"))
entry = self.fetch_video(video_id)
response = Api.yt_service.DeleteVideoEntry(entry)
if not response:
raise OperationError(_("Cannot be deleted from Youtube"))
return True
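
# A minimal usage sketch (hypothetical helper, not part of the original
# module); "VIDEO_ID" is a placeholder for a real video id:
def _example_delete():
    api = Api()
    api.authenticate()
    return api.delete_video("VIDEO_ID")  # raises OperationError on failure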
|
python
|
{
"resource": ""
}
|
q277283
|
check_video_availability
|
test
|
def check_video_availability(request, video_id):
"""
    Checks the availability of the video. Newly uploaded videos may still be
    processing, and others might have been rejected.
Returns:
json response
"""
# Check video availability
# Available states are: processing
api = Api()
api.authenticate()
availability = api.check_upload_status(video_id)
    data = {'success': availability is True}
return HttpResponse(json.dumps(data), content_type="application/json")
|
python
|
{
"resource": ""
}
|
q277284
|
video
|
test
|
def video(request, video_id):
"""
Displays a video in an embed player
"""
# Check video availability
# Available states are: processing
api = Api()
api.authenticate()
availability = api.check_upload_status(video_id)
if availability is not True:
# Video is not available
        video = Video.objects.get(video_id=video_id)
state = availability["upload_state"]
# Add additional states here. I'm not sure what states are available
        if state in ("failed", "rejected"):
return render_to_response(
"django_youtube/video_failed.html",
{"video": video, "video_id": video_id, "message":
_("Invalid video."), "availability": availability},
context_instance=RequestContext(request)
)
else:
return render_to_response(
"django_youtube/video_unavailable.html",
{"video": video, "video_id": video_id,
"message": _("This video is currently being processed"), "availability": availability},
context_instance=RequestContext(request)
)
video_params = _video_params(request, video_id)
return render_to_response(
"django_youtube/video.html",
video_params,
context_instance=RequestContext(request)
)
|
python
|
{
"resource": ""
}
|
q277285
|
video_list
|
test
|
def video_list(request, username=None):
"""
    Lists the videos of a user.
    If username is not set, shows the videos of the currently logged-in user.
"""
# If user is not authenticated and username is None, raise an error
if username is None and not request.user.is_authenticated():
from django.http import Http404
raise Http404
from django.contrib.auth.models import User
user = User.objects.get(username=username) if username else request.user
# loop through the videos of the user
videos = Video.objects.filter(user=user).all()
video_params = []
for video in videos:
video_params.append(_video_params(request, video.video_id))
return render_to_response(
"django_youtube/videos.html",
{"video_params": video_params},
context_instance=RequestContext(request)
)
|
python
|
{
"resource": ""
}
|
q277286
|
direct_upload
|
test
|
def direct_upload(request):
"""
    Direct upload method.
    First uploads the video to our server, then sends the file to YouTube.
    Params:
        (optional) `only_data`: if set, a JSON response is returned,
        i.e. {'video_id': '124weg'}
    Returns:
        a JSON object if `only_data` is set,
        otherwise a redirect to the video display page
"""
    # `only_data` decides whether we respond with JSON or a page/redirect
    return_only_data = request.GET.get('only_data')
    form = YoutubeDirectUploadForm()
    if request.method == "POST":
        try:
            form = YoutubeDirectUploadForm(request.POST, request.FILES)
            # upload the file to our server
            if form.is_valid():
uploaded_video = form.save()
# send this file to youtube
api = Api()
api.authenticate()
video_entry = api.upload_direct(uploaded_video.file_on_server.path, "Uploaded video from zuqqa")
# get data from video entry
swf_url = video_entry.GetSwfUrl()
youtube_url = video_entry.id.text
# getting video_id is tricky, I can only reach the url which
# contains the video_id.
# so the only option is to parse the id element
# https://groups.google.com/forum/?fromgroups=#!topic/youtube-api-gdata/RRl_h4zuKDQ
url_parts = youtube_url.split("/")
url_parts.reverse()
video_id = url_parts[0]
# save video_id to video instance
video = Video()
video.user = request.user
video.video_id = video_id
video.title = 'tmp video'
video.youtube_url = youtube_url
video.swf_url = swf_url
video.save()
# send a signal
video_created.send(sender=video, video=video)
# delete the uploaded video instance
uploaded_video.delete()
# return the response
                if return_only_data:
return HttpResponse(json.dumps({"video_id": video_id}), content_type="application/json")
else:
# Redirect to the video page or the specified page
try:
next_url = settings.YOUTUBE_UPLOAD_REDIRECT_URL
except AttributeError:
next_url = reverse(
"django_youtube.views.video", kwargs={"video_id": video_id})
return HttpResponseRedirect(next_url)
        except Exception:
            logger.exception("Unexpected error during direct upload")
            # @todo: proper error management
            return HttpResponse("error happened")
if return_only_data:
return HttpResponse(json.dumps({"error": 500}), content_type="application/json")
else:
return render_to_response(
"django_youtube/direct-upload.html",
{"form": form},
context_instance=RequestContext(request)
)
|
python
|
{
"resource": ""
}
|
q277287
|
upload
|
test
|
def upload(request):
"""
Displays an upload form
Creates upload url and token from youtube api and uses them on the form
"""
# Get the optional parameters
title = request.GET.get("title", "%s's video on %s" % (
request.user.username, request.get_host()))
description = request.GET.get("description", "")
keywords = request.GET.get("keywords", "")
# Try to create post_url and token to create an upload form
try:
api = Api()
# upload method needs authentication
api.authenticate()
# Customize following line to your needs, you can add description, keywords or developer_keys
# I prefer to update video information after upload finishes
data = api.upload(title, description=description, keywords=keywords,
access_control=AccessControl.Unlisted)
    except ApiError as e:
        # An api error happened, redirect to homepage
        messages.add_message(request, messages.ERROR, str(e))
        return HttpResponseRedirect("/")
    except Exception:
# An error happened, redirect to homepage
messages.add_message(request, messages.ERROR, _(
'An error occurred during the upload, Please try again.'))
return HttpResponseRedirect("/")
# Create the form instance
form = YoutubeUploadForm(initial={"token": data["youtube_token"]})
protocol = 'https' if request.is_secure() else 'http'
next_url = '%s://%s%s/' % (protocol, request.get_host(), reverse("django_youtube.views.upload_return"))
return render_to_response(
"django_youtube/upload.html",
{"form": form, "post_url": data["post_url"], "next_url": next_url},
context_instance=RequestContext(request)
)
|
python
|
{
"resource": ""
}
|
q277288
|
upload_return
|
test
|
def upload_return(request):
"""
The upload result page
Youtube will redirect to this page after upload is finished
Saves the video data and redirects to the next page
Params:
status: status of the upload (200 for success)
id: id number of the video
"""
status = request.GET.get("status")
video_id = request.GET.get("id")
if status == "200" and video_id:
# upload is successful
# save the video entry
video = Video()
video.user = request.user
video.video_id = video_id
video.save()
# send a signal
video_created.send(sender=video, video=video)
# Redirect to the video page or the specified page
try:
next_url = settings.YOUTUBE_UPLOAD_REDIRECT_URL
except AttributeError:
next_url = reverse(
"django_youtube.views.video", kwargs={"video_id": video_id})
return HttpResponseRedirect(next_url)
else:
# upload failed, redirect to upload page
from django.contrib import messages
messages.add_message(
request, messages.ERROR, _('Upload failed, Please try again.'))
return HttpResponseRedirect(reverse("django_youtube.views.upload"))
|
python
|
{
"resource": ""
}
|
q277289
|
remove
|
test
|
def remove(request, video_id):
"""
    Removes the video from YouTube and from the db
    (the model's delete() also removes it from YouTube).
    Should only be requested via POST.
"""
# prepare redirection url
try:
next_url = settings.YOUTUBE_DELETE_REDIRECT_URL
except AttributeError:
next_url = reverse("django_youtube.views.upload")
# Remove from db
try:
Video.objects.get(video_id=video_id).delete()
    except Exception:
from django.contrib import messages
messages.add_message(
request, messages.ERROR, _('Video could not be deleted.'))
# Return to upload page or specified page
return HttpResponseRedirect(next_url)
|
python
|
{
"resource": ""
}
|
q277290
|
Video.entry
|
test
|
def entry(self):
"""
    Connects to the YouTube API and retrieves the video entry object.
    Returns:
gdata.youtube.YouTubeVideoEntry
"""
api = Api()
api.authenticate()
return api.fetch_video(self.video_id)
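
# A minimal usage sketch (hypothetical helper, not part of the original
# module); assumes a saved Video row exists and "VIDEO_ID" is a placeholder:
def _example_entry():
    video = Video.objects.get(video_id="VIDEO_ID")
    entry = video.entry()
    return entry.media.title.text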
|
python
|
{
"resource": ""
}
|
q277291
|
Video.save
|
test
|
def save(self, *args, **kwargs):
"""
    Synchronizes the video information in the db with the video on YouTube.
    Signals are not used here to avoid saving the video instance twice.
"""
# if this is a new instance add details from api
if not self.id:
# Connect to api and get the details
entry = self.entry()
# Set the details
self.title = entry.media.title.text
self.description = entry.media.description.text
self.keywords = entry.media.keywords.text
self.youtube_url = entry.media.player.url
self.swf_url = entry.GetSwfUrl()
if entry.media.private:
self.access_control = AccessControl.Private
else:
self.access_control = AccessControl.Public
# Save the instance
super(Video, self).save(*args, **kwargs)
        # save the thumbnails
for thumbnail in entry.media.thumbnail:
t = Thumbnail()
t.url = thumbnail.url
t.video = self
t.save()
else:
# updating the video instance
# Connect to API and update video on youtube
api = Api()
# update method needs authentication
api.authenticate()
# Update the info on youtube, raise error on failure
api.update_video(self.video_id, self.title, self.description,
self.keywords, self.access_control)
# Save the model
return super(Video, self).save(*args, **kwargs)
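
# A minimal usage sketch (hypothetical helper, not part of the original
# module): creating a local record for an already-uploaded video pulls the
# metadata and thumbnails from YouTube on first save. "VIDEO_ID" is a
# placeholder and `user` is assumed to be a Django User instance.
def _example_create_video(user):
    video = Video(user=user, video_id="VIDEO_ID")
    video.save()  # fetches title, description etc. from the API
    return video.title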
|
python
|
{
"resource": ""
}
|
q277292
|
Video.delete
|
test
|
def delete(self, *args, **kwargs):
"""
Deletes the video from youtube
Raises:
OperationError
"""
api = Api()
# Authentication is required for deletion
api.authenticate()
# Send API request, raises OperationError on unsuccessful deletion
api.delete_video(self.video_id)
# Call the super method
return super(Video, self).delete(*args, **kwargs)
|
python
|
{
"resource": ""
}
|
q277293
|
Metadata.update_metadata
|
test
|
def update_metadata(self, params):
""" Generic method for a resource's Update Metadata endpoint.
Example endpoints:
* `Update Device Metadata <https://m2x.att.com/developer/documentation/v2/device#Update-Device-Metadata>`_
* `Update Distribution Metadata <https://m2x.att.com/developer/documentation/v2/distribution#Update-Distribution-Metadata>`_
* `Update Collection Metadata <https://m2x.att.com/developer/documentation/v2/collections#Update-Collection-Metadata>`_
:param params: The metadata being updated
:return: The API response, see M2X API docs for details
:rtype: dict
:raises: :class:`~requests.exceptions.HTTPError` if an error occurs when sending the HTTP request
"""
return self.api.put(self.metadata_path(), data=params)
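
# A minimal usage sketch (hypothetical helper, not part of the original
# module); `device` is assumed to be a fetched M2X resource and the metadata
# keys are illustrative:
def _example_update_metadata(device):
    return device.update_metadata({"location_note": "rack 4",
                                   "owner": "ops-team"})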
|
python
|
{
"resource": ""
}
|
q277294
|
Metadata.update_metadata_field
|
test
|
def update_metadata_field(self, field, value):
""" Generic method for a resource's Update Metadata Field endpoint.
Example endpoints:
* `Update Device Metadata Field <https://m2x.att.com/developer/documentation/v2/device#Update-Device-Metadata-Field>`_
* `Update Distribution Metadata Field <https://m2x.att.com/developer/documentation/v2/distribution#Update-Distribution-Metadata-Field>`_
* `Update Collection Metadata Field <https://m2x.att.com/developer/documentation/v2/collections#Update-Collection-Metadata-Field>`_
:param field: The metadata field to be updated
:param value: The value to update
:return: The API response, see M2X API docs for details
:rtype: dict
:raises: :class:`~requests.exceptions.HTTPError` if an error occurs when sending the HTTP request
"""
    return self.api.put(self.metadata_field_path(field), data={"value": value})
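
# A minimal usage sketch (hypothetical helper, not part of the original
# module); the field name and value are illustrative:
def _example_update_field(device):
    return device.update_metadata_field("owner", "platform-team")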
|
python
|
{
"resource": ""
}
|
q277295
|
Resource.update
|
test
|
def update(self, **attrs):
""" Generic method for a resource's Update endpoint.
Example endpoints:
* `Update Device Details <https://m2x.att.com/developer/documentation/v2/device#Update-Device-Details>`_
* `Update Distribution Details <https://m2x.att.com/developer/documentation/v2/distribution#Update-Distribution-Details>`_
* `Update Collection Details <https://m2x.att.com/developer/documentation/v2/collections#Update-Collection-Details>`_
:param attrs: Query parameters passed as keyword arguments. View M2X API Docs for listing of available parameters.
:return: The API response, see M2X API docs for details
:rtype: dict
:raises: :class:`~requests.exceptions.HTTPError` if an error occurs when sending the HTTP request
"""
self.data.update(self.item_update(self.api, self.id, **attrs))
return self.data
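
# A minimal usage sketch (hypothetical helper, not part of the original
# module); the attribute names are illustrative -- see the M2X docs for the
# parameters a given resource actually accepts:
def _example_update_details(device):
    return device.update(name="Backyard sensor", visibility="private")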
|
python
|
{
"resource": ""
}
|
q277296
|
loads
|
test
|
def loads(s, strip_comments=False, **kw):
"""
Load a list of trees from a Newick formatted string.
:param s: Newick formatted string.
:param strip_comments: Flag signaling whether to strip comments enclosed in square \
brackets.
:param kw: Keyword arguments are passed through to `Node.create`.
:return: List of Node objects.
"""
kw['strip_comments'] = strip_comments
return [parse_node(ss.strip(), **kw) for ss in s.split(';') if ss.strip()]
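
# A minimal round-trip sketch (hypothetical helper, not part of the original
# module), assuming Node exposes `name` and `newick` as in this package:
def _example_loads():
    trees = loads('(A,B,(C,D)E)F;')
    assert trees[0].name == 'F'
    assert dumps(trees) == '(A,B,(C,D)E)F;'
    return trees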
|
python
|
{
"resource": ""
}
|
q277297
|
dumps
|
test
|
def dumps(trees):
"""
Serialize a list of trees in Newick format.
:param trees: List of Node objects or a single Node object.
:return: Newick formatted string.
"""
if isinstance(trees, Node):
trees = [trees]
return ';\n'.join([tree.newick for tree in trees]) + ';'
|
python
|
{
"resource": ""
}
|
q277298
|
load
|
test
|
def load(fp, strip_comments=False, **kw):
"""
Load a list of trees from an open Newick formatted file.
:param fp: open file handle.
:param strip_comments: Flag signaling whether to strip comments enclosed in square \
brackets.
:param kw: Keyword arguments are passed through to `Node.create`.
:return: List of Node objects.
"""
kw['strip_comments'] = strip_comments
return loads(fp.read(), **kw)
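
# A minimal usage sketch (hypothetical helper, not part of the original
# module); io.StringIO stands in for an open file handle:
def _example_load():
    import io
    trees = load(io.StringIO(u'(A,B)C;'))
    return trees[0].name  # 'C'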
|
python
|
{
"resource": ""
}
|
q277299
|
read
|
test
|
def read(fname, encoding='utf8', strip_comments=False, **kw):
"""
Load a list of trees from a Newick formatted file.
    :param fname: file path.
    :param encoding: character encoding of the file (default 'utf8').
:param strip_comments: Flag signaling whether to strip comments enclosed in square \
brackets.
:param kw: Keyword arguments are passed through to `Node.create`.
:return: List of Node objects.
"""
kw['strip_comments'] = strip_comments
with io.open(fname, encoding=encoding) as fp:
return load(fp, **kw)
|
python
|
{
"resource": ""
}
|