<SYSTEM_TASK:>
Runs refine_cand on all positive SNR candidates above threshold. Any detected at higher SNR are highlighted.
<END_TASK>
<USER_TASK:>
Description:
def refine_cands(candsfile, threshold=0, scaledm=2.1, scalepix=2, scaleuv=1.0, chans=[], savepkl=True):
""" Runs refine_cand on all positive SNR candidates above threshold. Any detected at higher SNR are highlighted. """ |
# get snrs above threshold
locs, props, d = pc.read_candidates(candsfile, snrmin=threshold, returnstate=True)
if 'snr2' in d['features']:
snrcol = d['features'].index('snr2')
elif 'snr1' in d['features']:
snrcol = d['features'].index('snr1')
scancol = d['featureind'].index('scan')
segmentcol = d['featureind'].index('segment')
intcol = d['featureind'].index('int')
dtindcol = d['featureind'].index('dtind')
dmindcol = d['featureind'].index('dmind')
snrs = props[:, snrcol]
for (i, snr) in enumerate(snrs):
if snr > 0:
d, cands = refine_cand(candsfile, threshold=threshold, candnum=i,
scaledm=scaledm, scalepix=scalepix, scaleuv=scaleuv, chans=chans)
if cands:
candlocs = np.array(cands.keys())
candprops = np.array(cands.values())
scan = locs[i, scancol]
segment = locs[i, segmentcol]
candint = locs[i, intcol]
dmind = locs[i, dmindcol]
dtind = locs[i, dtindcol]
candfile = 'cands_{0}_sc{1}-seg{2}-i{3}-dm{4}-dt{5}.pkl'.format(d['fileroot'], scan, segment, candint, dmind, dtind)
if any([candsnr > snr for candsnr in candprops[:, snrcol]]):
logger.info('Cand {0} had SNR {1} and refinement found a higher SNR in new ones: {2}.'.format(i, snr, candprops[:, snrcol]))
logger.info('Saving to {0}: {1}'.format(candfile, cands))
with open(candfile, 'w') as pkl:
pickle.dump(d, pkl, protocol=2)
pickle.dump((candlocs, candprops), pkl, protocol=2)
else:
logger.info('Cand {0} had SNR {1}, but refinement found no improvement: {2}'.format(i, snr, candprops[:, snrcol])) |
<SYSTEM_TASK:>
For given state and location that are too bulky, calculate new location given memory_limit.
<END_TASK>
<USER_TASK:>
Description:
def convertloc(candsfile, candloc, memory_limit):
""" For given state and location that are too bulky, calculate new location given memory_limit. """ |
scan, segment, candint, dmind, dtind, beamnum = candloc
# set up state and find absolute integration of candidate
d0 = pickle.load(open(candsfile, 'r'))
filename = os.path.basename(d0['filename'])
readints0 = d0['readints']
nskip0 = (24*3600*(d0['segmenttimes'][segment, 0]
- d0['starttime_mjd'])
/ d0['inttime']).astype(int)
candint_abs = nskip0 + candint
logger.debug('readints0 {} nskip0 {}, candint_abs {}'.format(readints0, nskip0, candint_abs))
# clean up d0 and resubmit to set_pipeline
params = pp.Params()
for key in d0.keys():
if not hasattr(params, key):
_ = d0.pop(key)
d0['logfile'] = False
d0['npix'] = 0
d0['uvres'] = 0
d0['nsegments'] = 0
d0['memory_limit'] = memory_limit
d = rt.set_pipeline(os.path.basename(filename), scan, **d0)
# find best segment for new state
readints = d['readints']
nskips = [(24*3600*(d['segmenttimes'][segment, 0]
- d['starttime_mjd']) / d['inttime']).astype(int)
for segment in range(d['nsegments'])]
posind = [i for i in range(len(nskips)) if candint_abs - nskips[i] > 0]
segment_new = [seg for seg in posind if candint_abs - nskips[seg] == min([candint_abs - nskips[i] for i in posind])][0]
candint_new = candint_abs - nskips[segment_new]
logger.debug('nskips {}, segment_new {}'.format(nskips, segment_new))
return [scan, segment_new, candint_new, dmind, dtind, beamnum] |
<SYSTEM_TASK:>
quick and dirty coord conversion. googled to find bdnyc.org.
<END_TASK>
<USER_TASK:>
Description:
def deg2HMS(ra='', dec='', round=False):
""" quick and dirty coord conversion. googled to find bdnyc.org.
""" |
RA, DEC, rs, ds = '', '', '', ''
if dec:
if str(dec)[0] == '-':
ds, dec = '-', abs(dec)
deg = int(dec)
decM = abs(int((dec-deg)*60))
if round:
decS = int((abs((dec-deg)*60)-decM)*60)
else:
decS = (abs((dec-deg)*60)-decM)*60
DEC = '{0}{1} {2} {3}'.format(ds, deg, decM, decS)
if ra:
if str(ra)[0] == '-':
rs, ra = '-', abs(ra)
raH = int(ra/15)
raM = int(((ra/15)-raH)*60)
if round:
raS = int(((((ra/15)-raH)*60)-raM)*60)
else:
raS = ((((ra/15)-raH)*60)-raM)*60
RA = '{0}{1} {2} {3}'.format(rs, raH, raM, raS)
if ra and dec:
return (RA, DEC)
else:
return RA or DEC |
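A quick usage sketch of deg2HMS; the call and the rounded values below are illustrative, worked by hand rather than taken from a run:
ra_str, dec_str = deg2HMS(ra=202.4842, dec=47.2306)
# ra/15 = 13.4989... hours -> RA  approximately '13 29 56.2'
# dec = 47.2306 degrees    -> DEC approximately '47 13 50.2'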
<SYSTEM_TASK:>
We will try to guess the wxr version used
<END_TASK>
<USER_TASK:>
Description:
def guess_wxr_version(self, tree):
"""
We will try to guess the wxr version used
to complete the wordpress xml namespace name.
""" |
for v in ('1.2', '1.1', '1.0'):
try:
tree.find('channel/{%s}wxr_version' % (WP_NS % v)).text
return v
except AttributeError:
pass
raise CommandError('Cannot resolve the wordpress namespace') |
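A minimal sketch of how this lookup behaves, assuming WP_NS is a namespace template along the lines of 'http://wordpress.org/export/%s/' (the real template is defined elsewhere in the importer):
import xml.etree.ElementTree as ET
WP_NS = 'http://wordpress.org/export/%s/'  # assumed template, for this sketch only
wxr = ('<rss><channel>'
       '<wxr_version xmlns="http://wordpress.org/export/1.2/">1.2</wxr_version>'
       '</channel></rss>')
tree = ET.fromstring(wxr)
# tree.find('channel/{%s}wxr_version' % (WP_NS % '1.2')).text == '1.2',
# so guess_wxr_version would return '1.2' for this tree.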
<SYSTEM_TASK:>
Retrieve all the authors used in posts
<END_TASK>
<USER_TASK:>
Description:
def import_authors(self, tree):
"""
Retrieve all the authors used in posts
and convert them to new or existing authors,
returning the conversion mapping.
""" |
self.write_out(self.style.STEP('- Importing authors\n'))
post_authors = set()
for item in tree.findall('channel/item'):
post_type = item.find('{%s}post_type' % WP_NS).text
if post_type == 'post':
post_authors.add(item.find(
'{http://purl.org/dc/elements/1.1/}creator').text)
self.write_out('> %i authors found.\n' % len(post_authors))
authors = {}
for post_author in post_authors:
if self.default_author:
authors[post_author] = self.default_author
else:
authors[post_author] = self.migrate_author(
post_author.replace(' ', '-'))
return authors |
<SYSTEM_TASK:>
Handle actions for migrating the authors.
<END_TASK>
<USER_TASK:>
Description:
def migrate_author(self, author_name):
"""
Handle actions for migrating the authors.
""" |
action_text = "The author '%s' needs to be migrated to an user:\n"\
"1. Use an existing user ?\n"\
"2. Create a new user ?\n"\
"Please select a choice: " % self.style.ITEM(author_name)
while 42:
selection = input(smart_str(action_text))
if selection and selection in '12':
break
if selection == '1':
users = Author.objects.all()
if users.count() == 1:
username = users[0].get_username()
preselected_user = username
usernames = [username]
usernames_display = ['[%s]' % username]
else:
usernames = []
usernames_display = []
preselected_user = None
for user in users:
username = user.get_username()
if username == author_name:
usernames_display.append('[%s]' % username)
preselected_user = username
else:
usernames_display.append(username)
usernames.append(username)
while 42:
user_text = "1. Select your user, by typing " \
"one of theses usernames:\n"\
"%s or 'back'\n"\
"Please select a choice: " % \
', '.join(usernames_display)
user_selected = input(user_text)
if user_selected in usernames:
break
if user_selected == '' and preselected_user:
user_selected = preselected_user
break
if user_selected.strip() == 'back':
return self.migrate_author(author_name)
return users.get(**{users[0].USERNAME_FIELD: user_selected})
else:
create_text = "2. Please type the email of " \
"the '%s' user or 'back': " % author_name
author_mail = input(create_text)
if author_mail.strip() == 'back':
return self.migrate_author(author_name)
try:
return Author.objects.create_user(author_name, author_mail)
except IntegrityError:
return Author.objects.get(
**{Author.USERNAME_FIELD: author_name}) |
<SYSTEM_TASK:>
Return a list of entry's tags,
<END_TASK>
<USER_TASK:>
Description:
def get_entry_tags(self, categories):
"""
Return a list of entry's tags,
by using the nicename for url compatibility.
""" |
tags = []
for category in categories:
domain = category.attrib.get('domain', 'category')
if 'tag' in domain and category.attrib.get('nicename'):
tags.append(category.attrib.get('nicename'))
return tags |
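A small sketch of the category nodes this expects, with hypothetical WXR-style values:
import xml.etree.ElementTree as ET
item = ET.fromstring(
    '<item>'
    '<category domain="post_tag" nicename="django">Django</category>'
    '<category domain="category" nicename="dev">Dev</category>'
    '</item>')
# get_entry_tags(item.findall('category')) -> ['django']
# (only nodes whose domain contains 'tag' and that carry a nicename are kept)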
<SYSTEM_TASK:>
Return a list of entry's categories
<END_TASK>
<USER_TASK:>
Description:
def get_entry_categories(self, category_nodes):
"""
Return a list of entry's categories
based on imported categories.
""" |
categories = []
for category_node in category_nodes:
domain = category_node.attrib.get('domain')
if domain == 'category':
categories.append(self.categories[category_node.text])
return categories |
<SYSTEM_TASK:>
Loops over items and finds entries to import,
<END_TASK>
<USER_TASK:>
Description:
def import_entries(self, items):
"""
Loops over items and finds entries to import;
an entry needs to have 'post_type' set to 'post' and
have content.
""" |
self.write_out(self.style.STEP('- Importing entries\n'))
for item_node in items:
title = (item_node.find('title').text or '')[:255]
post_type = item_node.find('{%s}post_type' % WP_NS).text
content = item_node.find(
'{http://purl.org/rss/1.0/modules/content/}encoded').text
if post_type == 'post' and content and title:
self.write_out('> %s... ' % title)
entry, created = self.import_entry(title, content, item_node)
if created:
self.write_out(self.style.ITEM('OK\n'))
image_id = self.find_image_id(
item_node.findall('{%s}postmeta' % WP_NS))
if image_id:
self.import_image(entry, items, image_id)
self.import_comments(entry, item_node.findall(
'{%s}comment' % WP_NS))
else:
self.write_out(self.style.NOTICE(
'SKIPPED (already imported)\n'))
else:
self.write_out('> %s... ' % title, 2)
self.write_out(self.style.NOTICE('SKIPPED (not a post)\n'), 2) |
<SYSTEM_TASK:>
Loops over comment nodes and imports them
<END_TASK>
<USER_TASK:>
Description:
def import_comments(self, entry, comment_nodes):
"""
Loops over comment nodes and imports them
into django_comments.
""" |
for comment_node in comment_nodes:
is_pingback = comment_node.find(
'{%s}comment_type' % WP_NS).text == PINGBACK
is_trackback = comment_node.find(
'{%s}comment_type' % WP_NS).text == TRACKBACK
title = 'Comment #%s' % (comment_node.find(
'{%s}comment_id' % WP_NS).text)
self.write_out(' > %s... ' % title)
content = comment_node.find(
'{%s}comment_content' % WP_NS).text
if not content:
self.write_out(self.style.NOTICE('SKIPPED (unfilled)\n'))
return
submit_date = datetime.strptime(
comment_node.find('{%s}comment_date_gmt' % WP_NS).text,
'%Y-%m-%d %H:%M:%S')
if settings.USE_TZ:
submit_date = timezone.make_aware(submit_date,
pytz.timezone('GMT'))
approvation = comment_node.find(
'{%s}comment_approved' % WP_NS).text
is_public = True
is_removed = False
if approvation != '1':
is_removed = True
if approvation == 'spam':
is_public = False
comment_dict = {
'content_object': entry,
'site': self.SITE,
'user_name': comment_node.find(
'{%s}comment_author' % WP_NS).text[:50],
'user_email': comment_node.find(
'{%s}comment_author_email' % WP_NS).text or '',
'user_url': comment_node.find(
'{%s}comment_author_url' % WP_NS).text or '',
'comment': content,
'submit_date': submit_date,
'ip_address': comment_node.find(
'{%s}comment_author_IP' % WP_NS).text or None,
'is_public': is_public,
'is_removed': is_removed, }
comment = comments.get_model()(**comment_dict)
comment.save()
if is_pingback:
comment.flags.create(
user=get_user_flagger(), flag=PINGBACK)
if is_trackback:
comment.flags.create(
user=get_user_flagger(), flag=TRACKBACK)
self.write_out(self.style.ITEM('OK\n'))
entry.comment_count = entry.comments.count()
entry.pingback_count = entry.pingbacks.count()
entry.trackback_count = entry.trackbacks.count()
entry.save(force_update=True) |
<SYSTEM_TASK:>
Compute the "gradient" of the model for the current parameters
<END_TASK>
<USER_TASK:>
Description:
def compute_gradient(self, *args, **kwargs):
"""
Compute the "gradient" of the model for the current parameters
The default implementation computes the gradients numerically using
a first order forward scheme. For better performance, this method
should be overloaded by subclasses. The output of this function
should be an array where the first dimension is ``full_size``.
""" |
_EPS = 1.254e-5
vector = self.get_parameter_vector()
value0 = self.get_value(*args, **kwargs)
grad = np.empty([len(vector)] + list(value0.shape), dtype=np.float64)
for i, v in enumerate(vector):
vector[i] = v + _EPS
self.set_parameter_vector(vector)
value = self.get_value(*args, **kwargs)
vector[i] = v
self.set_parameter_vector(vector)
grad[i] = (value - value0) / _EPS
return grad |
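The same first-order forward scheme as a standalone sketch, independent of the model class API (the function and parameter names below are illustrative):
import numpy as np
_EPS = 1.254e-5
def numeric_grad(f, params):
    """Forward-difference gradient of f; the first dimension indexes parameters."""
    params = np.asarray(params, dtype=np.float64)
    value0 = np.atleast_1d(f(params))
    grad = np.empty([len(params)] + list(value0.shape), dtype=np.float64)
    for i, v in enumerate(params):
        params[i] = v + _EPS          # perturb one parameter
        grad[i] = (np.atleast_1d(f(params)) - value0) / _EPS
        params[i] = v                 # restore it
    return grad
# numeric_grad(lambda p: p[0]**2 + 3.0*p[1], [1.0, 2.0]) is approximately [[2.0], [3.0]]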
<SYSTEM_TASK:>
Connect the instance to redis by checking the existence of its primary
<END_TASK>
<USER_TASK:>
Description:
def connect(self):
"""
Connect the instance to redis by checking the existence of its primary
key. Do nothing if already connected.
""" |
if self.connected:
return
pk = self._pk
if self.exists(pk=pk):
self._connected = True
else:
self._pk = None
self._connected = False
raise DoesNotExist("No %s found with pk %s" % (self.__class__.__name__, pk)) |
<SYSTEM_TASK:>
Create an object, setting its primary key without testing it. So the
<END_TASK>
<USER_TASK:>
Description:
def lazy_connect(cls, pk):
"""
Create an object, setting its primary key without testing it. So the
instance is not connected
""" |
instance = cls()
instance._pk = instance.pk.normalize(pk)
instance._connected = False
return instance |
<SYSTEM_TASK:>
Set default values to fields. We assume that they are not yet populated
<END_TASK>
<USER_TASK:>
Description:
def _set_defaults(self):
"""
Set default values to fields. We assume that they are not yet populated
as this method is called just after creation of a new pk.
""" |
for field_name in self._fields:
if field_name in self._init_fields:
continue
field = self.get_field(field_name)
if hasattr(field, "default"):
field.proxy_set(field.default) |
<SYSTEM_TASK:>
A model with the values defined by kwargs exists in db?
<END_TASK>
<USER_TASK:>
Description:
def exists(cls, **kwargs):
"""
A model with the values defined by kwargs exists in db?
`kwargs` are mandatory.
""" |
if not kwargs:
raise ValueError(u"`Exists` method requires at least one kwarg.")
# special case to check for a simple pk
if len(kwargs) == 1 and cls._field_is_pk(list(kwargs.keys())[0]):
return cls.get_field('pk').exists(list(kwargs.values())[0])
# get only the first element of the unsorted collection (the fastest)
try:
cls.collection(**kwargs).sort(by='nosort')[0]
except IndexError:
return False
else:
return True |
<SYSTEM_TASK:>
Retrieve one instance from db according to given kwargs.
<END_TASK>
<USER_TASK:>
Description:
def get(cls, *args, **kwargs):
"""
Retrieve one instance from db according to given kwargs.
Optionally, one arg can be used to retrieve it by pk.
""" |
if len(args) == 1: # Guess it's a pk
pk = args[0]
elif kwargs:
# special case to check for a simple pk
if len(kwargs) == 1 and cls._field_is_pk(list(kwargs.keys())[0]):
pk = list(kwargs.values())[0]
else: # case with many filters
result = cls.collection(**kwargs).sort(by='nosort')
if len(result) == 0:
raise DoesNotExist(u"No object matching filter: %s" % kwargs)
elif len(result) > 1:
raise ValueError(u"More than one object matching filter: %s" % kwargs)
else:
try:
pk = result[0]
except IndexError:
# object was deleted between the `len` check and now
raise DoesNotExist(u"No object matching filter: %s" % kwargs)
else:
raise ValueError("Invalid `get` usage with args %s and kwargs %s" % (args, kwargs))
return cls(pk) |
<SYSTEM_TASK:>
Try to retrieve an object in db, and create it if it does not exist.
<END_TASK>
<USER_TASK:>
Description:
def get_or_connect(cls, **kwargs):
"""
Try to retrieve an object in db, and create it if it does not exist.
""" |
try:
inst = cls.get(**kwargs)
created = False
except DoesNotExist:
inst = cls(**kwargs)
created = True
except Exception:
raise
return inst, created |
<SYSTEM_TASK:>
This command on the model allows getting many instancehash fields with only
<END_TASK>
<USER_TASK:>
Description:
def hmget(self, *args):
"""
This command on the model allows getting many instancehash fields with only
one redis call. You must pass the hash field names to retrieve as arguments.
""" |
if args and not any(arg in self._instancehash_fields for arg in args):
raise ValueError("Only InstanceHashField can be used here.")
return self._call_command('hmget', args) |
<SYSTEM_TASK:>
This command on the model allows setting many instancehash fields with only
<END_TASK>
<USER_TASK:>
Description:
def hmset(self, **kwargs):
"""
This command on the model allows setting many instancehash fields with only
one redis call. You must pass kwargs with field names as keys and
their values as values.
""" |
if kwargs and not any(kwarg in self._instancehash_fields for kwarg in iterkeys(kwargs)):
raise ValueError("Only InstanceHashField can be used here.")
indexed = []
# main try block to revert indexes if something fail
try:
# Set indexes for indexable fields.
for field_name, value in iteritems(kwargs):
field = self.get_field(field_name)
if field.indexable:
indexed.append(field)
field.deindex()
field.index(value)
# Call redis (waits for a dict)
result = self._call_command('hmset', kwargs)
return result
except:
# We revert indexes previously set if we have an exception, then
# really raise the error
for field in indexed:
field._rollback_indexes()
raise
finally:
for field in indexed:
field._reset_indexes_caches() |
<SYSTEM_TASK:>
This command on the model allows deleting many instancehash fields with
<END_TASK>
<USER_TASK:>
Description:
def hdel(self, *args):
"""
This command on the model allows deleting many instancehash fields with
only one redis call. You must pass the hash field names to delete as arguments.
""" |
if args and not any(arg in self._instancehash_fields for arg in args):
raise ValueError("Only InstanceHashField can be used here.")
# Set indexes for indexable fields.
for field_name in args:
field = self.get_field(field_name)
if field.indexable:
field.deindex()
# Return the number of fields really deleted
return self._call_command('hdel', *args) |
<SYSTEM_TASK:>
Delete the instance from redis storage.
<END_TASK>
<USER_TASK:>
Description:
def delete(self):
"""
Delete the instance from redis storage.
""" |
# Delete each field
for field_name in self._fields:
field = self.get_field(field_name)
if not isinstance(field, PKField):
# pk has no stored key
field.delete()
# Remove the pk from the model collection
self.connection.srem(self.get_field('pk').collection_key, self._pk)
# Deactivate the instance
delattr(self, "_pk") |
<SYSTEM_TASK:>
Iterate over all the keys related to the current instance fields, using the redis SCAN command
<END_TASK>
<USER_TASK:>
Description:
def scan_keys(self, count=None):
"""Iter on all the key related to the current instance fields, using redis SCAN command
Parameters
----------
count: int, default to None (redis uses 10)
Hint for redis about the number of expected result
Yields
-------
str
All keys found by the scan, one by one. A key can be returned multiple times, it's
related to the way the SCAN command works in redis.
""" |
pattern = self.make_key(
self._name,
self.pk.get(),
'*'
)
return self.database.scan_keys(pattern, count) |
<SYSTEM_TASK:>
Iterate over all the keys related to the current model, using the redis SCAN command
<END_TASK>
<USER_TASK:>
Description:
def scan_model_keys(cls, count=None):
"""Iter on all the key related to the current model, using redis SCAN command
Parameters
----------
count: int, default to None (redis uses 10)
Hint for redis about the number of expected result
Yields
-------
str
All keys found by the scan, one by one. A key can be returned multiple times, it's
related to the way the SCAN command works in redis.
""" |
pattern = cls.make_key(
cls._name,
"*",
)
return cls.database.scan_keys(pattern, count) |
<SYSTEM_TASK:>
Given a model name as ``app_label.ModelName``, returns the Django model.
<END_TASK>
<USER_TASK:>
Description:
def get_model(model):
"""
Given a model name as ``app_label.ModelName``, returns the Django model.
""" |
try:
if isinstance(model, str):
app_label, model_name = model.split('.', 1)
m = loading.get_model(app_label, model_name)
if not m: # pragma: no cover
raise LookupError() # Django < 1.7 just returns None
return m
elif issubclass(model, models.Model):
return model
except (LookupError, ValueError):
pass
raise ValueError(model) |
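A hedged usage note, assuming a configured Django project with the auth app installed:
# get_model('auth.User')    -> the django.contrib.auth User model class
# get_model(User)           -> User, passed through since it is already a model class
# get_model('auth.Missing') -> raises ValueError('auth.Missing')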
<SYSTEM_TASK:>
Asserts that the content types for the given object are valid for this
<END_TASK>
<USER_TASK:>
Description:
def _validate_ctypes(self, from_obj, to_obj):
"""
Asserts that the content types for the given object are valid for this
relationship. If validation fails, ``AssertionError`` will be raised.
""" |
if from_obj:
from_ctype = ContentType.objects.get_for_model(from_obj)
assert from_ctype.natural_key() == self.from_content_type.natural_key(), (
'Relationship "%s" does not support connections '
'from "%s" types' % (self.name, from_ctype))
if to_obj:
to_ctype = ContentType.objects.get_for_model(to_obj)
assert to_ctype.natural_key() == self.to_content_type.natural_key(), (
'Relationship "%s" does not support connections '
'to "%s" types' % (self.name, to_ctype)) |
<SYSTEM_TASK:>
Creates and returns a connection between the given objects. If a
<END_TASK>
<USER_TASK:>
Description:
def create_connection(self, from_obj, to_obj):
"""
Creates and returns a connection between the given objects. If a
connection already exists, that connection will be returned instead.
""" |
self._validate_ctypes(from_obj, to_obj)
return Connection.objects.get_or_create(relationship_name=self.name,
from_pk=from_obj.pk, to_pk=to_obj.pk)[0] |
<SYSTEM_TASK:>
Returns a ``Connection`` instance for the given objects or ``None`` if
<END_TASK>
<USER_TASK:>
Description:
def get_connection(self, from_obj, to_obj):
"""
Returns a ``Connection`` instance for the given objects or ``None`` if
there's no connection.
""" |
self._validate_ctypes(from_obj, to_obj)
try:
return self.connections.get(from_pk=from_obj.pk, to_pk=to_obj.pk)
except Connection.DoesNotExist:
return None |
<SYSTEM_TASK:>
Returns ``True`` if a connection between the given objects exists,
<END_TASK>
<USER_TASK:>
Description:
def connection_exists(self, from_obj, to_obj):
"""
Returns ``True`` if a connection between the given objects exists,
else ``False``.
""" |
self._validate_ctypes(from_obj, to_obj)
return self.connections.filter(from_pk=from_obj.pk, to_pk=to_obj.pk).exists() |
<SYSTEM_TASK:>
Returns a ``Connection`` query set matching all connections with
<END_TASK>
<USER_TASK:>
Description:
def connections_from_object(self, from_obj):
"""
Returns a ``Connection`` query set matching all connections with
the given object as a source.
""" |
self._validate_ctypes(from_obj, None)
return self.connections.filter(from_pk=from_obj.pk) |
<SYSTEM_TASK:>
Returns a ``Connection`` query set matching all connections with
<END_TASK>
<USER_TASK:>
Description:
def connections_to_object(self, to_obj):
"""
Returns a ``Connection`` query set matching all connections with
the given object as a destination.
""" |
self._validate_ctypes(None, to_obj)
return self.connections.filter(to_pk=to_obj.pk) |
<SYSTEM_TASK:>
Returns a query set matching all connected objects with the given
<END_TASK>
<USER_TASK:>
Description:
def connected_objects(self, from_obj):
"""
Returns a query set matching all connected objects with the given
object as a source.
""" |
return self.to_content_type.get_all_objects_for_this_type(pk__in=self.connected_object_ids(from_obj)) |
<SYSTEM_TASK:>
Returns a query set matching all connected objects with the given
<END_TASK>
<USER_TASK:>
Description:
def connected_to_objects(self, to_obj):
"""
Returns a query set matching all connected objects with the given
object as a destination.
""" |
return self.from_content_type.get_all_objects_for_this_type(pk__in=self.connected_to_object_ids(to_obj)) |
<SYSTEM_TASK:>
Calculates the distance between two objects. Distance 0 means
<END_TASK>
<USER_TASK:>
Description:
def distance_between(self, from_obj, to_obj, limit=2):
"""
Calculates the distance between two objects. Distance 0 means
``from_obj`` and ``to_obj`` are the same objects, 1 means ``from_obj``
has a direct connection to ``to_obj``, 2 means that one or more of
``from_obj``'s connected objects are directly connected to ``to_obj``,
etc.
``limit`` limits the depth of connections traversal.
Returns ``None`` if the two objects are not connected within ``limit``
distance.
""" |
self._validate_ctypes(from_obj, to_obj)
if from_obj == to_obj:
return 0
d = 1
pk = to_obj.pk
qs = self.connections
pks = qs.filter(from_pk=from_obj.pk).values_list('to_pk', flat=True)
while limit > 0:
if pk in pks:
return d
else:
pks = qs.filter(from_pk__in=pks).values_list('to_pk', flat=True)  # follow the next hop of connections
d += 1
limit -= 1
return None |
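A worked sketch of the returned distances for a hypothetical relationship rel holding connections a->b and b->c:
# rel.distance_between(a, a) -> 0     (same object)
# rel.distance_between(a, b) -> 1     (direct connection)
# rel.distance_between(a, c) -> 2     (reached through b on the second hop)
# rel.distance_between(a, d) -> None  (not reachable within the default limit=2)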
<SYSTEM_TASK:>
Generate an MS that contains all calibrator scans with 1 s integration time.
<END_TASK>
<USER_TASK:>
Description:
def genms(self, scans=[]):
""" Generate an MS that contains all calibrator scans with 1 s integration time.
""" |
if len(scans):
scanstr = ','.join(str(ss) for ss in sorted(scans))
else:
scanstr = self.allstr
print('Splitting out all cal scans (%s) with 1s int time' % scanstr)
newname = ps.sdm2ms(self.sdmfile, self.sdmfile.rstrip('/')+'.ms', scanstr, inttime='1') # integrate down to 1s during split
return newname |
<SYSTEM_TASK:>
Replace the emoticons string by HTML images,
<END_TASK>
<USER_TASK:>
Description:
def regexp_replace_emoticons(content):
"""
Replace the emoticons string by HTML images,
with regular expressions.
""" |
for emoticon, emoticon_html in EMOTICONS_COMPILED:
if emoticon.search(content):
content = emoticon.sub(emoticon_html, content)
return content |
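A minimal sketch of the EMOTICONS_COMPILED structure this assumes; the real mapping comes from the package settings, and the entry below is hypothetical:
import re
EMOTICONS_COMPILED = [
    (re.compile(re.escape(':)')),
     '<img class="emoticon" src="/static/emoticons/smile.png" alt=":)" />'),
]
# regexp_replace_emoticons('Hello :)') would then return
# 'Hello <img class="emoticon" src="/static/emoticons/smile.png" alt=":)" />'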
<SYSTEM_TASK:>
Replace the emoticons string by HTML images.
<END_TASK>
<USER_TASK:>
Description:
def replace_emoticons(content, excluded_markups):
"""
Replace the emoticons string by HTML images.
If some markups should be excluded from replacement,
BeautifulSoup will be used.
""" |
if not excluded_markups:
return regexp_replace_emoticons(content)
excluded_markups = excluded_markups.split(',') + ['[document]']
soup = BeautifulSoup(content, 'html.parser')
for content_string in list(soup.strings):
if content_string.parent.name not in excluded_markups:
replaced_content_string = regexp_replace_emoticons(content_string)
if content_string != replaced_content_string:
content_string.replace_with(
BeautifulSoup(replaced_content_string, 'html.parser'))
return str(soup) |
<SYSTEM_TASK:>
Filter for rendering emoticons.
<END_TASK>
<USER_TASK:>
Description:
def emoticons_filter(content, exclude='', autoescape=None):
"""
Filter for rendering emoticons.
""" |
esc = autoescape and conditional_escape or (lambda x: x)
content = mark_safe(replace_emoticons(esc(content), exclude))
return content |
<SYSTEM_TASK:>
Save or update obj as pkl file with name label
<END_TASK>
<USER_TASK:>
Description:
def save(self, obj, label, format='text'):
""" Save or update obj as pkl file with name label
format can be 'text' or 'pickle'.
""" |
# initialize hidden state directory
objloc = '{0}/{1}'.format(self.statedir, label)
with open(objloc, 'w') as fp:
if format == 'pickle':
pickle.dump(obj, fp)
elif format == 'text':
fp.write(str(obj))
else:
print('Format {0} not recognized. Please choose either pickle or text.'.format(format))
print('Saving {0} to label {1}'.format(obj, label)) |
<SYSTEM_TASK:>
Load obj with given label from hidden state directory
<END_TASK>
<USER_TASK:>
Description:
def load(self, label):
""" Load obj with give label from hidden state directory """ |
objloc = '{0}/{1}'.format(self.statedir, label)
try:
obj = pickle.load(open(objloc, 'r'))
except (KeyError, IndexError, EOFError):
obj = open(objloc, 'r').read()
try:
obj = float(obj)
except ValueError:
pass
except IOError:
obj = None
return obj |
<SYSTEM_TASK:>
Given FDMT state, return indices to slice partial FDMT solution and sum to a given DM
<END_TASK>
<USER_TASK:>
Description:
def dmtoind(dm, f_min, f_max, nchan0, inttime, it):
"""
Given FDMT state, return indices to slice partial FDMT solution and sum to a given DM
""" |
# maxDT = dmtodt(dm) # need to write
# NOTE: dF and i_dT are assumed to come from the enclosing FDMT state; they are not defined in this snippet.
if it > 0:
correction = dF/2.
else:
correction = 0
shift = []
nchan = nchan0 // 2**it
for i_F in range(nchan):
f_start = (f_max - f_min)/float(nchan) * (i_F) + f_min
f_end = (f_max - f_min)/float(nchan) * (i_F+1) + f_min
f_middle = (f_end - f_start)/2. + f_start - correction
f_middle_larger = (f_end - f_start)/2 + f_start + correction
dT_middle = int(round(i_dT * (1./f_middle**2 - 1./f_start**2)/(1./f_end**2 - 1./f_start**2)))
dT_middle_larger = int(round(i_dT * (1./f_middle_larger**2 - 1./f_start**2)/(1./f_end**2 - 1./f_start**2)))
shift.append( (-dT_middle_larger, i_F) )
return shift |
<SYSTEM_TASK:>
Generate a unique keyname that does not exist in the connection
<END_TASK>
<USER_TASK:>
Description:
def unique_key(connection):
"""
Generate a unique keyname that does not exist in the connection
keyspace.
""" |
while 1:
key = str(uuid.uuid4().hex)
if not connection.exists(key):
break
return key |
<SYSTEM_TASK:>
Simple method to always have the same kind of value
<END_TASK>
<USER_TASK:>
Description:
def normalize(value):
"""
Simple method to always have the same kind of value
""" |
if value and isinstance(value, bytes):
value = value.decode('utf-8')
return value |
<SYSTEM_TASK:>
Accepts new clients and sends them to _handle_accepted within a subthread
<END_TASK>
<USER_TASK:>
Description:
def _mainthread_accept_clients(self):
"""Accepts new clients and sends them to the to _handle_accepted within a subthread
""" |
try:
if self._accept_selector.select(timeout=self.block_time):
client = self._server_socket.accept()
logging.info('Client connected: {}'.format(client[1]))
self._threads_limiter.start_thread(target=self._subthread_handle_accepted,
args=(client,))
except socket.error:
pass |
<SYSTEM_TASK:>
Searches for readable client sockets. These sockets are then put in a subthread
<END_TASK>
<USER_TASK:>
Description:
def _mainthread_poll_readable(self):
"""Searches for readable client sockets. These sockets are then put in a subthread
to be handled by _handle_readable
""" |
events = self._recv_selector.select(self.block_time)
for key, mask in events:
if mask == selectors.EVENT_READ:
self._recv_selector.unregister(key.fileobj)
self._threads_limiter.start_thread(target=self._subthread_handle_readable,
args=(key.fileobj,)) |
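A standalone sketch of the register/select/unregister pattern used above, built only on the standard library (not the server class itself):
import selectors
import socket
sel = selectors.DefaultSelector()
srv = socket.socket()
srv.bind(('localhost', 0))
srv.listen()
srv.setblocking(False)
sel.register(srv, selectors.EVENT_READ)
events = sel.select(timeout=0.1)
for key, mask in events:
    if mask & selectors.EVENT_READ:
        sel.unregister(key.fileobj)   # hand key.fileobj off to a worker thread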
<SYSTEM_TASK:>
Gets accepted clients from the queue object and sets up the client socket.
<END_TASK>
<USER_TASK:>
Description:
def _subthread_handle_accepted(self, client):
"""Gets accepted clients from the queue object and sets up the client socket.
The client can then be found in the clients dictionary with the socket object
as the key.
""" |
conn, addr = client
if self.handle_incoming(conn, addr):
logging.info('Accepted connection from client: {}'.format(addr))
conn.setblocking(False)
self.clients[conn] = addr
self.register(conn)
else:
logging.info('Refused connection from client: {}'.format(addr))
self.disconnect(conn) |
<SYSTEM_TASK:>
Handles readable client sockets. Calls the user modified handle_readable with
<END_TASK>
<USER_TASK:>
Description:
def _subthread_handle_readable(self, conn):
"""Handles readable client sockets. Calls the user modified handle_readable with
the client socket as the only variable. If the handle_readable function returns
true the client is again registered to the selector object otherwise the client
is disconnected.
""" |
if self.handle_readable(conn):
self.register(conn)
else:
self.disconnect(conn) |
<SYSTEM_TASK:>
Wrapper for mean visibility subtraction in time.
<END_TASK>
<USER_TASK:>
Description:
def meantsubpool(d, data_read):
""" Wrapper for mean visibility subtraction in time.
Doesn't work when called from pipeline using multiprocessing pool.
""" |
logger.info('Subtracting mean visibility in time...')
data_read = numpyview(data_read_mem, 'complex64', datashape(d))
tsubpart = partial(rtlib.meantsub, data_read)
blranges = [(d['nbl'] * t/d['nthread'], d['nbl']*(t+1)/d['nthread']) for t in range(d['nthread'])]
with closing(mp.Pool(1, initializer=initreadonly, initargs=(data_read_mem,))) as tsubpool:
tsubpool.map(tsubpart, blranges) |
<SYSTEM_TASK:>
Flagging data in single process
<END_TASK>
<USER_TASK:>
Description:
def dataflag(d, data_read):
""" Flagging data in single process
""" |
for flag in d['flaglist']:
mode, sig, conv = flag
# resultlist = []
# with closing(mp.Pool(4, initializer=initreadonly, initargs=(data_read_mem,))) as flagpool:
for ss in d['spw']:
chans = n.arange(d['spw_chanr_select'][ss][0], d['spw_chanr_select'][ss][1])
for pol in range(d['npol']):
status = rtlib.dataflag(data_read, chans, pol, d, sig, mode, conv)
logger.info(status)
# hack to get rid of bad spw/pol combos whacked by rfi
if 'badspwpol' in d:
logger.info('Comparing overall power between spw/pol. Removing those with %d times typical value' % d['badspwpol'])
spwpol = {}
for spw in d['spw']:
chans = n.arange(d['spw_chanr_select'][spw][0], d['spw_chanr_select'][spw][1])
for pol in range(d['npol']):
spwpol[(spw, pol)] = n.abs(data_read[:,:,chans,pol]).std()
meanstd = n.mean(spwpol.values())
for (spw,pol) in spwpol:
if spwpol[(spw, pol)] > d['badspwpol']*meanstd:
logger.info('Flagging all of (spw %d, pol %d) for excess noise.' % (spw, pol))
chans = n.arange(d['spw_chanr_select'][spw][0], d['spw_chanr_select'][spw][1])
data_read[:,:,chans,pol] = 0j |
<SYSTEM_TASK:>
Wrapper function to get shared memory as numpy array into pool
<END_TASK>
<USER_TASK:>
Description:
def dataflagatom(chans, pol, d, sig, mode, conv):
""" Wrapper function to get shared memory as numpy array into pool
Assumes data_mem is global mps.Array
""" |
data = numpyview(data_mem, 'complex64', datashape(d))
# data = n.ma.masked_array(data, data==0j) # this causes massive overflagging on 14sep03 data
return rtlib.dataflag(data, chans, pol, d, sig, mode, conv) |
<SYSTEM_TASK:>
Reproduce function, much like search.
<END_TASK>
<USER_TASK:>
Description:
def runreproduce(d, data_mem, data_resamp_mem, u, v, w, dmind, dtind, candint=-1, lm=None, twindow=30):
""" Reproduce function, much like search.
Returns image and rephased data for given candint.
If no candint is given, it returns resampled data by default. Optionally rephases to lm=(l, m) coordinates.
""" |
data_resamp = numpyview(data_resamp_mem, 'complex64', datashape(d))
with closing(mp.Pool(1, initializer=initresamp, initargs=(data_mem, data_resamp_mem))) as repropool:
# dedisperse
logger.info('Dedispersing with DM=%.1f, dt=%d...' % (d['dmarr'][dmind], d['dtarr'][dtind]))
repropool.apply(correct_dmdt, [d, dmind, dtind, (0,d['nbl'])])
# set up image
if 'image1' in d['searchtype']:
npixx = d['npixx']
npixy = d['npixy']
elif 'image2' in d['searchtype']:
npixx = d['npixx_full']
npixy = d['npixy_full']
if candint > -1:
if lm:
logger.warn('Using candint image to get l,m. Not using provided l,m.')
# image
logger.info('Imaging int %d with %d %d pixels...' % (candint, npixx, npixy))
im = repropool.apply(image1wrap, [d, u, v, w, npixx, npixy, candint/d['dtarr'][dtind]])
snrmin = im.min()/im.std()
snrmax = im.max()/im.std()
logger.info('Made image with SNR min, max: %.1f, %.1f' % (snrmin, snrmax))
if snrmax > -1*snrmin:
l1, m1 = calc_lm(d, im, minmax='max')
else:
l1, m1 = calc_lm(d, im, minmax='min')
# rephase and trim interesting ints out
repropool.apply(move_phasecenter, [d, l1, m1, u, v])
minint = max(candint/d['dtarr'][dtind]-twindow/2, 0)
maxint = min(candint/d['dtarr'][dtind]+twindow/2, len(data_resamp)/d['dtarr'][dtind])
return(im, data_resamp[minint:maxint].mean(axis=1))
else:
if lm:
l1, m1 = lm
repropool.apply(move_phasecenter, [d, l1, m1, u, v])
return data_resamp |
<SYSTEM_TASK:>
Produce a mock transient pulse source for the purposes of characterizing the
<END_TASK>
<USER_TASK:>
Description:
def make_transient(std, DMmax, Amin=6., Amax=20., rmax=20., rmin=0., DMmin=0.):
""" Produce a mock transient pulse source for the purposes of characterizing the
detection success of the current pipeline.
Assumes
- Code to inject the transients does so by inserting at an array index
- Noise level at the center of the data array is characteristic of the
noise level throughout
Input
std - noise level in visibilities(?) at mid-point of segment
DMmax - maximum DM at which mock transient can be inserted [pc/cm^3]
Amin/Amax is amplitude in units of the std (calculated below)
rmax/rmin is radius range in arcmin
DMmin is min DM
Returns
loff - direction cosine offset of mock transient from phase center [radians]
moff - direction cosine offset of mock transient from phase center [radians]
A - amplitude of transient [std units]
DM - dispersion measure of mock transient [pc/cm^3]
""" |
rad_arcmin = math.pi/(180*60)
phimin = 0.0
phimax = 2*math.pi
# Amplitude of transient, done in units of the std
# std is calculated assuming that noise level in the middle of the data,
# at index d['readints']/2, is characteristic of that throughout the data
A = random.uniform(Amin, Amax) * std
# Position of transient, in direction cosines
r = random.uniform(rmin, rmax)
phi = random.uniform(phimin, phimax)
loff = r*math.cos(phi) * rad_arcmin
moff = r*math.sin(phi) * rad_arcmin
# Dispersion measure
DM = random.uniform(DMmin, DMmax)
return loff, moff, A, DM |
<SYSTEM_TASK:>
Calculate the number of thermal-noise false positives per segment.
<END_TASK>
<USER_TASK:>
Description:
def calc_nfalse(d):
""" Calculate the number of thermal-noise false positives per segment.
""" |
dtfactor = n.sum([1./i for i in d['dtarr']]) # assumes dedisperse-all algorithm
ntrials = d['readints'] * dtfactor * len(d['dmarr']) * d['npixx'] * d['npixy']
qfrac = 1 - (erf(d['sigma_image1']/n.sqrt(2)) + 1)/2.
nfalse = int(qfrac*ntrials)
return nfalse |
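A rough numeric sketch of the same estimate with made-up pipeline values (the numbers below are illustrative, not from a real observation):
from scipy.special import erf
import numpy as n
sigma_image1 = 6.5
readints, ndm, npixx, npixy = 512, 32, 512, 512
dtfactor = sum(1./i for i in [1, 2, 4])               # dedisperse-all resampling factors
qfrac = 1 - (erf(sigma_image1/n.sqrt(2)) + 1)/2.      # one-sided Gaussian tail, ~4e-11
ntrials = readints * dtfactor * ndm * npixx * npixy   # ~7.5e9 independent trials
# int(qfrac*ntrials) is ~0 here; lowering sigma_image1 raises nfalse quickly.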
<SYSTEM_TASK:>
Helper function for set_pipeline to define segmenttimes list, given nsegments definition
<END_TASK>
<USER_TASK:>
Description:
def calc_segment_times(d):
""" Helper function for set_pipeline to define segmenttimes list, given nsegments definition
""" |
# this casts to int (flooring) to avoid 0.5 int rounding issue.
stopdts = n.linspace(d['nskip']+d['t_overlap']/d['inttime'], d['nints'], d['nsegments']+1)[1:] # nseg+1 assures that at least one seg made
startdts = n.concatenate( ([d['nskip']], stopdts[:-1]-d['t_overlap']/d['inttime']) )
segmenttimes = []
for (startdt, stopdt) in zip(d['inttime']*startdts, d['inttime']*stopdts):
starttime = qa.getvalue(qa.convert(qa.time(qa.quantity(d['starttime_mjd']+startdt/(24*3600),'d'),form=['ymd'], prec=9)[0], 's'))[0]/(24*3600)
stoptime = qa.getvalue(qa.convert(qa.time(qa.quantity(d['starttime_mjd']+stopdt/(24*3600), 'd'), form=['ymd'], prec=9)[0], 's'))[0]/(24*3600)
segmenttimes.append((starttime, stoptime))
d['segmenttimes'] = n.array(segmenttimes)
totaltimeread = 24*3600*(d['segmenttimes'][:, 1] - d['segmenttimes'][:, 0]).sum() # not guaranteed to be the same for each segment
d['readints'] = n.round(totaltimeread / (d['inttime']*d['nsegments'])).astype(int)
d['t_segment'] = totaltimeread/d['nsegments'] |
<SYSTEM_TASK:>
Estimate largest time span of a "segment".
<END_TASK>
<USER_TASK:>
Description:
def calc_fringetime(d):
""" Estimate largest time span of a "segment".
A segment is the maximal time span that can have a single bg fringe subtracted and a single uv grid definition.
Max fringe window estimated for 5% amp loss at first null averaged over all baselines. Assumes dec=+90, which is conservative.
Returns time in seconds that defines good window.
""" |
maxbl = d['uvres']*d['npix']/2 # fringe time for imaged data only
fringetime = 0.5*(24*3600)/(2*n.pi*maxbl/25.) # max fringe window in seconds
return fringetime |
<SYSTEM_TASK:>
Dedisperses data into data_resamp
<END_TASK>
<USER_TASK:>
Description:
def correct_dm(d, dm, blrange):
""" Dedisperses data into data_resamp
Drops edges, since it assumes that data is read with overlapping chunks in time.
""" |
data = numpyview(data_mem, 'complex64', datashape(d))
data_resamp = numpyview(data_resamp_mem, 'complex64', datashape(d))
bl0,bl1 = blrange
data_resamp[:, bl0:bl1] = data[:, bl0:bl1]
rtlib.dedisperse_par(data_resamp, d['freq'], d['inttime'], dm, blrange, verbose=0) |
<SYSTEM_TASK:>
Resamples data_resamp
<END_TASK>
<USER_TASK:>
Description:
def correct_dt(d, dt, blrange):
""" Resamples data_resamp
Drops edges, since it assumes that data is read with overlapping chunks in time.
""" |
data = numpyview(data_mem, 'complex64', datashape(d))
data_resamp = numpyview(data_resamp_mem, 'complex64', datashape(d))
bl0,bl1 = blrange
rtlib.resample_par(data_resamp, d['freq'], d['inttime'], dt, blrange, verbose=0) |
<SYSTEM_TASK:>
Handler function for phaseshift_threaded
<END_TASK>
<USER_TASK:>
Description:
def move_phasecenter(d, l1, m1, u, v):
""" Handler function for phaseshift_threaded
""" |
logger.info('Rephasing data to (l, m)=(%.4f, %.4f).' % (l1, m1))
data_resamp = numpyview(data_resamp_mem, 'complex64', datashape(d))
rtlib.phaseshift_threaded(data_resamp, d, l1, m1, u, v) |
<SYSTEM_TASK:>
Function to calculate the DM values for a given maximum sensitivity loss.
<END_TASK>
<USER_TASK:>
Description:
def calc_dmgrid(d, maxloss=0.05, dt=3000., mindm=0., maxdm=0.):
""" Function to calculate the DM values for a given maximum sensitivity loss.
maxloss is sensitivity loss tolerated by dm bin width. dt is assumed pulse width in microsec.
""" |
# parameters
tsamp = d['inttime']*1e6 # in microsec
k = 8.3
freq = d['freq'].mean() # central (mean) frequency in GHz
bw = 1e3*(d['freq'][-1] - d['freq'][0])
ch = 1e3*(d['freq'][1] - d['freq'][0]) # channel width in MHz
# width functions and loss factor
dt0 = lambda dm: n.sqrt(dt**2 + tsamp**2 + ((k*dm*ch)/(freq**3))**2)
dt1 = lambda dm, ddm: n.sqrt(dt**2 + tsamp**2 + ((k*dm*ch)/(freq**3))**2 + ((k*ddm*bw)/(freq**3.))**2)
loss = lambda dm, ddm: 1 - n.sqrt(dt0(dm)/dt1(dm,ddm))
loss_cordes = lambda ddm, dfreq, dt, freq: 1 - (n.sqrt(n.pi) / (2 * 6.91e-3 * ddm * dfreq / (dt*freq**3))) * erf(6.91e-3 * ddm * dfreq / (dt*freq**3)) # not quite right for underresolved pulses
if maxdm == 0:
return [0]
else:
# iterate over dmgrid to find optimal dm values. go higher than maxdm to be sure final list includes full range.
dmgrid = n.arange(mindm, maxdm, 0.05)
dmgrid_final = [dmgrid[0]]
for i in range(len(dmgrid)):
ddm = (dmgrid[i] - dmgrid_final[-1])/2.
ll = loss(dmgrid[i],ddm)
if ll > maxloss:
dmgrid_final.append(dmgrid[i])
return dmgrid_final |
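A quick numeric sanity check of the intra-channel smearing term inside dt0/dt1, with illustrative values:
# k*dm*ch/freq**3 with dm=100 pc/cm^3, ch=4 MHz, freq=1.4 GHz gives
# 8.3 * 100 * 4 / 1.4**3, roughly 1210 microsec of smearing; it reaches the assumed
# dt=3000 microsec pulse width near DM of about 250 pc/cm^3 for these settings.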
<SYSTEM_TASK:>
Samples one integration and returns image
<END_TASK>
<USER_TASK:>
Description:
def sample_image(d, data, u, v, w, i=-1, verbose=0, imager='xy', wres=100):
""" Samples one integration and returns image
i is integration to image. Default is mid int.
""" |
if i == -1:
i = len(data)/2
if imager == 'xy':
image = rtlib.imgonefullxy(n.outer(u, d['freq']/d['freq_orig'][0]), n.outer(v, d['freq']/d['freq_orig'][0]), data[i], d['npixx'], d['npixy'], d['uvres'], verbose=verbose)
elif imager == 'w':
npix = max(d['npixx'], d['npixy'])
bls, uvkers = rtlib.genuvkernels(w, wres, npix, d['uvres'], ksize=21, oversample=1)
image = rtlib.imgonefullw(n.outer(u, d['freq']/d['freq_orig'][0]), n.outer(v, d['freq']/d['freq_orig'][0]), data[i], npix, d['uvres'], bls, uvkers, verbose=verbose)
# bls, lmkers = rtlib.genlmkernels(w, wres, npix, d['uvres'])
# image = rtlib.imgonefullw(n.outer(u, d['freq']/d['freq_orig'][0]), n.outer(v, d['freq']/d['freq_orig'][0]), data[i], npix, d['uvres'], [bls[0]], [lmkers[0]], verbose=verbose)
return image |
<SYSTEM_TASK:>
Takes large data array and sigma clips it to find noise per bl for input to detect_bispectra.
<END_TASK>
<USER_TASK:>
Description:
def estimate_noiseperbl(data):
""" Takes large data array and sigma clips it to find noise per bl for input to detect_bispectra.
Takes mean across pols and channels for now, as in detect_bispectra.
""" |
# define noise per baseline for data seen by detect_bispectra or image
datamean = data.mean(axis=2).imag # use imaginary part to estimate noise without calibrated, on-axis signal
(datameanmin, datameanmax) = rtlib.sigma_clip(datamean.flatten())
good = n.where( (datamean>datameanmin) & (datamean<datameanmax) )
noiseperbl = datamean[good].std() # measure single noise for input to detect_bispectra
logger.debug('Clipped to %d%% of data (%.3f to %.3f). Noise = %.3f.' % (100.*len(good[0])/len(datamean.flatten()), datameanmin, datameanmax, noiseperbl))
return noiseperbl |
<SYSTEM_TASK:>
Calculates noise properties and saves values to pickle.
<END_TASK>
<USER_TASK:>
Description:
def noisepickle(d, data, u, v, w, chunk=200):
""" Calculates noise properties and saves values to pickle.
chunk defines window for measurement. at least one measurement always made.
""" |
if d['savenoise']:
noisefile = getnoisefile(d)
if os.path.exists(noisefile):
logger.warn('noisefile %s already exists' % noisefile)
else:
nints = len(data)
chunk = min(chunk, nints) # ensure at least one measurement
results = []
rr = range(0, nints, chunk)
if len(rr) == 1: rr.append(1) # hack. need to make sure it iterates for nints=1 case
for i in range(len(rr)-1):
imid = (rr[i]+rr[i+1])/2
noiseperbl = estimate_noiseperbl(data[rr[i]:rr[i+1]])
imstd = sample_image(d, data, u, v, w, imid, verbose=0).std()
zerofrac = float(len(n.where(data[rr[i]:rr[i+1]] == 0j)[0]))/data[rr[i]:rr[i+1]].size
results.append( (d['segment'], noiseperbl, zerofrac, imstd) )
with open(noisefile, 'a') as pkl:
pickle.dump(results, pkl)
logger.info('Wrote %d noise measurement%s to %s.' % (len(results), 's'[:len(results)-1], noisefile)) |
<SYSTEM_TASK:>
Save all candidates in pkl file for later aggregation and filtering.
<END_TASK>
<USER_TASK:>
Description:
def savecands(d, cands, domock=False):
""" Save all candidates in pkl file for later aggregation and filtering.
domock is option to save simulated cands file
""" |
with open(getcandsfile(d, domock=domock), 'w') as pkl:
pickle.dump(d, pkl)
pickle.dump(cands, pkl) |
<SYSTEM_TASK:>
Takes mp shared array and returns numpy array with given shape.
<END_TASK>
<USER_TASK:>
Description:
def numpyview(arr, datatype, shape, raw=False):
""" Takes mp shared array and returns numpy array with given shape.
""" |
if raw:
return n.frombuffer(arr, dtype=n.dtype(datatype)).view(n.dtype(datatype)).reshape(shape) # for shared mps.RawArray
else:
return n.frombuffer(arr.get_obj(), dtype=n.dtype(datatype)).view(n.dtype(datatype)).reshape(shape) |
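A standalone sketch of the shared-array-to-numpy view; the shape and float32 dtype are examples, while the pipeline itself passes 'complex64' buffers:
import multiprocessing as mp
import numpy as n
shape = (4, 8)
arr = mp.Array('f', shape[0]*shape[1])                 # locked shared float32 buffer
view = n.frombuffer(arr.get_obj(), dtype=n.float32).reshape(shape)
view[0, 0] = 1.0                                       # visible to forked worker processes
raw = mp.RawArray('f', shape[0]*shape[1])              # unlocked variant (the raw=True path)
rawview = n.frombuffer(raw, dtype=n.float32).reshape(shape)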
<SYSTEM_TASK:>
Return this path as a relative path,
<END_TASK>
<USER_TASK:>
Description:
def relpath(self):
""" Return this path as a relative path,
based from the current working directory.
""" |
cwd = self.__class__(os.getcwd())
return cwd.relpathto(self) |
<SYSTEM_TASK:>
Return a relative path from self to dest.
<END_TASK>
<USER_TASK:>
Description:
def relpathto(self, dest):
""" Return a relative path from self to dest.
If there is no relative path from self to dest, for example if
they reside on different drives in Windows, then this returns
dest.abspath().
""" |
origin = self.abspath()
dest = self.__class__(dest).abspath()
orig_list = origin.normcase().splitall()
# Don't normcase dest! We want to preserve the case.
dest_list = dest.splitall()
if orig_list[0] != os.path.normcase(dest_list[0]):
# Can't get here from there.
return dest
# Find the location where the two paths start to differ.
i = 0
for start_seg, dest_seg in zip(orig_list, dest_list):
if start_seg != os.path.normcase(dest_seg):
break
i += 1
# Now i is the point where the two paths diverge.
# Need a certain number of "os.pardir"s to work up
# from the origin to the point of divergence.
segments = [os.pardir] * (len(orig_list) - i)
# Need to add the diverging part of dest_list.
segments += dest_list[i:]
if len(segments) == 0:
# If they happen to be identical, use os.curdir.
relpath = os.curdir
else:
relpath = os.path.join(*segments)
return self.__class__(relpath) |
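A couple of POSIX-style usage sketches:
# path('/users/alice/project').relpathto('/users/bob') -> path('../../bob')
# path('/users/alice').relpathto('/users/alice')       -> path('.')
# On Windows, paths on different drives fall back to dest.abspath().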
<SYSTEM_TASK:>
Return a list of path objects that match the pattern.
<END_TASK>
<USER_TASK:>
Description:
def glob(self, pattern):
""" Return a list of path objects that match the pattern.
pattern - a path relative to this directory, with wildcards.
For example, ``path('/users').glob('*/bin/*')`` returns a list
of all the files users have in their bin directories.
""" |
cls = self.__class__
return [cls(s) for s in glob.glob(_base(self / pattern))] |
<SYSTEM_TASK:>
Calculate the md5 hash for this file.
<END_TASK>
<USER_TASK:>
Description:
def read_md5(self, hex=False):
""" Calculate the md5 hash for this file.
hex - Return the digest as hex string.
This reads through the entire file.
""" |
f = self.open('rb')
try:
m = hashlib.md5()
while True:
d = f.read(8192)
if not d:
break
m.update(d)
finally:
f.close()
if hex:
return m.hexdigest()
else:
return m.digest() |
<SYSTEM_TASK:>
r""" Return the name of the owner of this file or directory.
<END_TASK>
<USER_TASK:>
Description:
def owner(self):
r""" Return the name of the owner of this file or directory.
This follows symbolic links.
On Windows, this returns a name of the form ur'DOMAIN\User Name'.
On Windows, a group can own a file or directory.
""" |
if os.name == 'nt':
if win32security is None:
raise Exception("path.owner requires win32all to be installed")
desc = win32security.GetFileSecurity(
self, win32security.OWNER_SECURITY_INFORMATION)
sid = desc.GetSecurityDescriptorOwner()
account, domain, typecode = win32security.LookupAccountSid(None, sid)
return domain + u'\\' + account
else:
if pwd is None:
raise NotImplementedError("path.owner is not implemented on this platform.")
st = self.stat()
return pwd.getpwuid(st.st_uid).pw_name |
<SYSTEM_TASK:>
Make sure the directory exists, create if necessary.
<END_TASK>
<USER_TASK:>
Description:
def ensure_dir(self, mode=0o777):
"""
Make sure the directory exists, create if necessary.
""" |
if not self.exists() or not self.isdir():
os.makedirs(self, mode) |
<SYSTEM_TASK:>
Read parameter file and set parameter values.
<END_TASK>
<USER_TASK:>
Description:
def parse(self, paramfile):
""" Read parameter file and set parameter values.
File should have python-like syntax. Full file name needed.
""" |
with open(paramfile, 'r') as f:
for line in f.readlines():
line_clean = line.rstrip('\n').split('#')[0] # trim out comments and trailing cr
if line_clean and '=' in line: # use valid lines only
attribute, value = line_clean.split('=')
try:
value_eval = eval(value.strip())
except NameError:
value_eval = value.strip()
finally:
setattr(self, attribute.strip(), value_eval) |
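A sketch of a parameter file this parser accepts; the file name and contents are hypothetical:
# example.params
#   nthread = 4                  # eval() yields the int 4
#   dmarr = [0., 56.8, 113.6]    # eval() yields a list of floats
#   label = testrun              # eval() raises NameError, so the raw string 'testrun' is kept
# Calling parse('example.params') on a Params instance then sets p.nthread, p.dmarr and p.label.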
<SYSTEM_TASK:>
Load the index file and reorder the banks based on the order listed in the index
<END_TASK>
<USER_TASK:>
Description:
def load(self, indexables):
"""
Load the index file and reorder the banks based on the order listed in the index
:param list[Indexable] indexables: Banks that will be reordered based on the index file
:return list[Bank]: Banks reordered
""" |
try:
data = Persistence.read(self.path, create_file=True)
except ValueError:
data = {}
return self.load_data(data, indexables) |
<SYSTEM_TASK:>
Register an observer so it will be notified when changes occur.
<END_TASK>
<USER_TASK:>
Description:
def register(self, observer):
"""
Register an observer so it will be notified when changes occur.
For more details, see :class:`.UpdatesObserver`
:param UpdatesObserver observer: Observer that will be notified when changes occur
""" |
self.observer_manager.append(observer)
observer.manager = self |
<SYSTEM_TASK:>
Remove the observer from the observers list.
<END_TASK>
<USER_TASK:>
Description:
def unregister(self, observer):
"""
Remove the observer from the observers list.
It will no longer receive notifications when changes occur.
:param UpdatesObserver observer: Observer that will no longer receive notifications when
changes occur.
""" |
self.observer_manager.observers.remove(observer)
observer.manager = None |
<SYSTEM_TASK:>
method returns tag that all messages will be preceded with
<END_TASK>
<USER_TASK:>
Description:
def get_log_tag(process_name):
"""method returns tag that all messages will be preceded with""" |
process_obj = context.process_context[process_name]
if isinstance(process_obj, FreerunProcessEntry):
return str(process_obj.token)
elif isinstance(process_obj, ManagedProcessEntry):
return str(process_obj.token) + str(process_obj.time_qualifier)
elif isinstance(process_obj, DaemonProcessEntry):
return str(process_obj.token)
else:
raise ValueError('Unknown process type: {0}'.format(process_obj.__class__.__name__)) |
<SYSTEM_TASK:>
method implements stream write interface, allowing to redirect stdout to logger
<END_TASK>
<USER_TASK:>
Description:
def write(self, msg, level=logging.INFO):
""" method implements stream write interface, allowing to redirect stdout to logger """ |
if msg is not None and len(msg.strip()) > 0:
self.logger.log(level, msg) |
<SYSTEM_TASK:>
Loads the metadata. It will be used to generate lv2 audio plugins.
<END_TASK>
<USER_TASK:>
Description:
def reload(self, metadata, ignore_unsupported_plugins=True):
"""
Loads the metadata. It will be used to generate lv2 audio plugins.
:param list metadata: lv2 audio plugins metadata
:param bool ignore_unsupported_plugins: If True, skip uninstalled or unrecognized audio plugins instead of instantiating them
""" |
supported_plugins = self._supported_plugins
for plugin in metadata:
if not ignore_unsupported_plugins \
or plugin['uri'] in supported_plugins:
self._plugins[plugin['uri']] = Lv2Plugin(plugin) |
<SYSTEM_TASK:>
Decorator to validate a key for zookeeper.
<END_TASK>
<USER_TASK:>
Description:
def validate_key(func):
"""
Decorator to validate a key for zookeeper.
""" |
@wraps(func)
def wrapper(self, key, *args, **kwargs):
if posixpath.sep in key:
raise ValueError('Keys cannot contain slashes')
return func(self, key, *args, **kwargs)
return wrapper |
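A usage sketch of the decorator on a hypothetical class:
class Store(object):
    @validate_key
    def get(self, key):
        return 'value-for-' + key
# Store().get('ok')  -> 'value-for-ok'
# Store().get('a/b') -> raises ValueError('Keys cannot contain slashes')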
<SYSTEM_TASK:>
Encode and save ``value`` at ``key``.
<END_TASK>
<USER_TASK:>
Description:
def persist(self, key, value):
"""
Encode and save ``value`` at ``key``.
:param key: Key to store ``value`` at in Zookeeper.
:type key: string
:param value: Value to store. Encoded before being stored.
:type value: value
""" |
encoded = self.encoding.encode(value)
self.__set_or_create(key, encoded)
self.__increment_last_updated() |
<SYSTEM_TASK:>
Remove ``key`` from dictionary.
<END_TASK>
<USER_TASK:>
Description:
def depersist(self, key):
"""
Remove ``key`` from dictionary.
:param key: Key to remove from Zookeeper.
:type key: string
""" |
self.connection.retry(self.connection.delete, self.__path_of(key))
self.__increment_last_updated() |
<SYSTEM_TASK:>
Dictionary of all keys and their values in Zookeeper.
<END_TASK>
<USER_TASK:>
Description:
def durables(self):
"""
Dictionary of all keys and their values in Zookeeper.
""" |
results = dict()
for child in self.connection.retry(self.connection.get_children, self.keyspace):
value, _ = self.connection.retry(
self.connection.get,
self.__path_of(child),
watch=self.__increment_last_updated
)
results[child] = self.encoding.decode(value)
return results |
<SYSTEM_TASK:>
If ``key`` is present in Zookeeper, removes it from Zookeeper and
<END_TASK>
<USER_TASK:>
Description:
def _pop(self, key, default=None):
"""
If ``key`` is present in Zookeeper, removes it from Zookeeper and
returns the value. If key is not in Zookeeper and ``default`` argument
is provided, ``default`` is returned. If ``default`` argument is not
provided, ``KeyError`` is raised.
:param key: Key to remove from Zookeeper
:type key: string
:param default: Default object to return if ``key`` is not present.
:type default: object
""" |
path = self.__path_of(key)
value = None
try:
# We need to both delete and return the value that was in ZK here.
raw_value, _ = self.connection.retry(self.connection.get, path)
value = self.encoding.decode(raw_value)
except self.no_node_error:
# The node is already gone, so if a default is given, return it,
# otherwise, raise KeyError
if default is not None:
return default
else:
raise KeyError
# Made it this far, it means have a value from the node and it existed
# at least by that point in time
try:
# Try to delete the node
self.connection.retry(self.connection.delete, path)
self.__increment_last_updated()
except self.no_node_error:
# Someone deleted the node in the mean time...how nice!
pass
return value |
<SYSTEM_TASK:>
If ``key`` is not present, set it as ``default`` and return it. If
<END_TASK>
<USER_TASK:>
Description:
def _setdefault(self, key, default=None):
"""
If ``key`` is not present, set it as ``default`` and return it. If
``key`` is present, return its value.
:param key: Key to add to Zookeeper
:type key: string
:param default: Default object to set and return if ``key`` is not present.
:type default: object
Will retry trying to get or create a node based on the "retry" config
from the Kazoo client.
""" |
return self.connection.retry(self.__inner_set_default, key, default) |
<SYSTEM_TASK:>
Tries to return the value at key. If the key does not exist, attempts
<END_TASK>
<USER_TASK:>
Description:
def __inner_set_default(self, key, value):
"""
Tries to return the value at key. If the key does not exist, attempts
to create it with the value. If the node is created in the mean time,
a ``NodeExistsError`` will be raised.
""" |
path = self.__path_of(key)
try:
# Try to get and return the existing node with its data
value, _ = self.connection.retry(self.connection.get, path)
return self.encoding.decode(value)
except self.no_node_error:
# Node does not exist, we have to create it
self.connection.retry(self.connection.create, path, self.encoding.encode(value))
self.__increment_last_updated()
return value |
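A brief, hedged illustration of the get-or-create behaviour above; `config` and the key are placeholders for an instance of this class.

# Hypothetical usage; 'timeout' is created with value 30 only if it does not exist yet.
timeout = config._setdefault('timeout', 30)
# First caller: the node is created and 30 is returned.
# Later callers: the value already stored in Zookeeper is decoded and returned.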
<SYSTEM_TASK:>
unregisters a tree that we are dependent on
<END_TASK>
<USER_TASK:>
Description:
def unregister_dependent_on(self, tree):
"""unregistering tree that we are dependent on""" |
if tree in self.dependent_on:
self.dependent_on.remove(tree) |
<SYSTEM_TASK:>
Used by _get_next_child_node, this method is called to find the next possible parent.
<END_TASK>
<USER_TASK:>
Description:
def _get_next_parent_node(self, parent):
""" Used by _get_next_child_node, this method is called to find next possible parent.
For example if timeperiod 2011010200 has all children processed, but is not yet processed itself
then it makes sense to look in 2011010300 for hourly nodes """ |
grandparent = parent.parent
if grandparent is None:
# here, we work at yearly/linear level
return None
parent_siblings = list(grandparent.children)
sorted_keys = sorted(parent_siblings)
index = sorted_keys.index(parent.timeperiod)
if index + 1 >= len(sorted_keys):
return None
else:
return grandparent.children[sorted_keys[index + 1]] |
<SYSTEM_TASK:>
Iterates over the children of the given parent and looks for a suitable node to process
<END_TASK>
<USER_TASK:>
Description:
def _get_next_child_node(self, parent):
"""
Iterates over the children of the given parent and looks for a suitable node to process.
In case the given parent has no suitable nodes, a younger parent is located
and the logic is repeated for it
""" |
children_keys = list(parent.children)
sorted_keys = sorted(children_keys)
for key in sorted_keys:
node = parent.children[key]
if node.job_record is None:
self.timetable.assign_job_record(node)
return node
elif self.should_skip_tree_node(node):
continue
elif node.job_record.is_active:
return node
# special case, when all children of the parent node are not suitable for processing
new_parent = self._get_next_parent_node(parent)
if new_parent is not None:
# in case all nodes are processed or blocked - look for next valid parent node
return self._get_next_child_node(new_parent)
else:
# if all valid parents are exploited - return current node
process_name = parent.children[sorted_keys[0]].process_name
time_qualifier = parent.children[sorted_keys[0]].time_qualifier
actual_timeperiod = time_helper.actual_timeperiod(time_qualifier)
return self.get_node(process_name, actual_timeperiod) |
<SYSTEM_TASK:>
method builds tree by iterating from the synergy_start_timeperiod to the current time
<END_TASK>
<USER_TASK:>
Description:
def build_tree(self, rebuild=False):
""" method builds tree by iterating from the synergy_start_timeperiod to the current time
and inserting corresponding nodes """ |
time_qualifier = self.process_hierarchy.bottom_process.time_qualifier
process_name = self.process_hierarchy.bottom_process.process_name
if rebuild or self.build_timeperiod is None:
timeperiod = settings.settings['synergy_start_timeperiod']
else:
timeperiod = self.build_timeperiod
timeperiod = cast_to_time_qualifier(time_qualifier, timeperiod)
actual_timeperiod = time_helper.actual_timeperiod(time_qualifier)
while actual_timeperiod >= timeperiod:
self.get_node(process_name, timeperiod)
timeperiod = time_helper.increment_timeperiod(time_qualifier, timeperiod)
self.build_timeperiod = actual_timeperiod |
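To make the iteration concrete, here is a simplified, hypothetical analogue of the loop above for an hourly time qualifier; it uses plain datetime arithmetic instead of synergy's time_helper and cast_to_time_qualifier.

# Simplified sketch only; synergy's own time_helper handles other qualifiers as well.
from datetime import datetime, timedelta

def hourly_timeperiods(start_timeperiod, actual_timeperiod):
    """Yield 'YYYYMMDDHH' strings from the start up to and including the actual timeperiod."""
    current = datetime.strptime(start_timeperiod, '%Y%m%d%H')
    end = datetime.strptime(actual_timeperiod, '%Y%m%d%H')
    while current <= end:
        yield current.strftime('%Y%m%d%H')
        current += timedelta(hours=1)

# list(hourly_timeperiods('2011010100', '2011010103'))
# -> ['2011010100', '2011010101', '2011010102', '2011010103']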
<SYSTEM_TASK:>
Updates job record property for a tree node associated with the given Job
<END_TASK>
<USER_TASK:>
Description:
def update_node(self, job_record):
""" Updates job record property for a tree node associated with the given Job """ |
if job_record.process_name not in self.process_hierarchy:
raise ValueError('unable to update the node due to unknown process: {0}'.format(job_record.process_name))
time_qualifier = self.process_hierarchy[job_record.process_name].process_entry.time_qualifier
node = self._get_node(time_qualifier, job_record.timeperiod)
node.job_record = job_record |
<SYSTEM_TASK:>
Method retrieves a tree node identified by the time_qualifier and the timeperiod
<END_TASK>
<USER_TASK:>
Description:
def get_node(self, process_name, timeperiod):
""" Method retrieves a tree node identified by the time_qualifier and the timeperiod """ |
if process_name not in self.process_hierarchy:
raise ValueError('unable to retrieve the node due to unknown process: {0}'.format(process_name))
time_qualifier = self.process_hierarchy[process_name].process_entry.time_qualifier
return self._get_node(time_qualifier, timeperiod) |
<SYSTEM_TASK:>
method starts validation of the tree.
<END_TASK>
<USER_TASK:>
Description:
def validate(self):
""" method starts validation of the tree.
@see TreeNode.validate """ |
for timeperiod, child in self.root.children.items():
child.validate()
self.validation_timestamp = datetime.utcnow() |
<SYSTEM_TASK:>
Return the appropriate initialized exception class for a response.
<END_TASK>
<USER_TASK:>
Description:
def error_for(response):
"""Return the appropriate initialized exception class for a response.""" |
klass = error_classes.get(response.status)
if klass is None:
if 400 <= response.status < 500:
klass = ClientError
if 500 <= response.status < 600:
klass = ServerError # pragma: no cover
return klass(response) |
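For context, a hedged sketch of the kind of exception hierarchy and `error_classes` mapping this dispatcher relies on; the class names and the mapping below are illustrative assumptions, not the library's actual definitions.

# Illustrative only; the real library defines its own hierarchy and mapping.
class ResponseError(Exception):
    def __init__(self, response):
        self.response = response
        super(ResponseError, self).__init__('HTTP {0}'.format(response.status))

class ClientError(ResponseError):
    pass

class ServerError(ResponseError):
    pass

class NotFoundError(ClientError):
    pass

error_classes = {404: NotFoundError}

# Typical call site: raise a specific error for any non-success response.
# if response.status >= 400:
#     raise error_for(response)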
<SYSTEM_TASK:>
Get a specific device configuration.
<END_TASK>
<USER_TASK:>
Description:
def device_configuration(self, pending=False, use_included=False):
"""Get a specific device configuration.
A device can have at most one loaded and one pending device
configuration. This returns that device_configuration based on
a given flag.
Keyword Args:
pending(bool): Fetch the pending configuration or return
the loaded one.
use_included(bool): Use included resources in this device
configuration.
Returns:
The requested loaded or pending configuration or None if
no device configuration is found.
""" |
device_configs = self.device_configurations(use_included=use_included)
for device_config in device_configs:
if device_config.is_loaded() is not pending:
return device_config
return None |
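A short, hedged usage example; `device` is a placeholder for an instance of the class above.

# Hypothetical usage: prefer the pending configuration, fall back to the loaded one.
config = device.device_configuration(pending=True)
if config is None:
    config = device.device_configuration()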
<SYSTEM_TASK:>
Determine the path to the virtualenv python
<END_TASK>
<USER_TASK:>
Description:
def get_python():
"""Determine the path to the virtualenv python""" |
if sys.platform == 'win32':
python = path.join(VE_ROOT, 'Scripts', 'python.exe')
else:
python = path.join(VE_ROOT, 'bin', 'python')
return python |
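A small, hedged example of how the resolved interpreter path might be used; it assumes the module-level VE_ROOT constant is configured and the virtualenv already exists.

# Hypothetical usage: run a command with the virtualenv interpreter.
import subprocess

python = get_python()
subprocess.call([python, '-c', 'import sys; print(sys.prefix)'])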
<SYSTEM_TASK:>
Install virtual environment for Python 2.7+; removing the old one if it exists
<END_TASK>
<USER_TASK:>
Description:
def install_virtualenv_p2(root, python_version):
""" Install virtual environment for Python 2.7+; removing the old one if it exists """ |
try:
import virtualenv
except ImportError:
sys.stdout.write('Installing virtualenv into global interpreter \n')
ret_code = subprocess.call([VE_GLOBAL_SCRIPT, PROJECT_ROOT])
sys.stdout.write('Installation finished with code {0}. Re-run ./launch.py install \n'.format(ret_code))
sys.exit(ret_code)
if path.exists(root):
shutil.rmtree(root)
virtualenv.logger = virtualenv.Logger(consumers=[])
virtualenv.create_environment(root, site_packages=False)
ret_code = subprocess.call([VE_SCRIPT, PROJECT_ROOT, root, python_version])
sys.exit(ret_code) |
<SYSTEM_TASK:>
Install virtual environment for Python 3.3+; removing the old one if it exists
<END_TASK>
<USER_TASK:>
Description:
def install_virtualenv_p3(root, python_version):
""" Install virtual environment for Python 3.3+; removing the old one if it exists """ |
import venv
builder = venv.EnvBuilder(system_site_packages=False, clear=True, symlinks=False, upgrade=False)
builder.create(root)
ret_code = subprocess.call([VE_SCRIPT, PROJECT_ROOT, root, python_version])
sys.exit(ret_code) |
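A hedged sketch of how a launch script might choose between the two installers above; the actual launcher may structure this differently.

# Illustrative dispatch only.
import sys

def install_virtualenv(root, python_version):
    if sys.version_info >= (3, 3):
        install_virtualenv_p3(root, python_version)
    else:
        install_virtualenv_p2(root, python_version)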
<SYSTEM_TASK:>
Supervisor-related commands
<END_TASK>
<USER_TASK:>
Description:
def supervisor_command(parser_args):
""" Supervisor-related commands """ |
import logging
from synergy.supervisor.supervisor_configurator import SupervisorConfigurator, set_box_id
if parser_args.boxid:
set_box_id(logging, parser_args.argument)
return
sc = SupervisorConfigurator()
if parser_args.reset:
sc.reset_db()
elif parser_args.start:
sc.mark_for_start(parser_args.argument)
elif parser_args.stop:
sc.mark_for_stop(parser_args.argument)
elif parser_args.query:
sc.query() |
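For illustration, the sort of argparse wiring that could produce the `parser_args` object consumed above; the exact flag names in the real launch script are an assumption.

# Hypothetical CLI wiring; only the attribute names used above are taken from the source.
import argparse

parser = argparse.ArgumentParser(description='Supervisor-related commands')
parser.add_argument('argument', nargs='?', help='process name or box id value')
parser.add_argument('--boxid', action='store_true', help='set the box id')
parser.add_argument('--reset', action='store_true', help='reset the supervisor DB')
parser.add_argument('--start', action='store_true', help='mark a process for start')
parser.add_argument('--stop', action='store_true', help='mark a process for stop')
parser.add_argument('--query', action='store_true', help='query supervisor state')

# supervisor_command(parser.parse_args(['--start', 'SomeProcessName']))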
<SYSTEM_TASK:>
Put this decorator before your view to check whether the request is coming from an IP on file
<END_TASK>
<USER_TASK:>
Description:
def ip_verification_required(func):
"""
Put this decorator before your view to check whether the request is coming from an IP on file
""" |
def wrapper(request, *args, **kwargs):
slug = kwargs.get('slug', "")
if not slug:
return kickout_404("Not found.", content_type="application/json")
try:
wip = WriteAPIIP.objects.get(slug=slug)
ip = get_client_ip(request)
if ip not in wip.allowable_ips() and "0.0.0.0" not in wip.allowable_ips():
msg = "The IP %s is not authorized to make the API call." % (
ip)
return kickout_401(msg)
except WriteAPIIP.DoesNotExist:
return HttpResponse(unauthorized_json_response(),
content_type="application/json")
return func(request, *args, **kwargs)
return update_wrapper(wrapper, func) |
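A hedged example of attaching the decorator to a Django view that receives the `slug` keyword argument; the view and URL names are hypothetical.

# Hypothetical view; the decorator rejects callers whose IP is not on file.
from django.http import JsonResponse

@ip_verification_required
def write_api_view(request, slug):
    return JsonResponse({'status': 'ok', 'slug': slug})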
<SYSTEM_TASK:>
method builds query dictionary by zipping together DB field names with the field values
<END_TASK>
<USER_TASK:>
Description:
def build_db_query(fields_names, field_values):
""" method builds query dictionary by zipping together DB field names with the field values """ |
if isinstance(field_values, string_types):
field_values = [field_values]
if len(fields_names) != len(field_values):
raise ValueError('Error: unable to build a primary key query due '
'to mismatch in number of fields {0} vs {1}'
.format(len(fields_names), len(field_values)))
query = dict()
for k, v in zip(fields_names, field_values):
query[k] = v
return query |
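Two quick usage examples; the field names and values are illustrative.

# Composite primary key: field names are zipped with the corresponding values.
query = build_db_query(['process_name', 'timeperiod'], ['SomeProcess', '2011010100'])
assert query == {'process_name': 'SomeProcess', 'timeperiod': '2011010100'}

# A single string value is wrapped into a list before zipping.
assert build_db_query(['box_id'], 'DEV_BOX') == {'box_id': 'DEV_BOX'}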
<SYSTEM_TASK:>
method finds a single record based on the given primary key and returns it to the caller
<END_TASK>
<USER_TASK:>
Description:
def get_one(self, key):
""" method finds single record base on the given primary key and returns it to the caller""" |
query = build_db_query(self.primary_key, key)
collection = self.ds.connection(self.collection_name)
document = collection.find_one(query)
if document is None:
raise LookupError('{0} with key {1} was not found'.format(self.model_klass.__name__, query))
return self.model_klass.from_json(document) |
<SYSTEM_TASK:>
method runs a query on the specified collection and returns a list of filtered Model records
<END_TASK>
<USER_TASK:>
Description:
def run_query(self, query):
""" method runs query on a specified collection and return a list of filtered Model records """ |
collection = self.ds.connection(self.collection_name)
cursor = collection.find(query)
if cursor.count() == 0:
raise LookupError('Collection {0} has no {1} records'
.format(self.collection_name, self.model_klass.__name__))
return [self.model_klass.from_json(entry) for entry in cursor] |