text_prompt (string, lengths 100–17.7k, nullable) | code_prompt (string, lengths 7–9.86k, nullable)
---|---|
<SYSTEM_TASK:>
Tell if the current index can be used for the given filter suffix
<END_TASK>
<USER_TASK:>
Description:
def can_handle_suffix(self, suffix):
"""Tell if the current index can be used for the given filter suffix
Parameters
----------
suffix: str
The filter suffix we want to check. Must not include the "__" part.
Returns
-------
bool
``True`` if the index can handle this suffix, ``False`` otherwise.
""" |
try:
return self.remove_prefix(suffix) in self.handled_suffixes
except IndexError:
return False |
<SYSTEM_TASK:>
Convert the value to be stored.
<END_TASK>
<USER_TASK:>
Description:
def transform_value(self, value):
"""Convert the value to be stored.
This does nothing by default but subclasses can change this.
Then the index will be able to filter on the transformed value.
For example if the transform capitalizes some text, the filter
would be ``myfield__capitalized__eq='FOO'``
""" |
if not self.transform:
return value
try:
# we store a staticmethod but we accept a method taking `self` and `value`
return self.transform(self, value)
except TypeError as e:
if 'argument' in str(e): # try to limit only to arguments error
return self.transform(value) |
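As a hedged, standalone sketch of why both call styles are tried above: `transform` may be stored as a staticmethod wrapping a one-argument function, in which case calling it with `self` raises a TypeError mentioning "argument". The class name below is illustrative only, not from the source.
class Demo(object):
    # a plain one-argument function stored as a staticmethod, as the comment above describes
    transform = staticmethod(lambda value: value.upper())

d = Demo()
try:
    print(d.transform(d, 'foo'))   # TypeError: the lambda only takes one argument
except TypeError:
    print(d.transform('foo'))      # prints 'FOO'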
<SYSTEM_TASK:>
Remove the class prefix from the suffix
<END_TASK>
<USER_TASK:>
Description:
def remove_prefix(cls, suffix):
""""Remove the class prefix from the suffix
The collection pass the full suffix used in the filters to the index.
But to know if it is valid, we have to remove the prefix to get the
real suffix, for example to check if the suffix is handled by the index.
Parameters
-----------
suffix: str
The full suffix to split
Returns
-------
str or None
The suffix without the prefix, or None if the remaining suffix is ''
Raises
------
IndexError:
If the suffix doesn't contain the prefix
""" |
if not cls.prefix:
return suffix
if cls.prefix == suffix:
return None
return (suffix or '').split(cls.prefix + '__')[1] or None |
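A standalone sketch of the same splitting logic, with an assumed prefix value of 'capitalized' (illustrative only, not from the source):
def _remove_prefix(prefix, suffix):
    # mirrors the method above, written as a plain function for demonstration
    if not prefix:
        return suffix
    if prefix == suffix:
        return None
    return (suffix or '').split(prefix + '__')[1] or None

assert _remove_prefix('capitalized', 'capitalized__eq') == 'eq'
assert _remove_prefix('capitalized', 'capitalized') is None
# _remove_prefix('capitalized', 'eq') raises IndexError because the prefix is absent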
<SYSTEM_TASK:>
Tells if the current index is the one attached to the model field, not instance field
<END_TASK>
<USER_TASK:>
Description:
def attached_to_model(self):
"""Tells if the current index is the one attached to the model field, not instance field""" |
try:
if not bool(self.model):
return False
except AttributeError:
return False
else:
try:
return not bool(self.instance)
except AttributeError:
return True |
<SYSTEM_TASK:>
Restore the index in its previous state
<END_TASK>
<USER_TASK:>
Description:
def _rollback(self):
"""Restore the index in its previous state
This uses values that were indexed/deindexed since the last call
to `_reset_cache`.
This is used when an error is encountered while updating a value,
to return to the previous state
""" |
# work on copies to avoid using sets on self that may be updated during the process
indexed_values = set(self._indexed_values)
deindexed_values = set(self._deindexed_values)
for args in indexed_values:
self.remove(*args)
for args in deindexed_values:
self.add(*args, check_uniqueness=False) |
<SYSTEM_TASK:>
Check uniqueness of pks
<END_TASK>
<USER_TASK:>
Description:
def assert_pks_uniqueness(self, pks, exclude, value):
"""Check uniqueness of pks
Parameters
-----------
pks: iterable
The pks to check for uniqueness. If more than one distinct pk is found,
it will raise. If only one is found and it differs from `exclude`, it will
raise too.
exclude: str
The pk that we accept to be the only one in `pks`. For example
the pk of the instance we want to check for uniqueness: we don't
want to raise if the value is the one already set for this instance
value: any
Only to be displayed in the error message.
Raises
------
UniquenessError
- If at least two different pks
- If only one pk that is not the `exclude` one
""" |
pks = list(set(pks))
if len(pks) > 1:
# this should not happen!
raise UniquenessError(
"Multiple values indexed for unique field %s.%s: %s" % (
self.model.__name__, self.field.name, pks
)
)
elif len(pks) == 1 and (not exclude or pks[0] != exclude):
self.connection.delete(self.field.key)
raise UniquenessError(
'Value "%s" already indexed for unique field %s.%s (for instance %s)' % (
self.normalize_value(value), self.model.__name__, self.field.name, pks[0]
)
) |
<SYSTEM_TASK:>
Will deindex all the values for the current field
<END_TASK>
<USER_TASK:>
Description:
def clear(self, chunk_size=1000, aggressive=False):
"""Will deindex all the value for the current field
Parameters
----------
chunk_size: int
Defaults to 1000; the number of instances to load at once when not in aggressive mode.
aggressive: bool
Defaults to ``False``. When ``False``, the actual collection of instances will
be run through to deindex all the values.
When ``True``, the database keys will be scanned to find keys that match the
pattern of the keys used by the index. This is a lot faster and may find forgotten keys,
but it may also find keys not related to the index.
Should be set to ``True`` if you are not sure about the already indexed values.
Raises
------
AssertionError
If called from an index tied to an instance field. It must be called from the model field
Examples
--------
>>> MyModel.get_field('myfield')._indexes[0].clear()
""" |
assert self.attached_to_model, \
'`clear` can only be called on an index attached to the model field'
if aggressive:
keys = self.get_all_storage_keys()
with self.model.database.pipeline(transaction=False) as pipe:
for key in keys:
pipe.delete(key)
pipe.execute()
else:
start = 0
while True:
instances = self.model.collection().sort().instances(skip_exist_test=True)[start:start + chunk_size]
for instance in instances:
field = instance.get_instance_field(self.field.name)
value = field.proxy_get()
if value is not None:
field.deindex(value, only_index=self)
if len(instances) < chunk_size: # not enough data, it means we are done
break
start += chunk_size |
<SYSTEM_TASK:>
Rebuild the whole index for this field.
<END_TASK>
<USER_TASK:>
Description:
def rebuild(self, chunk_size=1000, aggressive_clear=False):
"""Rebuild the whole index for this field.
Parameters
----------
chunk_size: int
Defaults to 1000; the number of instances to load at once.
aggressive_clear: bool
Will be passed to the `aggressive` argument of the `clear` method.
If `False`, all values will be normally deindexed. If `True`, the work
will be done at low level, scanning for keys that may match the ones used by the index
Examples
--------
>>> MyModel.get_field('myfield')._indexes[0].rebuild()
""" |
assert self.attached_to_model, \
'`rebuild` can only be called on an index attached to the model field'
self.clear(chunk_size=chunk_size, aggressive=aggressive_clear)
start = 0
while True:
instances = self.model.collection().sort().instances(skip_exist_test=True)[start:start + chunk_size]
for instance in instances:
field = instance.get_instance_field(self.field.name)
value = field.proxy_get()
if value is not None:
field.index(value, only_index=self)
if len(instances) < chunk_size: # not enough data, it means we are done
break
start += chunk_size |
<SYSTEM_TASK:>
Prepare the value to be stored in the zset
<END_TASK>
<USER_TASK:>
Description:
def prepare_value_for_storage(self, value, pk):
"""Prepare the value to be stored in the zset
We'll store the value and pk concatenated.
For the parameters, see BaseRangeIndex.prepare_value_for_storage
""" |
value = super(TextRangeIndex, self).prepare_value_for_storage(value, pk)
return self.separator.join([value, str(pk)]) |
<SYSTEM_TASK:>
Taking a string that was a member of the zset, extract the value and pk
<END_TASK>
<USER_TASK:>
Description:
def _extract_value_from_storage(self, string):
"""Taking a string that was a member of the zset, extract the value and pk
Parameters
----------
string: str
The member extracted from the sorted set
Returns
-------
tuple
Tuple with the value and the pk, extracted from the string
""" |
parts = string.split(self.separator)
pk = parts.pop()
return self.separator.join(parts), pk |
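A minimal round-trip sketch of the join/split convention used by prepare_value_for_storage and _extract_value_from_storage above, assuming the separator is u'\xff' (an assumption; the real value is defined on the class elsewhere):
SEPARATOR = u'\xff'                            # assumed value, for illustration
member = SEPARATOR.join([u'foo', u'12'])       # value 'foo' stored with pk '12'
parts = member.split(SEPARATOR)
pk = parts.pop()
assert (SEPARATOR.join(parts), pk) == (u'foo', u'12')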
<SYSTEM_TASK:>
Compute the boundaries to pass to zrangebylex depending on the filter type
<END_TASK>
<USER_TASK:>
Description:
def get_boundaries(self, filter_type, value):
"""Compute the boundaries to pass to zrangebylex depending of the filter type
The third return value, ``exclude`` is ``None`` except for the filters
`lt` and `gt` because we cannot explicitly exclude it when
querying the sorted-set
For the parameters, see BaseRangeIndex.store
Notes
-----
For zrangebylex:
- `(` means "not included"
- `[` means "included"
- `\xff` is the last char; it allows expressing "starting with"
- `-` alone means "from the very beginning"
- `+` alone means "to the very end"
""" |
assert filter_type in self.handled_suffixes
start = '-'
end = '+'
exclude = None
if filter_type in (None, 'eq'):
# we include the separator to only get the members with the exact value
start = u'[%s%s' % (value, self.separator)
end = start.encode('utf-8') + b'\xff'
elif filter_type == 'gt':
# starting at the value, excluded
start = u'(%s' % value
exclude = value
elif filter_type == 'gte':
# starting at the value, included
start = u'[%s' % value
elif filter_type == 'lt':
# ending with the value, excluded
end = u'(%s' % value
exclude = value
elif filter_type == 'lte':
# ending with the value, included (but not starting with, hence the separator)
end = u'[%s%s' % (value, self.separator)
end = end.encode('utf-8') + b'\xff'
elif filter_type == 'startswith':
# using `\xff` to simulate "startswith"
start = u'[%s' % value
end = start.encode('utf-8') + b'\xff'
return start, end, exclude |
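For context on the notes above, a small redis-py sketch (independent of this class) showing how zrangebylex boundaries behave; it assumes redis-py >= 3 and a local Redis server, and the key and values are made up:
import redis

r = redis.StrictRedis(decode_responses=True)
r.zadd('lexdemo', {'bar': 0, 'baz': 0, 'foo': 0})   # equal scores: lexicographic order
print(r.zrangebylex('lexdemo', '[baz', '+'))        # ['baz', 'foo']  '[' includes the boundary
print(r.zrangebylex('lexdemo', '(baz', '+'))        # ['foo']         '(' excludes it
print(r.zrangebylex('lexdemo', '-', '(baz'))        # ['bar']         '-'/'+' are the extremes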
<SYSTEM_TASK:>
Compute the boundaries to pass to the sorted-set command depending on the filter type
<END_TASK>
<USER_TASK:>
Description:
def get_boundaries(self, filter_type, value):
"""Compute the boundaries to pass to the sorted-set command depending of the filter type
The third return value, ``exclude`` is always ``None`` because we can easily restrict the
score to filter on in the sorted-set.
For the parameters, see BaseRangeIndex.store
Notes
-----
For zrangebyscore:
- `(` means "not included"
- `-inf` alone means "from the very beginning"
- `+inf` alone means "to the very end"
""" |
assert filter_type in self.handled_suffixes
start = '-inf'
end = '+inf'
exclude = None
if filter_type in (None, 'eq'):
# only one score
start = end = value
elif filter_type == 'gt':
start = '(%s' % value
elif filter_type == 'gte':
start = value
elif filter_type == 'lt':
end = '(%s' % value
elif filter_type == 'lte':
end = value
return start, end, exclude |
<SYSTEM_TASK:>
Print nice greetings.
<END_TASK>
<USER_TASK:>
Description:
def hello_command(name, print_counter=False, repeat=10):
"""Print nice greetings.""" |
for i in range(repeat):
if print_counter:
print i+1,
print 'Hello, %s!' % name |
<SYSTEM_TASK:>
Wrapper to send posted scene command and get response
<END_TASK>
<USER_TASK:>
Description:
def scene_command(self, command):
"""Wrapper to send posted scene command and get response""" |
self.logger.info("scene_command: Group %s Command %s", self.group_id, command)
command_url = self.hub.hub_url + '/0?' + command + self.group_id + "=I=0"
return self.hub.post_direct_command(command_url) |
<SYSTEM_TASK:>
Store all content of the given ListField in a redis set.
<END_TASK>
<USER_TASK:>
Description:
def _list_to_set(self, list_key, set_key):
"""
Store all content of the given ListField in a redis set.
Use scripting if available to avoid retrieving all values locally from
the list before sending them back to the set
""" |
if self.cls.database.support_scripting():
self.cls.database.call_script(
# be sure to use the script dict at the class level
# to avoid registering it many times
script_dict=self.__class__.scripts['list_to_set'],
keys=[list_key, set_key]
)
else:
conn = self.cls.get_connection()
conn.sadd(set_key, *conn.lrange(list_key, 0, -1)) |
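A hedged sketch of what the scripting path could look like with redis-py's register_script; the Lua body below is an illustration, not the library's actual 'list_to_set' script, and it assumes a local Redis server:
import redis

r = redis.StrictRedis()
# Copy every member of a list into a set server-side, so values never transit
# through the client. Note: `unpack` hits Lua's argument limit on very long lists.
list_to_set = r.register_script("""
local vals = redis.call('LRANGE', KEYS[1], 0, -1)
if #vals > 0 then
    redis.call('SADD', KEYS[2], unpack(vals))
end
return #vals
""")
r.rpush('demo_list', 'a', 'b', 'a')
list_to_set(keys=['demo_list', 'demo_set'])   # 'demo_set' now holds {'a', 'b'}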
<SYSTEM_TASK:>
Effectively retrieve data according to lazy_collection.
<END_TASK>
<USER_TASK:>
Description:
def _collection(self):
"""
Effectively retrieve data according to lazy_collection.
If we have a stored collection, without any result, return an empty list
""" |
old_sort_limits_and_len_mode = None if self._sort_limits is None else self._sort_limits.copy(), self._len_mode
old_sorts = None if self._sort is None else self._sort.copy(),\
None if self._sort_by_sortedset is None else self._sort_by_sortedset.copy()
try:
if self.stored_key and not self._stored_len:
if self._len_mode:
self._len = 0
self._len_mode = False
self._sort_limits = {}
return []
# Manage sort desc added by original `__getitem__` when we sort by score
if self._sort_by_sortedset and self._sort and self._sort.get('desc'):
self._sort = None
self._sort_by_sortedset['desc'] = not self._sort_by_sortedset.get('desc', False)
return super(ExtendedCollectionManager, self)._collection
finally:
self._sort_limits, self._len_mode = old_sort_limits_and_len_mode
self._sort, self._sort_by_sortedset = old_sorts |
<SYSTEM_TASK:>
The original "_prepare_sets" method simply returns the list of sets in
<END_TASK>
<USER_TASK:>
Description:
def _prepare_sets(self, sets):
"""
The original "_prepare_sets" method simply returns the list of sets in
_lazy_collection, known to be all keys of redis sets.
As the new "intersect" method can accept different types of "set", we
have to handle them because we must return only keys of redis sets.
""" |
if self.stored_key and not self.stored_key_exists():
raise DoesNotExist('This collection is based on a previous one, '
'stored at a key that does not exist anymore.')
conn = self.cls.get_connection()
all_sets = set()
tmp_keys = set()
lists = []
def add_key(key, key_type=None, is_tmp=False):
if not key_type:
key_type = conn.type(key)
if key_type == 'set':
all_sets.add(key)
elif key_type == 'zset':
all_sets.add(key)
self._has_sortedsets = True
elif key_type == 'list':
# if only one list, and no sets, at the end we'll directly use the list
# else lists will be converted to sets
lists.append(key)
elif key_type == 'none':
# considered as an empty set
all_sets.add(key)
else:
raise ValueError('Cannot use redis key %s of type %s for filtering' % (
key, key_type
))
if is_tmp:
tmp_keys.add(key)
for set_ in sets:
if isinstance(set_, str):
add_key(set_)
elif isinstance(set_, ParsedFilter):
value = set_.value
# We have a RedisModel and we'll use its pk, or a RedisField
# (single value) and we'll use its value
if isinstance(value, RedisModel):
value = value.pk.get()
elif isinstance(value, SingleValueField):
value = value.proxy_get()
elif isinstance(value, RedisField):
raise ValueError(u'Invalid filter value for %s: %s' % (set_.index.field.name, value))
for index_key, key_type, is_tmp in set_.index.get_filtered_keys(
set_.suffix,
accepted_key_types=self._accepted_key_types,
*(set_.extra_field_parts + [value])
):
if key_type not in self._accepted_key_types:
raise ValueError('The index key returned by the index %s is not valid' % (
set_.index.__class__.__name__
))
add_key(index_key, key_type, is_tmp)
elif isinstance(set_, SetField):
# Use the set key. If we need to intersect, we'll use
# sunionstore, and if not, store accepts set
add_key(set_.key, 'set')
elif isinstance(set_, SortedSetField):
# Use the sorted set key. If we need to intersect, we'll use
# zinterstore, and if not, store accepts zset
add_key(set_.key, 'zset')
elif isinstance(set_, (ListField, _StoredCollection)):
add_key(set_.key, 'list')
elif isinstance(set_, tuple) and len(set_):
# if we got a list or set, create a redis set to hold its values
tmp_key = self._unique_key()
conn.sadd(tmp_key, *set_)
add_key(tmp_key, 'set', True)
else:
raise ValueError('Invalid filter type')
if lists:
if not len(all_sets) and len(lists) == 1:
# only one list, nothing else, we can return the list key
all_sets = {lists[0]}
else:
# we have many sets/lists, we need to convert them to sets
for list_key in lists:
# many sets, convert the list to a simple redis set
tmp_key = self._unique_key()
self._list_to_set(list_key, tmp_key)
add_key(tmp_key, 'set', True)
return all_sets, tmp_keys |
<SYSTEM_TASK:>
Given a list of sets, combine them to create the final set that will be
<END_TASK>
<USER_TASK:>
Description:
def _combine_sets(self, sets, final_set):
"""
Given a list of sets, combine them to create the final set that will be
used to make the final redis call.
If we have at least one sorted set, use zinterstore instead of sunionstore
""" |
if self._has_sortedsets:
self.cls.get_connection().zinterstore(final_set, list(sets))
else:
final_set = super(ExtendedCollectionManager, self)._combine_sets(sets, final_set)
return final_set |
<SYSTEM_TASK:>
The final redis call to obtain the values to return from the "final_set"
<END_TASK>
<USER_TASK:>
Description:
def _final_redis_call(self, final_set, sort_options):
"""
The final redis call to obtain the values to return from the "final_set"
with some sort options.
If we have at least one sorted set and we don't have any sort
options, call zrange on the final set which is the result of a call to
zinterstore.
""" |
conn = self.cls.get_connection()
# we have a sorted set without need to sort, use zrange
if self._has_sortedsets and sort_options is None:
return conn.zrange(final_set, 0, -1)
# we have a stored collection, without other filter, and no need to
# sort, use lrange
if self.stored_key and not self._lazy_collection['sets']\
and len(self._lazy_collection['intersects']) == 1\
and (sort_options is None or sort_options == {'by': 'nosort'}):
return conn.lrange(final_set, 0, -1)
# normal call
return super(ExtendedCollectionManager, self)._final_redis_call(
final_set, sort_options) |
<SYSTEM_TASK:>
Return the length of the final collection, directly asking redis for the
<END_TASK>
<USER_TASK:>
Description:
def _collection_length(self, final_set):
"""
Return the length of the final collection, directly asking redis for the
count without calling sort
""" |
conn = self.cls.get_connection()
# we have a sorted set without need to sort, use zcard
if self._has_sortedsets:
return conn.zcard(final_set)
# we have a stored collection, without other filter, use llen
elif self.stored_key and not self._lazy_collection['sets']\
and len(self._lazy_collection['intersects']) == 1:
return conn.llen(final_set)
# normal call
return super(ExtendedCollectionManager, self)._collection_length(final_set) |
<SYSTEM_TASK:>
Create the keys to sort on the sorted set referenced in
<END_TASK>
<USER_TASK:>
Description:
def _prepare_sort_by_score(self, values, sort_options):
"""
Create the keys to sort on the sorted set referenced in
self._sort_by_sortedset and adapt the sort options
""" |
# create the keys
base_tmp_key, tmp_keys = self._zset_to_keys(
key=self._sort_by_sortedset['by'],
values=values,
)
# ask to sort on our new keys
sort_options['by'] = '%s:*' % base_tmp_key
# retrieve original sort parameters
for key in ('desc', 'alpha', 'get', 'store'):
if key in self._sort_by_sortedset:
sort_options[key] = self._sort_by_sortedset[key]
# if we want to get the score with values/values_list
if sort_options.get('get'):
try:
pos = sort_options['get'].index(SORTED_SCORE)
except ValueError:
pass
else:
sort_options['get'][pos] = '%s:*' % base_tmp_key
return base_tmp_key, tmp_keys |
<SYSTEM_TASK:>
Prepare sort options for _values attributes.
<END_TASK>
<USER_TASK:>
Description:
def _prepare_sort_options(self, has_pk):
"""
Prepare sort options for _values attributes.
If we manage the sort by score after getting the result, we do not want to
get values from the first sort call, but only from the last one, after
converting the zset results into keys
""" |
sort_options = super(ExtendedCollectionManager, self)._prepare_sort_options(has_pk)
if self._values:
# if we asked for values, we have to use the redis 'sort'
# command, which is able to return other fields.
if not sort_options:
sort_options = {}
sort_options['get'] = self._values['fields']['keys']
if self._sort_by_sortedset_after:
for key in ('get', 'store'):
if key in self._sort_by_sortedset:
del self._sort_by_sortedset[key]
if sort_options and (not has_pk or self._want_score_value):
for key in ('get', 'store'):
if key in sort_options:
self._sort_by_sortedset[key] = sort_options.pop(key)
if not sort_options:
sort_options = None
return sort_options |
<SYSTEM_TASK:>
Add intersects to sets and call parent's _get_final_set.
<END_TASK>
<USER_TASK:>
Description:
def _get_final_set(self, sets, pk, sort_options):
"""
Add intersects to sets and call parent's _get_final_set.
If we have to sort by sorted-set score and we have a slice, we have to
convert the whole sorted set to keys now.
""" |
if self._lazy_collection['intersects']:
# if the intersect method was called, we have new sets to intersect
# with the global set of sets.
# And if there are no real filters, we add the set of the whole
# collection because we cannot be sure that entries in "intersects"
# are all real primary keys
sets = sets[::]
sets.extend(self._lazy_collection['intersects'])
if not self._lazy_collection['sets'] and not self.stored_key:
sets.append(self.cls.get_field('pk').collection_key)
final_set, keys_to_delete_later = super(ExtendedCollectionManager,
self)._get_final_set(sets, pk, sort_options)
# if we have a slice and we want to sort by the score of a sorted set,
# as redis sort command doesn't handle this, we have to create keys for
# each value of the sorted set and sort on them
# @antirez, y u don't allow this !!??!!
if final_set and self._sort_by_sortedset_before:
# TODO: if we have filters, maybe apply _zset_to_keys to only
# intersected values
base_tmp_key, tmp_keys = self._prepare_sort_by_score(None, sort_options)
# new keys have to be deleted once the final sort is done
if not keys_to_delete_later:
keys_to_delete_later = []
keys_to_delete_later.append(base_tmp_key)
keys_to_delete_later += tmp_keys
return final_set, keys_to_delete_later |
<SYSTEM_TASK:>
In addition to the normal _add_filters, this one accepts RedisField objects
<END_TASK>
<USER_TASK:>
Description:
def _add_filters(self, **filters):
"""
In addition to the normal _add_filters, this one accepts RedisField objects
on the right part of a filter. The value will be fetched from redis when
calling the collection.
The filter value can also be a model instance, in which case its PK will
be fetched when calling the collection, too.
""" |
string_filters = filters.copy()
for key, value in filters.items():
is_extended = False
if isinstance(value, RedisField):
# we will fetch the value when running the collection
if (not isinstance(value, SingleValueField)
or getattr(value, '_instance', None) is None):
raise ValueError('If a field is used as a filter value, it '
'must be a simple value field attached to '
'an instance')
is_extended = True
elif isinstance(value, RedisModel):
# we will fetch the PK when running the collection
is_extended = True
if is_extended:
if self._field_is_pk(key):
# create a RawFilter which will be used in _get_pk
raw_filter = RawFilter(key, value)
self._lazy_collection['pks'].add(raw_filter)
else:
# create a ParsedFilter which will be used in _prepare_sets
index, suffix, extra_field_parts = self._parse_filter_key(key)
parsed_filter = ParsedFilter(index, suffix, extra_field_parts, value)
self._lazy_collection['sets'].append(parsed_filter)
string_filters.pop(key)
super(ExtendedCollectionManager, self)._add_filters(**string_filters)
return self |
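A hedged usage sketch of what this enables; the model and field names below are hypothetical and only illustrate passing an instance or a field as a filter value:
# group = <a Group RedisModel instance>
# Person.collection(group=group)                 # its PK is fetched when the
#                                                # collection is evaluated
# Person.collection(firstname=other.firstname)   # a single-value field: its value
#                                                # is fetched from redis at that time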
<SYSTEM_TASK:>
Override the default _get_pk method to retrieve the real pk value if we
<END_TASK>
<USER_TASK:>
Description:
def _get_pk(self):
"""
Override the default _get_pk method to retrieve the real pk value if we
have a SingleValueField or a RedisModel instead of a real PK value
""" |
pk = super(ExtendedCollectionManager, self)._get_pk()
if pk is not None and isinstance(pk, RawFilter):
# We have a RedisModel and we want its pk, or a RedisField
# (single value) and we want its value
if isinstance(pk.value, RedisModel):
pk = pk.value.pk.get()
elif isinstance(pk.value, SingleValueField):
pk = pk.value.proxy_get()
else:
raise ValueError(u'Invalid filter value for a PK: %s' % pk.value)
return pk |
<SYSTEM_TASK:>
Set the current collection as based on a stored one. The key argument
<END_TASK>
<USER_TASK:>
Description:
def from_stored(self, key):
"""
Set the current collection as based on a stored one. The key argument
is the key of the stored collection.
""" |
# only one stored key allowed
if self.stored_key:
raise ValueError('This collection is already based on a stored one')
# prepare the collection
self.stored_key = key
self.intersect(_StoredCollection(self.cls.get_connection(), key))
self.sort(by='nosort') # keep stored order
# count the number of results to manage empty result (to not behave like
# expired key)
self._stored_len = self.cls.get_connection().llen(key)
return self |
<SYSTEM_TASK:>
Ask the collection to return a list of dict of given fields for each
<END_TASK>
<USER_TASK:>
Description:
def values(self, *fields):
"""
Ask the collection to return a list of dict of given fields for each
instance found in the collection.
If no fields are given, all "simple value" fields are used.
""" |
if not fields:
fields = self._get_simple_fields()
fields = self._coerce_fields_parameters(fields)
self._instances = False
self._values = {'fields': fields, 'mode': 'dicts'}
return self |
<SYSTEM_TASK:>
Read iteration of size iterint
<END_TASK>
<USER_TASK:>
Description:
def readiter(d):
""" Read iteration of size iterint
""" |
da = ms.getdata([d['datacol'],'axis_info','u','v','w','flag','data_desc_id'], ifraxis=True)
good = n.where((da['data_desc_id']) == d['spwlist'][0])[0] # take first spw
time0 = da['axis_info']['time_axis']['MJDseconds'][good]
data0 = n.transpose(da[d['datacol']], axes=[3,2,1,0])[good]
flag0 = n.transpose(da['flag'], axes=[3,2,1,0])[good]
u0 = da['u'].transpose()[good] * d['freq_orig'][0] * (1e9/3e8) # uvw are in m, so divide by wavelength of first chan to set in lambda
v0 = da['v'].transpose()[good] * d['freq_orig'][0] * (1e9/3e8)
w0 = da['w'].transpose()[good] * d['freq_orig'][0] * (1e9/3e8)
if len(d['spwlist']) > 1:
for spw in d['spwlist'][1:]:
good = n.where((da['data_desc_id']) == spw)[0]
data1 = n.transpose(da[d['datacol']], axes=[3,2,1,0])[good]
data0 = n.concatenate( (data0, data1), axis=2 )
flag0 = n.concatenate( (flag0, n.transpose(da['flag'], axes=[3,2,1,0])[good]), axis=2 )
del da
data0 = data0[:,:,d['chans'],:] * n.invert(flag0[:,:,d['chans'],:]) # flag==1 means bad data (for vla)
iterstatus = ms.iternext()
return data0.astype('complex64'), u0.astype('float32'), v0.astype('float32'), w0.astype('float32'), time0.astype('float32') |
<SYSTEM_TASK:>
Converts django's `LANGUAGE_CODE` settings to a proper locale code.
<END_TASK>
<USER_TASK:>
Description:
def language_to_locale(language):
"""
Converts django's `LANGUAGE_CODE` settings to a proper locale code.
""" |
tokens = language.split('-')
if len(tokens) == 1:
return tokens[0]
return "%s_%s" % (tokens[0], tokens[1].upper()) |
<SYSTEM_TASK:>
Find input files and log initialization info
<END_TASK>
<USER_TASK:>
Description:
def initializenb():
""" Find input files and log initialization info """ |
logger.info('Working directory: {0}'.format(os.getcwd()))
logger.info('Run on {0}'.format(asctime()))
try:
fileroot = os.environ['fileroot']
logger.info('Setting fileroot to {0} from environment variable.\n'.format(fileroot))
candsfile = 'cands_{0}_merge.pkl'.format(fileroot)
noisefile = 'noise_{0}_merge.pkl'.format(fileroot)
except KeyError:
sdmdir = os.getcwd()
logger.info('Setting sdmdir to current directory {0}\n'.format(os.path.abspath(sdmdir)))
candsfiles = glob.glob('cands_*_merge.pkl')
noisefiles = glob.glob('noise_*_merge.pkl')
if len(candsfiles) == 1 and len(noisefiles) == 1:
logger.info('Found one cands/merge file set')
else:
logger.warn('Found multiple cands/noise file sets. Taking first.')
candsfile = candsfiles[0]
noisefile = noisefiles[0]
fileroot = candsfile[len('cands_'):-len('_merge.pkl')]  # slice off 'cands_' and '_merge.pkl' (lstrip/rstrip remove character sets, not affixes)
logger.info('Set: \n\t candsfile {} \n\t noisefile {} \n\t fileroot {} '.format(candsfile, noisefile, fileroot))
return (candsfile, noisefile, fileroot) |
<SYSTEM_TASK:>
Make a light-weight loc figure
<END_TASK>
<USER_TASK:>
Description:
def plotloc(data, circleinds=[], crossinds=[], edgeinds=[], url_path=None, fileroot=None,
tools="hover,tap,pan,box_select,wheel_zoom,reset", plot_width=450, plot_height=400):
""" Make a light-weight loc figure """ |
fields = ['l1', 'm1', 'sizes', 'colors', 'snrs', 'key']
if not circleinds: circleinds = range(len(data['snrs']))
# set ranges
datalen = len(data['dm'])
inds = circleinds + crossinds + edgeinds
l1 = [data['l1'][i] for i in inds]
l1_min = min(l1)
l1_max = max(l1)
m1 = [data['m1'][i] for i in inds]
m1_min = min(m1)
m1_max = max(m1)
source = ColumnDataSource(data = dict({(key, tuple([value[i] for i in circleinds if i not in edgeinds]))
for (key, value) in data.iteritems() if key in fields}))
loc = Figure(plot_width=plot_width, plot_height=plot_height, toolbar_location="left", x_axis_label='l1 (rad)', y_axis_label='m1 (rad)',
x_range=(l1_min, l1_max), y_range=(m1_min,m1_max), tools=tools, output_backend='webgl')
loc.circle('l1', 'm1', size='sizes', line_color=None, fill_color='colors', fill_alpha=0.2, source=source)
if crossinds:
sourceneg = ColumnDataSource(data = dict({(key, tuple([value[i] for i in crossinds]))
for (key, value) in data.iteritems() if key in fields}))
loc.cross('l1', 'm1', size='sizes', line_color='colors', line_alpha=0.3, source=sourceneg)
if edgeinds:
sourceedge = ColumnDataSource(data = dict({(key, tuple([value[i] for i in edgeinds]))
for (key, value) in data.iteritems() if key in fields}))
loc.circle('l1', 'm1', size='sizes', line_color='colors', fill_color='colors', source=sourceedge, line_alpha=0.5, fill_alpha=0.2)
hover = loc.select(dict(type=HoverTool))
hover.tooltips = OrderedDict([('SNR', '@snrs'), ('key', '@key')])
if url_path and fileroot:
url = '{}/cands_{}[email protected]'.format(url_path, fileroot)
taptool = loc.select(type=TapTool)
taptool.callback = OpenURL(url=url)
return loc |
<SYSTEM_TASK:>
Make a light-weight stat figure
<END_TASK>
<USER_TASK:>
Description:
def plotstat(data, circleinds=None, crossinds=None, edgeinds=None, url_path=None, fileroot=None,
tools="hover,tap,pan,box_select,wheel_zoom,reset", plot_width=450, plot_height=400):
""" Make a light-weight stat figure """ |
fields = ['imkur', 'specstd', 'sizes', 'colors', 'snrs', 'key']
if not circleinds: circleinds = range(len(data['snrs']))
# set ranges
datalen = len(data['dm'])
inds = circleinds + crossinds + edgeinds
specstd = [data['specstd'][i] for i in inds]
specstd_min = min(specstd)
specstd_max = max(specstd)
imkur = [data['imkur'][i] for i in inds]
imkur_min = min(imkur)
imkur_max = max(imkur)
source = ColumnDataSource(data = dict({(key, tuple([value[i] for i in circleinds if i not in edgeinds]))
for (key, value) in data.iteritems() if key in fields}))
stat = Figure(plot_width=plot_width, plot_height=plot_height, toolbar_location="left", x_axis_label='Spectral std',
y_axis_label='Image kurtosis', x_range=(specstd_min, specstd_max),
y_range=(imkur_min, imkur_max), tools=tools, output_backend='webgl')
stat.circle('specstd', 'imkur', size='sizes', line_color=None, fill_color='colors', fill_alpha=0.2, source=source)
if crossinds:
sourceneg = ColumnDataSource(data = dict({(key, tuple([value[i] for i in crossinds]))
for (key, value) in data.iteritems() if key in fields}))
stat.cross('specstd', 'imkur', size='sizes', line_color='colors', line_alpha=0.3, source=sourceneg)
if edgeinds:
sourceedge = ColumnDataSource(data = dict({(key, tuple([value[i] for i in edgeinds]))
for (key, value) in data.iteritems() if key in fields}))
stat.circle('specstd', 'imkur', size='sizes', line_color='colors', fill_color='colors', source=sourceedge, line_alpha=0.5, fill_alpha=0.2)
hover = stat.select(dict(type=HoverTool))
hover.tooltips = OrderedDict([('SNR', '@snrs'), ('key', '@key')])
if url_path and fileroot:
url = '{}/cands_{}[email protected]'.format(url_path, fileroot)
taptool = stat.select(type=TapTool)
taptool.callback = OpenURL(url=url)
return stat |
<SYSTEM_TASK:>
Make a light-weight norm figure
<END_TASK>
<USER_TASK:>
Description:
def plotnorm(data, circleinds=[], crossinds=[], edgeinds=[], url_path=None, fileroot=None,
tools="hover,tap,pan,box_select,wheel_zoom,reset", plot_width=450, plot_height=400):
""" Make a light-weight norm figure """ |
fields = ['zs', 'sizes', 'colors', 'abssnr', 'key', 'snrs']
if not circleinds: circleinds = range(len(data['snrs']))
source = ColumnDataSource(data = dict({(key, tuple([value[i] for i in circleinds if i not in edgeinds]))
for (key, value) in data.iteritems() if key in fields}))
norm = Figure(plot_width=plot_width, plot_height=plot_height, toolbar_location="left", x_axis_label='SNR observed',
y_axis_label='SNR expected', tools=tools, output_backend='webgl')
norm.circle('abssnr', 'zs', size='sizes', line_color=None, fill_color='colors', fill_alpha=0.2, source=source)
if crossinds:
sourceneg = ColumnDataSource(data = dict({(key, tuple([value[i] for i in crossinds]))
for (key, value) in data.iteritems() if key in fields}))
norm.cross('abssnr', 'zs', size='sizes', line_color='colors', line_alpha=0.3, source=sourceneg)
if edgeinds:
sourceedge = ColumnDataSource(data = dict({(key, tuple([value[i] for i in edgeinds]))
for (key, value) in data.iteritems() if key in fields}))
norm.circle('abssnr', 'zs', size='sizes', line_color='colors', fill_color='colors', source=sourceedge, line_alpha=0.5, fill_alpha=0.2)
hover = norm.select(dict(type=HoverTool))
hover.tooltips = OrderedDict([('SNR', '@snrs'), ('key', '@key')])
if url_path and fileroot:
url = '{}/cands_{}[email protected]'.format(url_path, fileroot)
taptool = norm.select(type=TapTool)
taptool.callback = OpenURL(url=url)
return norm |
<SYSTEM_TASK:>
Make a two-panel plot to summarize the noise analysis with an estimated flux scale
<END_TASK>
<USER_TASK:>
Description:
def plotnoise(noisepkl, mergepkl, plot_width=950, plot_height=400):
""" Make two panel plot to summary noise analysis with estimated flux scale """ |
d = pickle.load(open(mergepkl))
ndist, imstd, flagfrac = plotnoisedist(noisepkl, plot_width=plot_width/2, plot_height=plot_height)
fluxscale = calcfluxscale(d, imstd, flagfrac)
logger.info('Median image noise is {0:.3} Jy.'.format(fluxscale*imstd))
ncum, imnoise = plotnoisecum(noisepkl, fluxscale=fluxscale, plot_width=plot_width/2, plot_height=plot_height)
hndle = show(Row(ndist, ncum, width=plot_width, height=plot_height))
return imnoise |
<SYSTEM_TASK:>
Merged noise pkl converted to interactive cumulative histogram
<END_TASK>
<USER_TASK:>
Description:
def plotnoisecum(noisepkl, fluxscale=1, plot_width=450, plot_height=400):
""" Merged noise pkl converted to interactive cumulative histogram
noisepkl is standard noise pickle file.
fluxscale is scaling applied by gain calibrator. telcal solutions have fluxscale=1.
Also returns corrected imnoise values if a non-unity fluxscale is provided.
""" |
# noise histogram
noises = read_noise(noisepkl)
imnoise = np.sort(fluxscale*noises[4])
frac = [float(count)/len(imnoise) for count in reversed(range(1, len(imnoise)+1))]
noiseplot = Figure(plot_width=plot_width, plot_height=plot_height, toolbar_location="above",
x_axis_label='Image noise (Jy; cal scaling {0:.3})'.format(fluxscale),
y_axis_label='Cumulative fraction', tools='pan, wheel_zoom, reset')
noiseplot.line(imnoise, frac)
if fluxscale != 1:
return noiseplot, imnoise
else:
return noiseplot |
<SYSTEM_TASK:>
Given state dict and noise properties, estimate flux scale at the VLA
<END_TASK>
<USER_TASK:>
Description:
def calcfluxscale(d, imstd_med, flagfrac_med):
""" Given state dict and noise properties, estimate flux scale at the VLA
imstd and flagfrac are expected to be median (typical) values from sample in merged noise pkl.
""" |
# useful functions and VLA parameters
sensitivity = lambda sefd, dt, bw, eta, nbl, npol: sefd/(eta*np.sqrt(nbl*2 * dt * bw * npol))
nbl = lambda nant: nant*(nant-1)/2
eta = {'L': 0.92, 'S': 0.92, 'C': 0.8, 'X': 0.8} # correlator efficiency
sefd = {'L': 420, 'S': 370, 'C': 310, 'X': 250} # fixed to match exposure calculator int time to 100 microJy.
bw = sum([d['spw_nchan_select'][i]*d['spw_chansize'][i] for i in range(len(d['spw_chansize']))])
dt = d['inttime']
npol = d['npol']
nant = d['nants']
freq = d['freq'][0]
if (freq >= 1 and freq < 2):
band = 'L'
elif (freq >= 2 and freq < 4):
band = 'S'
elif (freq >= 4 and freq < 8):
band = 'C'
elif (freq >= 8 and freq < 12):
band = 'X'
else:
logger.warn('first channel freq ({0}) not in bands L, S, C, or X. Assuming L band.'.format(freq))
band = 'L'
goodfrac = 1 - flagfrac_med # correct for flagged data
slim_theory = sensitivity(sefd[band], dt, bw, eta[band], goodfrac*nbl(nant), npol)
fluxscale = slim_theory/imstd_med
return fluxscale |
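A quick numeric sketch of the radiometer formula used above, with assumed, illustrative VLA-like values (27 antennas, L band, 5 ms integrations, 256 MHz bandwidth, 2 polarizations, no flagging):
import numpy as np

sensitivity = lambda sefd, dt, bw, eta, nbl, npol: sefd/(eta*np.sqrt(nbl*2 * dt * bw * npol))
nbl = 27*(27-1)/2                                  # 351 baselines
slim = sensitivity(420., 0.005, 256e6, 0.92, nbl, 2)
print('theoretical image noise ~ {0:.1f} mJy'.format(1e3*slim))   # roughly 11 mJy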
<SYSTEM_TASK:>
Find bad time ranges from distribution of candidates.
<END_TASK>
<USER_TASK:>
Description:
def findhight(data, ignoret=None, threshold=20):
""" Find bad time ranges from distribution of candidates.
ignoret is list of tuples [(t0, t1), (t2, t3)] defining ranges to ignore.
threshold is the number of standard deviations above the median of the candidate count distribution in time.
Returns the time (in seconds) and counts for bins above threshold.
""" |
time = np.sort(data['time'])
ww = np.ones(len(time), dtype=bool) # initialize pass filter
if ignoret:
for (t0, t1) in ignoret:
ww = ww & np.where( (time < t0) | (time > t1), True, False )
bins = np.round(time[ww]).astype('int')
counts = np.bincount(bins)
high = np.where(counts > np.median(counts) + threshold*counts.std())[0]
return high, counts[high] |
<SYSTEM_TASK:>
Given indices of good times, calculate total time per scan with indices.
<END_TASK>
<USER_TASK:>
Description:
def calcontime(data, inds=None):
""" Given indices of good times, calculate total time per scan with indices. """ |
if not inds:
inds = range(len(data['time']))
logger.info('No indices provided. Assuming all are valid.')
scans = set([data['scan'][i] for i in inds])
total = 0.
for scan in scans:
time = [data['time'][i] for i in inds if data['scan'][i] == scan]
total += max(time) - min(time)
return total |
<SYSTEM_TASK:>
Use set of values to calculate symbol size.
<END_TASK>
<USER_TASK:>
Description:
def calcsize(values, sizerange=(2,70), inds=None, plaw=3):
""" Use set of values to calculate symbol size.
values is a list of floats for candidate significance.
inds is an optional list of indexes to use to calculate symbol size.
Symbol size min/max scaling is set by the sizerange tuple (min, max).
plaw is powerlaw scaling of symbol size from values
""" |
if inds:
smax = max([abs(values[i]) for i in inds])
smin = min([abs(values[i]) for i in inds])
else:
smax = max([abs(val) for val in values])
smin = min([abs(val) for val in values])
return [sizerange[0] + sizerange[1] * ((abs(val) - smin)/(smax - smin))**plaw for val in values] |
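An illustrative call showing how the power-law scaling maps significances to symbol sizes (values are made up):
print(calcsize([5., 8., 20.], sizerange=(2, 70), plaw=3))
# -> roughly [2.0, 2.6, 72.0]; the largest |value| maps to sizerange[0] + sizerange[1]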
<SYSTEM_TASK:>
Returns color for given l,m
<END_TASK>
<USER_TASK:>
Description:
def colorsat(l,m):
""" Returns color for given l,m
Designed to look like a color wheel that is more saturated in middle.
""" |
lm = np.zeros(len(l), dtype='complex')
lm.real = l
lm.imag = m
red = 0.5*(1+np.cos(np.angle(lm)))
green = 0.5*(1+np.cos(np.angle(lm) + 2*3.14/3))
blue = 0.5*(1+np.cos(np.angle(lm) - 2*3.14/3))
amp = 256*np.abs(lm)/np.abs(lm).max()
return ["#%02x%02x%02x" % (np.floor(amp[i]*red[i]), np.floor(amp[i]*green[i]), np.floor(amp[i]*blue[i])) for i in range(len(l))] |
<SYSTEM_TASK:>
Iteratively filter bad times and set indices for later plotting
<END_TASK>
<USER_TASK:>
Description:
def filterdata(data, plinds, d, threshold, ignorestr):
""" Iteratively filter bad times and set indices for later plotting """ |
logger.info('Ignoring times from ignorestr {0}'.format(ignorestr))
ignoret = parseignoret(ignorestr)
thresh0 = d['sigma_image1']
thresh1 = d['sigma_plot']
plinds['cir'] = calcinds(data, thresh0, ignoret=ignoret) # positive cands
plinds['cro'] = calcinds(data, -1*thresh0, ignoret=ignoret) # negative cands
plinds['edg'] = calcinds(data, thresh1, ignoret=ignoret) # cands with png plots
sortinds = sorted(set(plinds['cir'] + plinds['cro'] + plinds['edg']))
logger.info('Selected {} ({} linked) points.'.format(len(sortinds), len(plinds['edg'])))
logger.info('Estimated total on target time: {} s\n'.format(calcontime(
data, inds=plinds['cir']+plinds['cro']+plinds['edg'])))
# these must get rescaled when cands are ignored
data['zs'] = normprob(d, data['snrs'], inds=sortinds)
# print high 1s bin counts
logger.info('Finding high 1-second bins with threshold {0}'.format(threshold))
hight, highcount = findhight(data, ignoret=ignoret, threshold=threshold)
if len(hight):
logger.info('High times \t High counts:')
for i in range(len(hight)):
logger.info('{0}\t{1}'.format(hight[i], highcount[i]))
else:
logger.info('No high 1s bin counts.\n')
# print high cands and their times
biginds = np.argsort(data['abssnr'][sortinds])[-5:]
logger.info('Top 5 abs(snr) candidates and times:')
for ind in biginds[::-1]:
logger.info('{0} {1}'.format(data['snrs'][sortinds][ind], data['time'][sortinds][ind]))
logger.info('\n') |
<SYSTEM_TASK:>
Calculates real score probability of prop from an activegit repo.
<END_TASK>
<USER_TASK:>
Description:
def addclassifications(agdir, prop, version=None, statfeats = [0,4,5,6,7,8]):
""" Calculates real score probability of prop from an activegit repo.
version is string name of activegit tag.
Default agdir initialization will have latest tag, so version is optional.
statfeats set to work with alnotebook naming.
""" |
try:
ag = activegit.ActiveGit(agdir)
if version:
ag.set_version(version)
clf = ag.classifier
score = clf.predict_proba((np.nan_to_num(prop[:,statfeats])))[:,1] # take real score
return score
except:
logger.info('Failure when parsing activegit repo or applying classification.\n{0}'.format(sys.exc_info()[0]))
return [] |
<SYSTEM_TASK:>
Normalize emoticon entries that have a single name
<END_TASK>
<USER_TASK:>
Description:
def cast_to_list(emoticons_list):
"""
Normalize emoticon entries that have a single name
into a list, for easier iteration later,
and cast other iterables to lists.
""" |
emoticons_tuple = []
for emoticons, image in emoticons_list:
if isinstance(emoticons, basestring):
emoticons = [emoticons]
else:
emoticons = list(emoticons)
emoticons_tuple.append((emoticons, image))
return emoticons_tuple |
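An illustrative round trip through the normalization above (Python 2 `basestring` is assumed, as in the code itself):
entries = [(':)', 'smile.png'), ((':-(', ':('), 'sad.png')]
assert cast_to_list(entries) == [([':)'], 'smile.png'), ([':-(', ':('], 'sad.png')]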
<SYSTEM_TASK:>
Compile a new list of emoticon tuples.
<END_TASK>
<USER_TASK:>
Description:
def compile_emoticons(emoticons_list):
"""
Compile a new list of emoticon tuples.
Each tuple contains a compiled regular expression
of the emoticon, and the html version of the emoticon.
""" |
emoticons_compiled = []
for emoticons, image in emoticons_list:
for emoticon in emoticons:
context = {
'name': emoticon,
'image': os.path.join(EMOTICONS_DIRECTORY, image),
'code': binascii.hexlify(emoticon.encode('utf-8'))}
emoticons_compiled.append(
(re.compile(re.escape(emoticon)),
EMOTICON_TEMPLATE.render(context).strip())
)
return emoticons_compiled |
<SYSTEM_TASK:>
Simple parse and return metadata for pipeline for first scan
<END_TASK>
<USER_TASK:>
Description:
def read(filename, paramfile, bdfdir, scan):
""" Simple parse and return metadata for pipeline for first scan """ |
filename = os.path.abspath(filename)
scans = ps.read_scans(filename, bdfdir=bdfdir)
logger.info('Scans, Target names:')
logger.info('%s' % str([(ss, scans[ss]['source']) for ss in scans]))
logger.info('Example pipeline:')
state = rt.set_pipeline(filename, scan, paramfile=paramfile, logfile=False) |
<SYSTEM_TASK:>
Searches one scan of filename
<END_TASK>
<USER_TASK:>
Description:
def searchone(filename, scan, paramfile, logfile, bdfdir):
""" Searches one scan of filename
filename is name of local sdm ('filename.GN' expected locally).
scan is scan number to search. if none provided, script prints all.
assumes filename is an sdm.
""" |
filename = os.path.abspath(filename)
scans = ps.read_scans(filename, bdfdir=bdfdir)
if scan != 0:
d = rt.set_pipeline(filename, scan, paramfile=paramfile,
fileroot=os.path.basename(filename), logfile=logfile)
rt.pipeline(d, range(d['nsegments']))
# clean up and merge files
pc.merge_segments(filename, scan)
pc.merge_scans(os.path.dirname(filename), os.path.basename(filename), scans.keys())
else:
logger.info('Scans, Target names:')
logger.info('%s' % str([(ss, scans[ss]['source']) for ss in scans]))
logger.info('Example pipeline:')
state = rt.set_pipeline(filename, scans.popitem()[0], paramfile=paramfile,
fileroot=os.path.basename(filename), logfile=logfile) |
<SYSTEM_TASK:>
Compile the baseinteract.ipynb notebook into an analysis notebook for filename
<END_TASK>
<USER_TASK:>
Description:
def nbcompile(filename, html, basenb, agdir):
""" Compile the baseinteract.ipynb notebook into an analysis notebook for filename """ |
filename = os.path.abspath(filename)
pc.nbcompile(os.path.dirname(filename), os.path.basename(filename), html=html, basenb=basenb, agdir=agdir) |
<SYSTEM_TASK:>
When the instance is deleted, we propagate the deletion to the related
<END_TASK>
<USER_TASK:>
Description:
def delete(self):
"""
When the instance is deleted, we propagate the deletion to the related
collections, which will remove it from the related fields.
""" |
for related_collection_name in self.related_collections:
related_collection = getattr(self, related_collection_name)
related_collection.remove_instance()
return super(RelatedModel, self).delete() |
<SYSTEM_TASK:>
When we have a model, save the relation in the database, to later create
<END_TASK>
<USER_TASK:>
Description:
def _attach_to_model(self, model):
"""
When we have a model, save the relation in the database, to later create
RelatedCollection objects in the related model
""" |
super(RelatedFieldMixin, self)._attach_to_model(model)
if model.abstract:
# do not manage the relation if it's an abstract model
return
# now, check related_name and save the relation in the database
# get related parameters to identify the relation
self.related_name = self._get_related_name()
self.related_to = self._get_related_model_name()
# create entry for the model in the _relations list of the database
if not hasattr(self.database, '_relations'):
self.database._relations = {}
self.database._relations.setdefault(self.related_to, [])
# check unicity of related name for related model
self._assert_relation_does_not_exists()
# the relation didn't exist, we can save it
relation = (self._model._name, self.name, self.related_name)
self.database._relations[self.related_to].append(relation) |
<SYSTEM_TASK:>
Check if a relation with the current related_name doesn't already exists
<END_TASK>
<USER_TASK:>
Description:
def _assert_relation_does_not_exists(self):
"""
Check if a relation with the current related_name doesn't already exists
for the related model
""" |
relations = self.database._relations[self.related_to]
existing = [r for r in relations if r[2] == self.related_name]
if existing:
error = ("The related name defined for the field '%s.%s', named '%s', already exists "
"on the model '%s' (tied to the field '%s.%s')")
raise ImplementationError(error % (self._model.__name__, self.name, self.related_name,
self.related_to, existing[0][0], existing[0][1])) |
<SYSTEM_TASK:>
Provide the ability to pass a RedisModel instance or a FK field as
<END_TASK>
<USER_TASK:>
Description:
def from_python(self, value):
"""
Provide the ability to pass a RedisModel instance or a FK field as
the value instead of passing the PK. The value will then be translated into
the real PK.
""" |
if isinstance(value, model.RedisModel):
value = value._pk
elif isinstance(value, SimpleValueRelatedFieldMixin):
value = value.proxy_get()
return value |
<SYSTEM_TASK:>
Returns the instance of the related object linked by the field.
<END_TASK>
<USER_TASK:>
Description:
def instance(self, skip_exist_test=False):
"""
Returns the instance of the related object linked by the field.
""" |
model = self.database._models[self.related_to]
meth = model.lazy_connect if skip_exist_test else model
return meth(self.proxy_get()) |
<SYSTEM_TASK:>
Convenience method for executing the callable `func` as a transaction
<END_TASK>
<USER_TASK:>
Description:
def transaction(self, func, *watches, **kwargs):
"""
Convenience method for executing the callable `func` as a transaction
while watching all keys specified in `watches`. The 'func' callable
should expect a single argument which is a Pipeline object.
""" |
with self.pipeline(True, share_in_threads=kwargs.get('share_in_threads', False)) as pipe:
while 1:
try:
if watches:
pipe.watch(*watches)
func(pipe)
return pipe.execute()
except WatchError:
continue |
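A hedged usage sketch mirroring redis-py's own transaction idiom; the 'visits' key and the `database` object name are illustrative, not from the source:
def incr_visits(pipe):
    current = pipe.get('visits')        # immediate read while 'visits' is WATCHed
    pipe.multi()                        # switch the pipeline to buffered mode
    pipe.set('visits', int(current or 0) + 1)

database.transaction(incr_visits, 'visits')   # automatically retried on WatchError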
<SYSTEM_TASK:>
If we have a pipeline, shared across threads or created in the current thread, return it
<END_TASK>
<USER_TASK:>
Description:
def _connection(self):
"""
If we have a pipeline that is shared across threads, or that was created in
the current thread, return it; else use the direct connection
""" |
if self._pipelined_connection is not None:
if self._pipelined_connection.share_in_threads or \
threading.current_thread().ident == self._pipelined_connection.current_thread_id:
return self._pipelined_connection
return self._direct_connection |
<SYSTEM_TASK:>
If the value is a pipeline, save it as the connection to use for pipelines if to be shared
<END_TASK>
<USER_TASK:>
Description:
def _connection(self, value):
"""
If the value is a pipeline, save it as the connection to use for pipelined
calls if it is shared across threads or used in its own thread. Do not remove the direct connection.
If it is not a pipeline, clear it and set the direct connection again.
""" |
if isinstance(value, _Pipeline):
self._pipelined_connection = value
else:
self._direct_connection = value
self._pipelined_connection = None |
<SYSTEM_TASK:>
Override the default watch method to allow the user to pass RedisField
<END_TASK>
<USER_TASK:>
Description:
def watch(self, *names):
"""
Override the default watch method to allow the user to pass RedisField
objects as names, which will be translated to their real keys and passed
to the default watch method
""" |
watches = []
for watch in names:
if isinstance(watch, RedisField):
watch = watch.key
watches.append(watch)
return super(_Pipeline, self).watch(*watches) |
<SYSTEM_TASK:>
Reads candidate file and returns data as python object.
<END_TASK>
<USER_TASK:>
Description:
def read_candidates(candsfile, snrmin=0, snrmax=999, returnstate=False):
""" Reads candidate file and returns data as python object.
candsfile is pkl file (for now) with (1) state dict and (2) cands object.
cands object can either be a dictionary or tuple of two numpy arrays.
Return tuple of two numpy arrays (location, properties).
returned values can be filtered by snrmin and snrmax (on absolute value).
returnstate will instead return (loc, prop, state).
""" |
# read in pickle file of candidates
assert os.path.exists(candsfile)
try:
with open(candsfile, 'rb') as pkl:
d = pickle.load(pkl)
cands = pickle.load(pkl)
except IOError:
logger.error('Trouble parsing candsfile')
loc = np.array([])
prop = np.array([])
if returnstate:
return (loc, prop, d)
else:
return (loc, prop)
if 'snr2' in d['features']:
snrcol = d['features'].index('snr2')
elif 'snr1' in d['features']:
snrcol = d['features'].index('snr1')
# old style. here for backwards compatibility
if isinstance(cands, dict):
loc = []; prop = []
for kk in sorted(cands.keys()):
if ((np.abs(cands[kk][snrcol]) > snrmin) and (np.abs(cands[kk][snrcol]) < snrmax)):
loc.append( list(kk) )
prop.append( list(cands[kk]) )
loc = np.array(loc)
prop = np.array(prop)
# new style
elif isinstance(cands, tuple):
loc, prop = cands
assert isinstance(loc, np.ndarray) and isinstance(prop, np.ndarray), 'if cands object is tuple, contents must be two ndarrays'
if not len(loc):
if returnstate:
return (loc, prop, d)
else:
return (loc, prop)
snrsel = np.where( (np.abs(prop[:, snrcol]) > snrmin) & (np.abs(prop[:, snrcol]) < snrmax) )
loc = loc[snrsel]
prop = prop[snrsel]
else:
logger.error('Cands object (in cands file) must be dict or tuple(np.array, np.array).')
# if segment or scan pkl, insert scan number as first col and modify d
if 'scan' not in d['featureind']:
scanarr = d['scan'] * np.ones(len(loc), dtype=int)
loc = np.concatenate( (scanarr[:,None], loc), axis=1)
d['featureind'].insert(0, 'scan')
logger.info('Read %d candidates from %s.' % (len(loc), candsfile))
if returnstate:
return loc, prop, d
else:
return loc, prop |
<SYSTEM_TASK:>
Function to read a noise file and parse columns.
<END_TASK>
<USER_TASK:>
Description:
def read_noise(noisefile):
""" Function to read a noise file and parse columns.
Works with both per-scan and merged noise files.
""" |
noises = pickle.load(open(noisefile, 'r'))
scan = []; seg = []; noiseperbl = []; flagfrac = []; imnoise = []
if len(noises[0]) == 4:
for noise in noises:
seg.append(noise[0]); noiseperbl.append(noise[1])
flagfrac.append(noise[2]); imnoise.append(noise[3])
return (np.array(seg), np.array(noiseperbl), np.array(flagfrac), np.array(imnoise))
elif len(noises[0]) == 5:
for noise in noises:
scan.append(noise[0])
seg.append(noise[1]); noiseperbl.append(noise[2])
flagfrac.append(noise[3]); imnoise.append(noise[4])
return (np.array(scan), np.array(seg), np.array(noiseperbl), np.array(flagfrac), np.array(imnoise))
else:
logger.warn('structure of noise file not understood. first entry should be length 4 or 5.') |
<SYSTEM_TASK:>
Merge noise files from multiple segments.
<END_TASK>
<USER_TASK:>
Description:
def merge_noises(pkllist, outroot=''):
""" Merge noise files from multiple segments.
Output noise file has scan number at start of each entry.
""" |
assert isinstance(pkllist, list), "pkllist must be list of file names"
if not outroot:
outroot = '_'.join(pkllist[0].split('_')[1:-1])
workdir = os.path.dirname(pkllist[0])
mergepkl = os.path.join(workdir, 'noise_' + outroot + '_merge.pkl')
pkllist = [pkllist[i] for i in range(len(pkllist)) if ('merge' not in pkllist[i]) and ('seg' not in pkllist[i])] # filter list down to per-scan noise pkls
pkllist.sort(key=lambda i: int(i.rstrip('.pkl').split('_sc')[1])) # sort by scan assuming filename structure
scans = [int(ff.rstrip('.pkl').split('_sc')[1]) for ff in pkllist]
logger.info('Aggregating noise from scans %s' % scans)
allnoise = []
for pklfile in pkllist:
scan = int(pklfile.rstrip('.pkl').split('_sc')[1]) # parsing filename to get scan number
with open(pklfile, 'r') as pkl:
noises = pickle.load(pkl) # gets all noises for segment as list
allnoise += [[scan] + list(noise) for noise in noises] # prepend scan number
# write noise to single file
if os.path.exists(mergepkl):
logger.info('Overwriting merged noise file %s' % mergepkl)
os.remove(mergepkl)
else:
logger.info('Writing merged noise file %s' % mergepkl)
with open(mergepkl, 'w') as pkl:
pickle.dump(allnoise, pkl, protocol=2) |
<SYSTEM_TASK:>
Split features from one candsfile into two new candsfiles
<END_TASK>
<USER_TASK:>
Description:
def split_candidates(candsfile, featind1, featind2, candsfile1, candsfile2):
""" Split features from one candsfile into two new candsfiles
featind1/2 is list of indices to take from d['features'].
New features and updated state dict go to candsfile1/2.
""" |
with open(candsfile, 'rb') as pkl:
d = pickle.load(pkl)
cands = pickle.load(pkl)
features = d['features']
d1 = d.copy()
d2 = d.copy()
d1['features'] = [features[i] for i in featind1]
d2['features'] = [features[i] for i in featind2]
cands1 = {}
cands2 = {}
for key in cands:
cands1[key] = tuple([cands[key][i] for i in featind1])
cands2[key] = tuple([cands[key][i] for i in featind2])
with open(candsfile1, 'w') as pkl:
pickle.dump(d1, pkl, protocol=2)
pickle.dump(cands1, pkl, protocol=2)
with open(candsfile2, 'w') as pkl:
pickle.dump(d2, pkl, protocol=2)
pickle.dump(cands2, pkl, protocol=2) |
<SYSTEM_TASK:>
Run analysis pipeline from jupyter base notebook and save as notebook and html.
<END_TASK>
<USER_TASK:>
Description:
def nbcompile(workdir, fileroot, html=True, basenb='', agdir=''):
""" Run analysis pipeline from jupyter base notebook and save as notebook and html.
If html is True, a static html version is also compiled.
basenb can be provided, else the distributed version will be used.
agdir is the activegit repo (optional)
""" |
from shutil import copy
from rtpipe import get_notebook
from subprocess import call
os.environ['fileroot'] = fileroot
if agdir:
os.environ['agdir'] = agdir
if not basenb:
basenb = get_notebook('baseinteract.ipynb')
logger.info('Moving to {0} and building notebook for {1}'.format(workdir, fileroot))
os.chdir(workdir)
copy(basenb, '{0}/{1}.ipynb'.format(workdir, fileroot))
cmd = 'jupyter nbconvert {0}.ipynb --inplace --execute --to notebook --allow-errors --ExecutePreprocessor.timeout=3600'.format(fileroot).split(' ')
status = call(cmd)
cmd = 'jupyter trust {0}.ipynb'.format(fileroot).split(' ')
status = call(cmd)
if html:
cmd = 'jupyter nbconvert {0}.ipynb --to html --output {0}.html'.format(fileroot).split(' ')
status = call(cmd) |
<SYSTEM_TASK:>
Returns list of significant candidate loc in candsfile.
<END_TASK>
<USER_TASK:>
Description:
def thresholdcands(candsfile, threshold, numberperscan=1):
""" Returns list of significant candidate loc in candsfile.
Can define threshold and maximum number of locs per scan.
Works on merge or per-scan cands pkls.
""" |
# read metadata and define columns of interest
loc, prop, d = read_candidates(candsfile, returnstate=True)
try:
scancol = d['featureind'].index('scan')
except ValueError:
scancol = -1
if 'snr2' in d['features']:
snrcol = d['features'].index('snr2')
elif 'snr1' in d['features']:
snrcol = d['features'].index('snr1')
snrs = [prop[i][snrcol] for i in range(len(prop)) if prop[i][snrcol] > threshold]
# calculate unique list of locs of interest
siglocs = [list(loc[i]) for i in range(len(prop)) if prop[i][snrcol] > threshold]
siglocssort = sorted(zip([list(ll) for ll in siglocs], snrs), key=lambda stuff: stuff[1], reverse=True)
if scancol >= 0:
scanset = list(set([siglocs[i][scancol] for i in range(len(siglocs))]))
candlist= []
for scan in scanset:
logger.debug('looking in scan %d' % scan)
count = 0
for sigloc in siglocssort:
if sigloc[0][scancol] == scan:
logger.debug('adding sigloc %s' % str(sigloc))
candlist.append(sigloc)
count += 1
if count >= numberperscan:
break
else:
candlist = siglocssort[:numberperscan]
logger.debug('Returning %d cands above threshold %.1f' % (len(candlist), threshold))
return [loc for loc,snr in candlist] |
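A standalone sketch of the "top N per scan" selection logic above, on made-up candidate tuples: sort by SNR descending, then keep at most numberperscan locations per scan.
cands = [((1, 0, 10), 8.2), ((1, 0, 42), 7.1), ((2, 1, 5), 9.0)]  # ((scan, seg, int), snr), made up
numberperscan = 1
kept, counts = [], {}
for loc, snr in sorted(cands, key=lambda c: c[1], reverse=True):
    scan = loc[0]
    if counts.get(scan, 0) < numberperscan:
        kept.append(loc)
        counts[scan] = counts.get(scan, 0) + 1
print(kept)  # [(2, 1, 5), (1, 0, 10)]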
<SYSTEM_TASK:>
Function to convert segment+integration into mjd seconds.
<END_TASK>
<USER_TASK:>
Description:
def int2mjd(d, loc):
""" Function to convert segment+integration into mjd seconds.
""" |
# needs to take merge pkl dict
if len(loc):
intcol = d['featureind'].index('int')
segmentcol = d['featureind'].index('segment')
if 'segmenttimesdict' in d: # using merged pkl
scancol = d['featureind'].index('scan')
t0 = np.array([d['segmenttimesdict'][loc[i,scancol]][loc[i,segmentcol],0] for i in range(len(loc))])
else:
t0 = d['segmenttimes'][loc[:,segmentcol]][:,0]
return (t0 + (d['inttime']/(24*3600.))*loc[:,intcol]) * 24*3600
else:
return np.array([]) |
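A worked example of the conversion with illustrative numbers: the segment start time in MJD days plus the integration offset, returned in seconds.
import numpy as np
t0 = 57000.5                  # segment start in MJD days (hypothetical)
inttime = 0.005               # integration time in seconds
ints = np.array([0, 10, 100])
mjd_seconds = (t0 + (inttime / (24 * 3600.)) * ints) * 24 * 3600
print(mjd_seconds - t0 * 24 * 3600)  # [0.   0.05 0.5 ], seconds after segment start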
<SYSTEM_TASK:>
Plot 'full' features, such as cutout image and spectrum.
<END_TASK>
<USER_TASK:>
Description:
def plot_full(candsfile, cands, mode='im'):
""" Plot 'full' features, such as cutout image and spectrum.
""" |
loc, prop, d = read_candidates(candsfile, returnstate=True)
npixx, npixy = prop[0][4].shape
nints, nchan, npol = prop[0][5].shape
bin = 10
plt.figure(1)
for i in cands:
if mode == 'spec':
rr = np.array([np.abs(prop[i][5][:,i0:i0+bin,0].mean(axis=1)) for i0 in range(0,nchan,bin)])
ll = np.array([np.abs(prop[i][5][:,i0:i0+bin,1].mean(axis=1)) for i0 in range(0,nchan,bin)])
sh = ll.shape
data = np.concatenate( (rr, np.zeros(shape=(sh[0], sh[1]//2)), ll), axis=1)
elif mode == 'im':
data = prop[i][4]
nside = int(np.ceil(np.sqrt(len(cands))))
plt.subplot(nside, nside, cands.index(i) + 1)
plt.imshow(data, interpolation='nearest')
plt.show() |
<SYSTEM_TASK:>
Associates mock cands with detections in candsfile by integration.
<END_TASK>
<USER_TASK:>
Description:
def mock_fluxratio(candsfile, mockcandsfile, dmbin=0):
""" Associates mock cands with detections in candsfile by integration.
Returns ratio of detected to expected flux for all associations.
""" |
loc, prop = read_candidates(candsfile)
loc2, prop2 = read_candidates(mockcandsfile)
dmselect = np.where(loc[:,2] == dmbin)[0]
mocki = [i for i in loc2[:,1].astype(int)] # known transients
rat = []; newloc = []; newprop = []
for i in mocki:
try:
detind = list(loc[dmselect,1]).index(i) # try to find detection
rat.append(prop[dmselect][detind][1]/prop2[mocki.index(i)][1])
newloc.append(list(loc2[mocki.index(i)]))
newprop.append(list(prop2[mocki.index(i)]))
except ValueError:
pass
return rat, np.array(newloc), newprop |
<SYSTEM_TASK:>
Calculate median absolute deviation of data array
<END_TASK>
<USER_TASK:>
Description:
def flag_calcmad(data):
""" Calculate median absolute deviation of data array """ |
absdev = np.abs(data - np.median(data))
return np.median(absdev) |
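A standalone check of the estimator: for Gaussian data, 1.4826 * MAD approximates the standard deviation, which is why the MAD is used as a robust noise measure for flagging.
import numpy as np
data = np.random.normal(0, 2.0, 100000)
mad = np.median(np.abs(data - np.median(data)))
print(mad * 1.4826)  # close to 2.0 for Gaussian data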
<SYSTEM_TASK:>
Convert numeric brightness percentage into hex for insteon
<END_TASK>
<USER_TASK:>
Description:
def brightness_to_hex(self, level):
"""Convert numeric brightness percentage into hex for insteon""" |
level_int = int(level)
new_int = int((level_int * 255)/100)
new_level = format(new_int, '02X')
self.logger.debug("brightness_to_hex: %s to %s", level, str(new_level))
return str(new_level) |
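A worked example of the scaling above: a brightness of 75% maps to int(75 * 255 / 100) = 191, which formats as 'BF'.
level_int = 75
new_int = int((level_int * 255) / 100)
print(format(new_int, '02X'))  # 'BF'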
<SYSTEM_TASK:>
Send raw command via post
<END_TASK>
<USER_TASK:>
Description:
def post_direct_command(self, command_url):
"""Send raw command via post""" |
self.logger.info("post_direct_command: %s", command_url)
req = requests.post(command_url, timeout=self.timeout,
auth=requests.auth.HTTPBasicAuth(self.username,
self.password))
self.http_code = req.status_code
req.raise_for_status()
return req |
<SYSTEM_TASK:>
Send raw command via get
<END_TASK>
<USER_TASK:>
Description:
def get_direct_command(self, command_url):
"""Send raw command via get""" |
self.logger.info("get_direct_command: %s", command_url)
req = requests.get(command_url, timeout=self.timeout,
auth=requests.auth.HTTPBasicAuth(self.username,
self.password))
self.http_code = req.status_code
req.raise_for_status()
return req |
<SYSTEM_TASK:>
Send direct hub command
<END_TASK>
<USER_TASK:>
Description:
def direct_command_hub(self, command):
"""Send direct hub command""" |
self.logger.info("direct_command_hub: Command %s", command)
command_url = (self.hub_url + '/3?' + command + "=I=3")
return self.post_direct_command(command_url) |
<SYSTEM_TASK:>
Return the device category and name given the category id
<END_TASK>
<USER_TASK:>
Description:
def get_device_category(self, cat):
"""Return the device category and name given the category id""" |
if cat in self.device_categories:
return self.device_categories[cat]
else:
return False |
<SYSTEM_TASK:>
Check if last command succeeded by checking buffer
<END_TASK>
<USER_TASK:>
Description:
def check_success(self, device_id, sent_cmd1, sent_cmd2):
"""Check if last command succeeded by checking buffer""" |
device_id = device_id.upper()
self.logger.info('check_success: for device %s cmd1 %s cmd2 %s',
device_id, sent_cmd1, sent_cmd2)
sleep(2)
status = self.get_buffer_status(device_id)
check_id = status.get('id_from', '')
cmd1 = status.get('cmd1', '')
cmd2 = status.get('cmd2', '')
if (check_id == device_id) and (cmd1 == sent_cmd1) and (cmd2 == sent_cmd2):
self.logger.info("check_success: Response device %s cmd %s cmd2 %s SUCCESS",
check_id, cmd1, cmd2)
return True
self.logger.info("check_success: No valid response found for device %s cmd %s cmd2 %s",
device_id, sent_cmd1, sent_cmd2)
return False |
<SYSTEM_TASK:>
Connect to redis and cache the new connection
<END_TASK>
<USER_TASK:>
Description:
def connect(self, **settings):
"""
Connect to redis and cache the new connection
""" |
# compute a unique key for this settings, for caching. Work on the whole
# dict without directly using known keys to allow the use of unix socket
# connection or any other (future ?) way to connect to redis
if not settings:
settings = self.connection_settings
connection_key = ':'.join([str(settings[k]) for k in sorted(settings)])
if connection_key not in self._connections:
self._connections[connection_key] = redis.StrictRedis(
decode_responses=True, **settings)
return self._connections[connection_key] |
<SYSTEM_TASK:>
Call a redis script with keys and args
<END_TASK>
<USER_TASK:>
Description:
def call_script(self, script_dict, keys=None, args=None):
"""Call a redis script with keys and args
The first time we call a script, we register it to speed up later calls.
We expect a dict with a ``lua`` key having the script, and the dict will be
updated with a ``script_object`` key, with the content returned by the
the redis-py ``register_script`` command.
Parameters
----------
script_dict: dict
A dict with a ``lua`` entry containing the lua code. A new key, ``script_object``
will be added after that.
keys: list of str
List of the keys that will be read/updated by the lua script
args: list of str
List of all the args expected by the script.
Returns
-------
Anything that will be returned by the script
""" |
if keys is None:
keys = []
if args is None:
args = []
if 'script_object' not in script_dict:
script_dict['script_object'] = self.connection.register_script(script_dict['lua'])
return script_dict['script_object'](keys=keys, args=args, client=self.connection) |
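A hedged usage sketch of the registration pattern, assuming redis-py and a reachable local redis server; the registered Script object is cached in the dict so later calls skip re-sending the lua source.
import redis
conn = redis.StrictRedis(decode_responses=True)
script_dict = {'lua': "return redis.call('GET', KEYS[1]) or ARGV[1]"}
script_dict['script_object'] = conn.register_script(script_dict['lua'])
print(script_dict['script_object'](keys=['missing-key'], args=['fallback'], client=conn))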
<SYSTEM_TASK:>
Take a pattern expected by the redis `scan` command and iter on all matching keys
<END_TASK>
<USER_TASK:>
Description:
def scan_keys(self, match=None, count=None):
"""Take a pattern expected by the redis `scan` command and iter on all matching keys
Parameters
----------
match: str
The pattern of keys to look for
count: int, default to None (redis uses 10)
Hint for redis about the number of expected result
Yields
-------
str
All keys found by the scan, one by one. A key can be returned multiple times, it's
related to the way the SCAN command works in redis.
""" |
cursor = 0
while True:
cursor, keys = self.connection.scan(cursor, match=match, count=count)
for key in keys:
yield key
if not cursor or cursor == '0': # string for redis.py < 2.10
break |
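A hedged sketch of the same SCAN cursor loop against a reachable redis server; the 'user:*' pattern is just an example, and results are collected into a set since SCAN may return a key more than once.
import redis
conn = redis.StrictRedis(decode_responses=True)
cursor, seen = 0, set()
while True:
    cursor, keys = conn.scan(cursor, match='user:*', count=100)
    seen.update(keys)
    if not cursor or cursor == '0':
        break
print(len(seen))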
<SYSTEM_TASK:>
Given a set of coefficients,
<END_TASK>
<USER_TASK:>
Description:
def calcChebyshev(coeffs, validDomain, freqs):
"""
Given a set of coefficients,
this method evaluates a Chebyshev approximation.
Used for CASA bandpass reading.
input coeffs and freqs are numpy arrays
""" |
logger = logging.getLogger(__name__)
domain = (validDomain[1] - validDomain[0])[0]
bins = -1 + 2* n.array([ (freqs[i]-validDomain[0,i])/domain for i in range(len(freqs))])
ncoeffs = len(coeffs[0])//2
rr = n.array([n.polynomial.chebyshev.chebval(bins[i], coeffs[i,:ncoeffs]) for i in range(len(coeffs))])
ll = n.array([n.polynomial.chebyshev.chebval(bins[i], coeffs[i,ncoeffs:]) for i in range(len(coeffs))])
return rr,ll |
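A standalone check of the evaluation with made-up numbers: frequencies are rescaled onto the [-1, 1] Chebyshev domain before numpy's chebval is applied.
import numpy as np
valid = (1.0e9, 2.0e9)                       # hypothetical valid frequency domain (Hz)
freqs = np.array([1.0e9, 1.5e9, 2.0e9])
bins = -1 + 2 * (freqs - valid[0]) / (valid[1] - valid[0])
coeffs = np.array([1.0, 0.5, 0.25])          # made-up Chebyshev coefficients
print(np.polynomial.chebyshev.chebval(bins, coeffs))  # [0.75 0.75 1.75]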
<SYSTEM_TASK:>
Calculates antennas to flag, based on bad gain and bp solutions.
<END_TASK>
<USER_TASK:>
Description:
def calc_flag(self, sig=3.0):
""" Calculates antennas to flag, based on bad gain and bp solutions.
""" |
if len(self.gain.shape) == 4:
gamp = n.abs(self.gain).mean(axis=0) # mean gain amp for each ant over time
elif len(self.gain.shape) == 3:
gamp = n.abs(self.gain) # gain amp for selected time
# badgain = n.where(gamp < gamp.mean() - sig*gamp.std())
badgain = n.where( (gamp < gamp.mean() - sig*gamp.std()) | gamp.mask)
self.logger.info('Flagging low/bad gains for ant/spw/pol: %s %s %s' % (str(self.antnum[badgain[0]]), str(badgain[1]), str(badgain[2])))
badants = badgain
return badants |
<SYSTEM_TASK:>
Quick visualization of calibration solution.
<END_TASK>
<USER_TASK:>
Description:
def plot(self):
""" Quick visualization of calibration solution.
""" |
import pylab as p
p.clf()
fig = p.figure(1)
nspw = len(self.gain[0])
ext = n.ceil(n.sqrt(nspw)) # find best square plot (simplest)
for spw in range(len(self.gain[0])):
ax = fig.add_subplot(ext, ext, spw+1)
for pol in [0,1]:
ax.scatter(range(len(self.gain)), n.abs(self.gain.data[:,spw,pol]), color=n.array(['k','y']).take(self.gain.mask[:,spw,pol]), marker=['x','.'][pol])
fig.show() |
<SYSTEM_TASK:>
Flags solutions with amplitude more than threshold larger than median.
<END_TASK>
<USER_TASK:>
Description:
def flagants(self, threshold=50):
""" Flags solutions with amplitude more than threshold larger than median.
""" |
# identify very low gain amps not already flagged
badsols = n.where( (n.median(self.amp)/self.amp > threshold) & (self.flagged == False))[0]
if len(badsols):
self.logger.info('Solutions %s flagged (times %s, ants %s, freqs %s) for low gain amplitude.' % (str(badsols), self.mjd[badsols], self.antname[badsols], self.ifid[badsols]))
for sol in badsols:
self.flagged[sol] = True |
<SYSTEM_TASK:>
Write data to the ring buffer.
<END_TASK>
<USER_TASK:>
Description:
def write(self, data, size=-1):
"""Write data to the ring buffer.
This advances the write index after writing;
calling :meth:`advance_write_index` is *not* necessary.
:param data: Data to write to the buffer.
:type data: CData pointer or buffer or bytes
:param size: The number of elements to be written.
:type size: int, optional
:returns: The number of elements written.
:rtype: int
""" |
try:
data = self._ffi.from_buffer(data)
except TypeError:
pass # input is not a buffer
if size < 0:
size, rest = divmod(self._ffi.sizeof(data), self.elementsize)
if rest:
raise ValueError('data size must be multiple of elementsize')
return self._lib.PaUtil_WriteRingBuffer(self._ptr, data, size) |
<SYSTEM_TASK:>
Read data from the ring buffer into a new buffer.
<END_TASK>
<USER_TASK:>
Description:
def read(self, size=-1):
"""Read data from the ring buffer into a new buffer.
This advances the read index after reading;
calling :meth:`advance_read_index` is *not* necessary.
:param size: The number of elements to be read.
If not specified, all available elements are read.
:type size: int, optional
:returns: A new buffer containing the read data.
Its size may be less than the requested *size*.
:rtype: buffer
""" |
if size < 0:
size = self.read_available
data = self._ffi.new('unsigned char[]', size * self.elementsize)
size = self.readinto(data)
return self._ffi.buffer(data, size * self.elementsize) |
<SYSTEM_TASK:>
Read data from the ring buffer into a user-provided buffer.
<END_TASK>
<USER_TASK:>
Description:
def readinto(self, data):
"""Read data from the ring buffer into a user-provided buffer.
This advances the read index after reading;
calling :meth:`advance_read_index` is *not* necessary.
:param data: The memory where the data should be stored.
:type data: CData pointer or buffer
:returns: The number of elements read, which may be less than
the size of *data*.
:rtype: int
""" |
try:
data = self._ffi.from_buffer(data)
except TypeError:
pass # input is not a buffer
size, rest = divmod(self._ffi.sizeof(data), self.elementsize)
if rest:
raise ValueError('data size must be multiple of elementsize')
return self._lib.PaUtil_ReadRingBuffer(self._ptr, data, size) |
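A hedged round-trip sketch for these methods; the RingBuffer constructor shown here (element size, then element count) is an assumption about the surrounding wrapper class, not something this excerpt confirms.
rb = RingBuffer(elementsize=1, size=16)  # hypothetical constructor for the wrapper class
written = rb.write(b'hello')             # advances the write index
print(written)                           # 5, if there was room in the buffer
print(bytes(rb.read()))                  # b'hello'; advances the read index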
<SYSTEM_TASK:>
close all open connections
<END_TASK>
<USER_TASK:>
Description:
def close_all(self):
"""close all open connections""" |
for host, conns in self._cm.get_all().items():
for h in conns:
self._cm.remove(h)
h.close() |
<SYSTEM_TASK:>
Uses sdmpy to read a given range of integrations from sdm of given scan.
<END_TASK>
<USER_TASK:>
Description:
def read_bdf(sdmfile, scannum, nskip=0, readints=0, bdfdir=''):
""" Uses sdmpy to read a given range of integrations from sdm of given scan.
readints=0 will read all of bdf (skipping nskip).
""" |
assert os.path.exists(sdmfile), 'sdmfile %s does not exist' % sdmfile
sdm = getsdm(sdmfile, bdfdir=bdfdir)
scan = sdm.scan(scannum)
assert scan.bdf.fname, 'bdfstr not defined for scan %d' % scannum
if readints == 0:
readints = scan.bdf.numIntegration - nskip
logger.info('Reading %d ints starting at int %d' % (readints, nskip))
npols = len(sdmpy.scan.sdmarray(sdm['Polarization'][0].corrType))
data = np.empty( (readints, scan.bdf.numBaseline, sum(scan.numchans), npols), dtype='complex64', order='C')
data[:] = scan.bdf.get_data(trange=[nskip, nskip+readints]).reshape(data.shape)
return data |
<SYSTEM_TASK:>
Calculates uvw for each baseline at mid time of a given segment.
<END_TASK>
<USER_TASK:>
Description:
def get_uvw_segment(d, segment=-1):
""" Calculates uvw for each baseline at mid time of a given segment.
d defines pipeline state. assumes segmenttimes defined by RT.set_pipeline.
""" |
# define times to read
if segment != -1:
assert 'segmenttimes' in d, 'd must have segmenttimes defined'
t0 = d['segmenttimes'][segment][0]
t1 = d['segmenttimes'][segment][1]
datetime = qa.time(qa.quantity((t1+t0)/2, 'd'),
form=['ymdhms'], prec=9)[0]
logger.debug('Calculating uvw for segment %d' % (segment))
else:
datetime = 0
(u, v, w) = calc_uvw(d['filename'], d['scan'],
datetime=datetime, bdfdir=d['bdfdir'])
# cast to units of lambda at first channel.
# -1 keeps consistent with ms reading convention
u = u * d['freq_orig'][0] * (1e9/3e8) * (-1)
v = v * d['freq_orig'][0] * (1e9/3e8) * (-1)
w = w * d['freq_orig'][0] * (1e9/3e8) * (-1)
return u.astype('float32'), v.astype('float32'), w.astype('float32') |
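A worked example of the meters-to-wavelengths scaling with made-up values: uvw in meters is multiplied by freq/c (first-channel frequency, here 1.4 GHz) with a sign flip to match the MS reading convention.
import numpy as np
u_m = np.array([100.0, -250.0])   # baseline u coordinates in meters (made up)
freq0 = 1.4                       # first-channel frequency in GHz
u_lambda = u_m * freq0 * (1e9 / 3e8) * (-1)
print(u_lambda)                   # approximately [-466.7, 1166.7] wavelengths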
<SYSTEM_TASK:>
Use sdmpy to get all sources and ra,dec per scan as dict
<END_TASK>
<USER_TASK:>
Description:
def read_sources(sdmname):
""" Use sdmpy to get all sources and ra,dec per scan as dict """ |
sdm = getsdm(sdmname)
sourcedict = {}
for row in sdm['Field']:
src = str(row.fieldName)
sourcenum = int(row.sourceId)
direction = str(row.referenceDir)
(ra,dec) = [float(val) for val in direction.split(' ')[3:]] # skip first two values in string
sourcedict[sourcenum] = {}
sourcedict[sourcenum]['source'] = src
sourcedict[sourcenum]['ra'] = ra
sourcedict[sourcenum]['dec'] = dec
return sourcedict |
<SYSTEM_TASK:>
Use sdmpy to get all scans and info needed for rtpipe as dict
<END_TASK>
<USER_TASK:>
Description:
def read_scans(sdmfile, bdfdir=''):
""" Use sdmpy to get all scans and info needed for rtpipe as dict """ |
sdm = getsdm(sdmfile, bdfdir=bdfdir)
scandict = {}
skippedscans = []
for scan in sdm.scans():
scannum = int(scan.idx)
scandict[scannum] = {}
intentstr = ' '.join(scan.intents)
src = scan.source
scandict[scannum]['source'] = src
scandict[scannum]['intent'] = intentstr
# bdf specific properties
try:
startmjd = scan.bdf.startTime
nints = scan.bdf.numIntegration
interval = scan.bdf.get_integration(0).interval
endmjd = startmjd + (nints*interval)/(24*3600)
bdfstr = scan.bdf.fname
except (AttributeError, IOError):
skippedscans.append(scannum)
else:
scandict[scannum]['startmjd'] = startmjd
scandict[scannum]['endmjd'] = endmjd
scandict[scannum]['duration'] = endmjd-startmjd
scandict[scannum]['nints'] = nints
scandict[scannum]['bdfstr'] = bdfstr
# clear reference to nonexistent BDFs (either bad or not in standard locations)
if (not os.path.exists(scandict[scannum]['bdfstr'])) or ('X1' in bdfstr):
scandict[scannum]['bdfstr'] = None
logger.debug('Invalid bdf for %d of %s' % (scannum, sdmfile) )
if skippedscans:
logger.warning('No BDF found for scans {0}'.format(skippedscans))
return scandict |
<SYSTEM_TASK:>
Wrap sdmpy.SDM to get around schema change error
<END_TASK>
<USER_TASK:>
Description:
def getsdm(*args, **kwargs):
""" Wrap sdmpy.SDM to get around schema change error """ |
try:
sdm = sdmpy.SDM(*args, **kwargs)
except XMLSyntaxError:
kwargs['use_xsd'] = False
sdm = sdmpy.SDM(*args, **kwargs)
return sdm |
<SYSTEM_TASK:>
Start changing light level manually. Direction should be 'up' or 'down'
<END_TASK>
<USER_TASK:>
Description:
def start_change(self, direction):
"""Start changing light level manually. Direction should be 'up' or 'down'""" |
self.logger.info("Dimmer %s start_change: %s", self.device_id, direction)
if direction == 'up':
level = '01'
elif direction == 'down':
level = '00'
else:
self.logger.error("Dimmer %s start_change: %s is invalid, use up or down",
self.device_id, direction)
return False
self.hub.direct_command(self.device_id, '17', level)
success = self.hub.check_success(self.device_id, '17',
self.hub.brightness_to_hex(level))
if success:
self.logger.info("Dimmer %s start_change: Light started changing successfully",
self.device_id)
self.hub.clear_device_command_cache(self.device_id)
else:
self.logger.error("Dimmer %s start_change: Light did not change",
self.device_id)
return success |
<SYSTEM_TASK:>
Stop changing light level manually
<END_TASK>
<USER_TASK:>
Description:
def stop_change(self):
"""Stop changing light level manually""" |
self.logger.info("Dimmer %s stop_change", self.device_id)
self.hub.direct_command(self.device_id, '18', '00')
success = self.hub.check_success(self.device_id, '18', '00')
if success:
self.logger.info("Dimmer %s stop_change: Light stopped changing successfully",
self.device_id)
self.hub.clear_device_command_cache(self.device_id)
else:
self.logger.error("Dimmer %s stop_change: Light did not stop",
self.device_id)
return success |
<SYSTEM_TASK:>
Make dimmer beep. Not all devices support this
<END_TASK>
<USER_TASK:>
Description:
def beep(self):
"""Make dimmer beep. Not all devices support this""" |
self.logger.info("Dimmer %s beep", self.device_id)
self.hub.direct_command(self.device_id, '30', '00')
success = self.hub.check_success(self.device_id, '30', '00')
return success |
<SYSTEM_TASK:>
Return None if we don't have any filter on a pk, the pk if we have one,
<END_TASK>
<USER_TASK:>
Description:
def _get_pk(self):
"""
Return None if we don't have any filter on a pk, the pk if we have one,
or raise a ValueError if we have more than one.
For internal use only.
""" |
pk = None
if self._lazy_collection['pks']:
if len(self._lazy_collection['pks']) > 1:
raise ValueError('Too many pks!')
pk = list(self._lazy_collection['pks'])[0]
return pk |
<SYSTEM_TASK:>
Prepare "sort" options to use when calling the collection, depending
<END_TASK>
<USER_TASK:>
Description:
def _prepare_sort_options(self, has_pk):
"""
Prepare "sort" options to use when calling the collection, depending
on "_sort" and "_sort_limits" attributes
""" |
sort_options = {}
if self._sort is not None and not has_pk:
sort_options.update(self._sort)
if self._sort_limits is not None:
if 'start' in self._sort_limits and 'num' not in self._sort_limits:
self._sort_limits['num'] = -1
elif 'num' in self._sort_limits and 'start' not in self._sort_limits:
self._sort_limits['start'] = 0
sort_options.update(self._sort_limits)
if not sort_options and self._sort is None:
sort_options = None
return sort_options |
<SYSTEM_TASK:>
The final redis call to obtain the values to return from the "final_set"
<END_TASK>
<USER_TASK:>
Description:
def _final_redis_call(self, final_set, sort_options):
"""
The final redis call to obtain the values to return from the "final_set"
with some sort options.
""" |
conn = self.cls.get_connection()
if sort_options is not None:
# a sort, or values, call the SORT command on the set
return conn.sort(final_set, **sort_options)
else:
# no sort, nor values, simply return the full set
return conn.smembers(final_set) |
<SYSTEM_TASK:>
Returns a list of instances for each given pk, respecting the condition
<END_TASK>
<USER_TASK:>
Description:
def _to_instances(self, pks):
"""
Returns a list of instances for each given pk, respecting the condition
about checking or not if a pk exists.
""" |
# we want instances, so create an object for each pk, without
# checking for pk existence if asked
meth = self.cls.lazy_connect if self._instances_skip_exist_test else self.cls
return [meth(pk) for pk in pks] |
<SYSTEM_TASK:>
Called in _collection to prepare results from redis before returning
<END_TASK>
<USER_TASK:>
Description:
def _prepare_results(self, results):
"""
Called in _collection to prepare results from redis before returning
them.
""" |
if self._instances:
results = self._to_instances(results)
else:
results = list(results)
# cache the len for future use
self._len = len(results)
return results |
<SYSTEM_TASK:>
Given a list of set, combine them to create the final set that will be
<END_TASK>
<USER_TASK:>
Description:
def _combine_sets(self, sets, final_set):
"""
Given a list of set, combine them to create the final set that will be
used to make the final redis call.
""" |
self.cls.get_connection().sinterstore(final_set, list(sets))
return final_set |
<SYSTEM_TASK:>
Ask the collection to return a list of instances.
<END_TASK>
<USER_TASK:>
Description:
def instances(self, skip_exist_test=False):
"""
Ask the collection to return a list of instances.
If skip_exist_test is set to True, the instances returned by the
collection won't have their primary key checked for existence.
""" |
self.reset_result_type()
self._instances = True
self._instances_skip_exist_test = skip_exist_test
return self |
<SYSTEM_TASK:>
Prints candidate info in time order above some threshold
<END_TASK>
<USER_TASK:>
Description:
def list_cands(candsfile, threshold=0.):
""" Prints candidate info in time order above some threshold """ |
loc, prop, d0 = pc.read_candidates(candsfile, snrmin=threshold, returnstate=True)
if 'snr2' in d0['features']:
snrcol = d0['features'].index('snr2')
elif 'snr1' in d0['features']:
snrcol = d0['features'].index('snr1')
dmindcol = d0['featureind'].index('dmind')
if len(loc):
snrs = prop[:, snrcol]
times = pc.int2mjd(d0, loc)
times = times - times[0]
logger.info('Getting candidates...')
logger.info('candnum: loc, SNR, DM (pc/cm3), time (s; rel)')
for i in range(len(loc)):
logger.info("%d: %s, %.1f, %.1f, %.1f" % (i, str(loc[i]), prop[i, snrcol], np.array(d0['dmarr'])[loc[i,dmindcol]], times[i])) |
<SYSTEM_TASK:>
Helper function to interact with merged cands file and refine analysis
<END_TASK>
<USER_TASK:>
Description:
def refine_cand(candsfile, candloc=[], candnum=-1, threshold=0, scaledm=2.1, scalepix=2, scaleuv=1.0, chans=[], returndata=False):
""" Helper function to interact with merged cands file and refine analysis
candsfile is merged pkl file
candloc (scan, segment, candint, dmind, dtind, beamnum) is as above.
if no candloc, then it prints out cands above threshold.
""" |
if candnum >= 0:
candlocs, candprops, d0 = pc.read_candidates(candsfile, snrmin=threshold, returnstate=True)
candloc = candlocs[candnum]
candprop = candprops[candnum]
logger.info('Refining cand {0} with features {1}'.format(candloc, candprop))
values = rt.pipeline_refine(d0, candloc, scaledm=scaledm, scalepix=scalepix, scaleuv=scaleuv, chans=chans, returndata=returndata)
return values
elif candloc:
logger.info('Refining cand {0}'.format(candloc))
d0 = pickle.load(open(candsfile, 'rb'))
values = rt.pipeline_refine(d0, candloc, scaledm=scaledm, scalepix=scalepix, scaleuv=scaleuv, chans=chans, returndata=returndata)
return values
else:
return None |