docstring (string, lengths 52-499) | function (string, lengths 67-35.2k) | __index_level_0__ (int64, 52.6k-1.16M) |
---|---|---|
Authenticate the gmusicapi Musicmanager instance.
Parameters:
oauth_filename (str): The filename of the oauth credentials file to use/create for login.
Default: ``oauth``
uploader_id (str): A unique id as a MAC address (e.g. ``'00:11:22:33:AA:BB'``).
This should only be provided in cases where the default (host MAC address incremented by 1) won't work.
Returns:
``True`` on successful login, ``False`` on unsuccessful login.
|
def login(self, oauth_filename="oauth", uploader_id=None):
cls_name = type(self).__name__
oauth_cred = os.path.join(os.path.dirname(OAUTH_FILEPATH), oauth_filename + '.cred')
try:
if not self.api.login(oauth_credentials=oauth_cred, uploader_id=uploader_id):
try:
self.api.perform_oauth(storage_filepath=oauth_cred)
except OSError:
logger.exception("\nUnable to login with specified oauth code.")
self.api.login(oauth_credentials=oauth_cred, uploader_id=uploader_id)
except (OSError, ValueError):
logger.exception("{} authentication failed.".format(cls_name))
return False
if not self.is_authenticated:
logger.warning("{} authentication failed.".format(cls_name))
return False
logger.info("{} authentication succeeded.\n".format(cls_name))
return True
| 846,342 |
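A minimal usage sketch, assuming this login method belongs to a Music Manager wrapper class (called MusicManagerWrapper here for illustration) and that gmusicapi is installed:
# Hypothetical usage; the class name and construction are assumptions.
mm = MusicManagerWrapper()
if mm.login(oauth_filename="oauth", uploader_id=None):
    print("Authenticated with Google Music")
else:
    print("Authentication failed")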
Download Google Music songs.
Parameters:
songs (list or dict): Google Music song dict(s).
template (str): A filepath which can include template patterns.
Returns:
A list of result dictionaries.
::
[
{'result': 'downloaded', 'id': song_id, 'filepath': downloaded[song_id]}, # downloaded
{'result': 'error', 'id': song_id, 'message': error[song_id]} # error
]
|
def download(self, songs, template=None):
if not template:
template = os.getcwd()
songnum = 0
total = len(songs)
results = []
errors = {}
pad = len(str(total))
for result in self._download(songs, template):
song_id = songs[songnum]['id']
songnum += 1
downloaded, error = result
if downloaded:
logger.info(
"({num:>{pad}}/{total}) Successfully downloaded -- {file} ({song_id})".format(
num=songnum, pad=pad, total=total, file=downloaded[song_id], song_id=song_id
)
)
results.append({'result': 'downloaded', 'id': song_id, 'filepath': downloaded[song_id]})
elif error:
title = songs[songnum - 1].get('title', "<empty>")
artist = songs[songnum - 1].get('artist', "<empty>")
album = songs[songnum - 1].get('album', "<empty>")
logger.info(
"({num:>{pad}}/{total}) Error on download -- {title} -- {artist} -- {album} ({song_id})".format(
num=songnum, pad=pad, total=total, title=title, artist=artist, album=album, song_id=song_id
)
)
results.append({'result': 'error', 'id': song_id, 'message': error[song_id]})
if errors:
logger.info("\n\nThe following errors occurred:\n")
for filepath, e in errors.items():
logger.info("{file} | {error}".format(file=filepath, error=e))
logger.info("\nThese files may need to be synced again.\n")
return results
| 846,345 |
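A hedged usage sketch following on from the login example above; songs is assumed to be a list of Google Music song dicts obtained from the wrapper's own listing helpers, and the %suggested% placeholder is the one handled by the template_to_filepath helper shown later in this section:
# Hypothetical usage; the wrapper instance and song list come from elsewhere.
results = mm.download(songs, template="Downloads/%suggested%")
failed = [r for r in results if r['result'] == 'error']
print("{} downloaded, {} failed".format(len(results) - len(failed), len(failed)))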
Configure root logger using a standard stream handler.
Args:
level (string, optional): lowest level to log to the console
Returns:
logging.RootLogger: root logger instance with attached handler
|
def configure_stream(level='WARNING'):
# get the root logger
root_logger = logging.getLogger()
# set the logger level to the same as will be used by the handler
root_logger.setLevel(level)
# customize formatter, align each column
template = "[%(asctime)s] %(name)-25s %(levelname)-8s %(message)s"
formatter = logging.Formatter(template)
# add a basic STDERR handler to the logger
console = logging.StreamHandler()
console.setLevel(level)
console.setFormatter(formatter)
root_logger.addHandler(console)
return root_logger
| 846,352 |
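configure_stream only touches the standard logging module, so it can be exercised directly:
import logging

root_logger = configure_stream(level='INFO')
logging.getLogger('example').info("stream handler attached")  # emitted to stderr in the aligned format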
Create a Transcript based on the vep annotation
Args:
transcript_info (dict): A dict with vep info
Returns:
transcript (puzzle.models.Transcript): A Transcript object
|
def _get_vep_transcript(self, transcript_info):
transcript = Transcript(
hgnc_symbol = transcript_info.get('SYMBOL'),
transcript_id = transcript_info.get('Feature'),
ensembl_id = transcript_info.get('Gene'),
biotype = transcript_info.get('BIOTYPE'),
consequence = transcript_info.get('Consequence'),
strand = transcript_info.get('STRAND'),
sift = transcript_info.get('SIFT'),
polyphen = transcript_info.get('PolyPhen'),
exon = transcript_info.get('EXON'),
HGVSc = transcript_info.get('HGVSc'),
HGVSp = transcript_info.get('HGVSp'),
GMAF = transcript_info.get('GMAF'),
ExAC_MAF = transcript_info.get('ExAC_MAF')
)
return transcript
| 846,392 |
Create a transcript based on the snpeff annotation
Args:
transcript_info (dict): A dict with snpeff info
Returns:
transcript (puzzle.models.Transcript): A Transcript object
|
def _get_snpeff_transcript(self, transcript_info):
transcript = Transcript(
hgnc_symbol = transcript_info.get('Gene_Name'),
transcript_id = transcript_info.get('Feature'),
ensembl_id = transcript_info.get('Gene_ID'),
biotype = transcript_info.get('Transcript_BioType'),
consequence = transcript_info.get('Annotation'),
exon = transcript_info.get('Rank'),
HGVSc = transcript_info.get('HGVS.c'),
HGVSp = transcript_info.get('HGVS.p')
)
return transcript
| 846,393 |
Generates parsed VcfRecord objects.
Typically called in a for loop to process each vcf record in a
VcfReader. VcfReader must be opened in advance and closed when
complete. Skips all headers.
Args:
format_tags: optional collection of FORMAT tags; when provided, each record is passed through modify_format_tag
qualified: When True, sample names are prefixed with file name
Returns:
Parsed VcfRecord
Raises:
StopIteration: when reader is exhausted.
TypeError: if reader is closed.
|
def vcf_records(self, format_tags=None, qualified=False):
if qualified:
sample_names = self.qualified_sample_names
else:
sample_names = self.sample_names
for line in self._file_reader.read_lines():
if line.startswith("#"):
continue
vcf_record = vcf.VcfRecord.parse_record(line, sample_names)
if format_tags:
vcf_record = self.modify_format_tag(vcf_record, format_tags)
yield vcf_record
| 846,442 |
Extracts a single clip according to audioClipSpec.
Arguments:
audioClipSpec (AudioClipSpec): Clip specification
showLogs (bool): Show ffmpeg output
|
def _extractClipData(self, audioClipSpec, showLogs=False):
command = [self._ffmpegPath]
if not showLogs:
command += ['-nostats', '-loglevel', '0']
command += [
'-i', self._audioFilePath,
'-ss', '%.3f' % audioClipSpec.start,
'-t', '%.3f' % audioClipSpec.duration(),
'-c', 'copy',
'-map', '0',
'-acodec', 'libmp3lame',
'-ab', '128k',
'-f', 'mp3'
]
# Add clip TEXT as metadata and set a few more to default
metadata = { self._textMetadataName: audioClipSpec.text }
for k, v in metadata.items():
command.append('-metadata')
command.append("{}='{}'".format(k, v))
command.append('pipe:1')
return subprocess.check_output(command)
| 846,489 |
Return a gemini query
Args:
query_id (str)
|
def gemini_query(self, query_id):
logger.debug("Looking for query with id {0}".format(query_id))
return self.query(GeminiQuery).filter_by(id=query_id).first()
| 846,504 |
Add a user defined gemini query
Args:
name (str)
query (str)
|
def add_gemini_query(self, name, query):
logger.info("Adding query {0} with text {1}".format(name, query))
new_query = GeminiQuery(name=name, query=query)
self.session.add(new_query)
self.save()
return new_query
| 846,505 |
Delete a gemini query
Args:
query_id (str)
|
def delete_gemini_query(self, query_id):
query_obj = self.gemini_query(query_id)
logger.debug("Delete query: {0}".format(query_obj.name_query))
self.session.delete(query_obj)
self.save()
| 846,506 |
Add the genotype calls for the variant
Args:
variant_obj (puzzle.models.Variant)
variant_line (str): A raw vcf variant line
case_obj (puzzle.models.Case)
|
def _add_genotype_calls(self, variant_obj, variant_line, case_obj):
variant_line = variant_line.split('\t')
# Only add individuals if there are genotype calls (columns beyond the 8 fixed vcf columns)
if len(variant_line) > 8:
gt_format = variant_line[8].split(':')
for individual in case_obj.individuals:
sample_id = individual.ind_id
index = individual.ind_index
gt_call = variant_line[9+index].split(':')
raw_call = dict(zip(gt_format, gt_call))
genotype = Genotype(**raw_call)
variant_obj.add_individual(puzzle_genotype(
sample_id = sample_id,
genotype = genotype.genotype,
case_id = case_obj.name,
phenotype = individual.phenotype,
ref_depth = genotype.ref_depth,
alt_depth = genotype.alt_depth,
genotype_quality = genotype.genotype_quality,
depth = genotype.depth_of_coverage,
supporting_evidence = genotype.supporting_evidence,
pe_support = genotype.pe_support,
sr_support = genotype.sr_support,
))
| 846,547 |
Add a case obj with individuals to adapter
Args:
case_obj (puzzle.models.Case)
|
def add_case(self, case_obj):
for ind_obj in case_obj.individuals:
self._add_individual(ind_obj)
logger.debug("Adding case {0} to plugin".format(case_obj.case_id))
self.case_objs.append(case_obj)
if case_obj.tabix_index:
logger.debug("Setting filters.can_filter_range to True")
self.filters.can_filter_range = True
| 846,584 |
Return a Case object
If no case_id is given, return the first case
Args:
case_id (str): A case id
Returns:
A Case object
|
def case(self, case_id=None):
if case_id:
for case in self.case_objs:
if case.case_id == case_id:
return case
else:
if self.case_objs:
return list(self.case_objs)[0]
return Case(case_id='unknown')
| 846,585 |
Return an individual object
Args:
ind_id (str): An individual id
Returns:
individual (puzzle.models.individual)
|
def individual(self, ind_id=None):
for ind_obj in self.individual_objs:
if ind_obj.ind_id == ind_id:
return ind_obj
return None
| 846,586 |
Return information about individuals
Args:
ind_ids (list(str)): List of individual ids
Returns:
individuals (Iterable): Iterable with Individuals
|
def individuals(self, ind_ids=None):
if ind_ids:
for ind_id in ind_ids:
for ind in self.individual_objs:
if ind.ind_id == ind_id:
yield ind
else:
for ind in self.individual_objs:
yield ind
| 846,587 |
Append sql to a gemini query
Args:
query(str): The gemini query
extra_info(str): The text that should be added
Return:
extended_query(str)
|
def build_gemini_query(self, query, extra_info):
if 'WHERE' in query:
return "{0} AND {1}".format(query, extra_info)
else:
return "{0} WHERE {1}".format(query, extra_info)
| 846,602 |
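build_gemini_query is a pure string helper; a quick illustration, where adapter stands in for whatever plugin instance exposes the method:
base = "SELECT * from variants"
q1 = adapter.build_gemini_query(base, "cadd_scaled > 20")
# -> "SELECT * from variants WHERE cadd_scaled > 20"
q2 = adapter.build_gemini_query(q1, "qual > 100")
# -> "SELECT * from variants WHERE cadd_scaled > 20 AND qual > 100"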
Return a specific variant.
We solve this by building a gemini query and running it against the database
Args:
case_id (str): Path to a gemini database
variant_id (int): A gemini variant id
Returns:
variant_obj (dict): A puzzle variant
|
def variant(self, case_id, variant_id):
#Use the gemini id for fast lookup
variant_id = int(variant_id)
gemini_query = "SELECT * from variants WHERE variant_id = {0}".format(
variant_id
)
individuals = []
# Get the individuals for the case
case_obj = self.case(case_id)
for individual in case_obj.individuals:
individuals.append(individual)
self.db = case_obj.variant_source
self.variant_type = case_obj.variant_type
gq = GeminiQuery(self.db)
gq.run(gemini_query)
for gemini_variant in gq:
variant = self._format_variant(
case_id=case_id,
gemini_variant=gemini_variant,
individual_objs=individuals,
index=gemini_variant['variant_id'],
add_all_info = True
)
return variant
return None
| 846,604 |
Return variants found in the gemini database
Args:
case_id (str): The case for which we want to see information
gemini_query (str): What variants should be chosen
Yields:
variant_obj (dict): A Variant formatted dictionary
|
def _variants(self, case_id, gemini_query):
individuals = []
# Get the individuals for the case
case_obj = self.case(case_id)
for individual in case_obj.individuals:
individuals.append(individual)
self.db = case_obj.variant_source
self.variant_type = case_obj.variant_type
gq = GeminiQuery(self.db)
gq.run(gemini_query)
index = 0
for gemini_variant in gq:
variant = None
# Check if variant is non ref in the individuals
is_variant = self._is_variant(gemini_variant, individuals)
if self.variant_type == 'snv' and not is_variant:
variant = None
else:
index += 1
logger.debug("Updating index to: {0}".format(index))
variant = self._format_variant(
case_id=case_id,
gemini_variant=gemini_variant,
individual_objs=individuals,
index=index
)
if variant:
yield variant
| 846,605 |
Make a puzzle variant from a gemini variant
Args:
case_id (str): related case id
gemini_variant (GeminiQueryRow): The gemini variant
individual_objs (list(dict)): A list of Individuals
index(int): The index of the variant
Returns:
variant (dict): A Variant object
|
def _format_variant(self, case_id, gemini_variant, individual_objs,
index=0, add_all_info=False):
chrom = gemini_variant['chrom']
if chrom.startswith('chr') or chrom.startswith('CHR'):
chrom = chrom[3:]
variant_dict = {
'CHROM':chrom,
'POS':str(gemini_variant['start']),
'ID':gemini_variant['rs_ids'],
'REF':gemini_variant['ref'],
'ALT':gemini_variant['alt'],
'QUAL':gemini_variant['qual'],
'FILTER':gemini_variant['filter']
}
variant = Variant(**variant_dict)
# Use the gemini id for fast search
variant.update_variant_id(gemini_variant['variant_id'])
logger.debug("Creating a variant object of variant {0}".format(
variant.variant_id))
variant['index'] = index
# Add the most severe consequence
self._add_most_severe_consequence(variant, gemini_variant)
#Add the impact severity
self._add_impact_severity(variant, gemini_variant)
### POSITION ANNOTATIONS ###
variant.start = int(gemini_variant['start'])
variant.stop = int(gemini_variant['end'])
#Add the sv specific coordinates
if self.variant_type == 'sv':
variant.sv_type = gemini_variant['sub_type']
variant.stop = int(gemini_variant['end'])
self._add_sv_coordinates(variant)
else:
### Consequence and region annotations
#Add the transcript information
self._add_transcripts(variant, gemini_variant)
self._add_thousand_g(variant, gemini_variant)
self._add_exac(variant, gemini_variant)
self._add_gmaf(variant, gemini_variant)
#### Check the impact annotations ####
if gemini_variant['cadd_scaled']:
variant.cadd_score = gemini_variant['cadd_scaled']
# We use the prediction in text
polyphen = gemini_variant['polyphen_pred']
if polyphen:
variant.add_severity('Polyphen', polyphen)
# We use the prediction in text
sift = gemini_variant['sift_pred']
if sift:
variant.add_severity('SIFT', sift)
#Add the genes based on the hgnc symbols
self._add_hgnc_symbols(variant)
if self.variant_type == 'snv':
self._add_genes(variant)
self._add_consequences(variant)
### GENOTYPE ANNOTATIONS ###
#Get the genotype info
if add_all_info:
self._add_genotypes(variant, gemini_variant, case_id, individual_objs)
if self.variant_type == 'sv':
self._add_genes(variant)
return variant
| 846,606 |
Check if the variant is a variation in any of the individuals
Args:
gemini_variant (GeminiQueryRow): The gemini variant
ind_objs (list(puzzle.models.individual)): A list of individuals to check
Returns:
bool : If any of the individuals has the variant
|
def _is_variant(self, gemini_variant, ind_objs):
indexes = (ind.ind_index for ind in ind_objs)
# Check if any individual has a heterozygous (gt_type 1) or homozygous alt (gt_type 3) call
for index in indexes:
gt_call = gemini_variant['gt_types'][index]
if (gt_call == 1 or gt_call == 3):
return True
return False
| 846,607 |
Add the consequences found in all transcripts
Args:
variant_obj (puzzle.models.Variant)
|
def _add_consequences(self, variant_obj):
consequences = set()
for transcript in variant_obj.transcripts:
for consequence in transcript.consequence.split('&'):
consequences.add(consequence)
variant_obj.consequences = list(consequences)
| 846,670 |
Add the impact severity for the most severe consequence
Args:
variant_obj (puzzle.models.Variant)
gemini_variant (GeminiQueryRow)
|
def _add_impact_severity(self, variant_obj, gemini_variant):
gemini_impact = gemini_variant['impact_severity']
if gemini_impact == 'MED':
gemini_impact = 'MEDIUM'
variant_obj.impact_severity = gemini_impact
| 846,671 |
Compare two song collections to find missing songs.
Parameters:
src_songs (list): Google Music song dicts or filepaths of local songs.
dst_songs (list): Google Music song dicts or filepaths of local songs.
Returns:
A list of Google Music song dicts or local song filepaths from source missing in destination.
|
def compare_song_collections(src_songs, dst_songs):
def gather_field_values(song):
return tuple((_normalize_metadata(song[field]) for field in _filter_comparison_fields(song)))
dst_songs_criteria = {gather_field_values(_normalize_song(dst_song)) for dst_song in dst_songs}
return [src_song for src_song in src_songs if gather_field_values(_normalize_song(src_song)) not in dst_songs_criteria]
| 846,680 |
Get filepaths with supported extensions from given filepaths.
Parameters:
filepaths (list or str): Filepath(s) to check.
supported_extensions (tuple or str): Supported file extensions or a single file extension.
max_depth (int): The depth in the directory tree to walk.
A depth of '0' limits the walk to the top directory.
Default: No limit.
Returns:
A list of supported filepaths.
|
def get_supported_filepaths(filepaths, supported_extensions, max_depth=float('inf')):
supported_filepaths = []
for path in filepaths:
if os.name == 'nt' and CYGPATH_RE.match(path):
path = convert_cygwin_path(path)
if os.path.isdir(path):
for root, __, files in walk_depth(path, max_depth):
for f in files:
if f.lower().endswith(supported_extensions):
supported_filepaths.append(os.path.join(root, f))
elif os.path.isfile(path) and path.lower().endswith(supported_extensions):
supported_filepaths.append(path)
return supported_filepaths
| 846,681 |
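A usage sketch, assuming get_supported_filepaths is importable together with the walk_depth and Cygwin helpers it relies on:
import os

paths = [os.path.expanduser('~/Music')]
supported = get_supported_filepaths(paths, ('.mp3', '.flac'), max_depth=2)
print("{} supported files found".format(len(supported)))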
Exclude file paths based on regex patterns.
Parameters:
filepaths (list or str): Filepath(s) to check.
exclude_patterns (list): Python regex patterns to check filepaths against.
Returns:
A list of filepaths to include and a list of filepaths to exclude.
|
def exclude_filepaths(filepaths, exclude_patterns=None):
if not exclude_patterns:
return filepaths, []
exclude_re = re.compile("|".join(pattern for pattern in exclude_patterns))
included_songs = []
excluded_songs = []
for filepath in filepaths:
if exclude_patterns and exclude_re.search(filepath):
excluded_songs.append(filepath)
else:
included_songs.append(filepath)
return included_songs, excluded_songs
| 846,682 |
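exclude_filepaths only needs the re module, so a self-contained example:
filepaths = ['/music/a.mp3', '/music/drafts/b.mp3', '/music/c.flac']
included, excluded = exclude_filepaths(filepaths, exclude_patterns=[r'/drafts/', r'\.flac$'])
# included -> ['/music/a.mp3']
# excluded -> ['/music/drafts/b.mp3', '/music/c.flac']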
Generate a filename for a song based on metadata.
Parameters:
metadata (dict): A metadata dict.
Returns:
A filename.
|
def get_suggested_filename(metadata):
if metadata.get('title') and metadata.get('track_number'):
suggested_filename = '{track_number:0>2} {title}'.format(**metadata)
elif metadata.get('title') and metadata.get('trackNumber'):
suggested_filename = '{trackNumber:0>2} {title}'.format(**metadata)
elif metadata.get('title') and metadata.get('tracknumber'):
suggested_filename = '{tracknumber:0>2} {title}'.format(**metadata)
else:
suggested_filename = '00 {}'.format(metadata.get('title', ''))
return suggested_filename
| 846,687 |
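get_suggested_filename covers the three track-number key spellings shown above; for example:
get_suggested_filename({'title': 'Intro', 'track_number': 2})   # -> '02 Intro'
get_suggested_filename({'title': 'Intro', 'trackNumber': '2'})  # -> '02 Intro'
get_suggested_filename({'title': 'Intro'})                      # -> '00 Intro'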
Create directory structure and file name based on metadata template.
Parameters:
template (str): A filepath which can include template patterns as defined by :param template_patterns:.
metadata (dict): A metadata dict.
template_patterns (dict): A dict of ``pattern: field`` pairs used to replace patterns with metadata field values.
Default: :const TEMPLATE_PATTERNS:
Returns:
A filepath.
|
def template_to_filepath(template, metadata, template_patterns=None):
if template_patterns is None:
template_patterns = TEMPLATE_PATTERNS
metadata = metadata if isinstance(metadata, dict) else _mutagen_fields_to_single_value(metadata)
assert isinstance(metadata, dict)
suggested_filename = get_suggested_filename(metadata).replace('.mp3', '')
if template == os.getcwd() or template == '%suggested%':
filepath = suggested_filename
else:
t = template.replace('%suggested%', suggested_filename)
filepath = _replace_template_patterns(t, metadata, template_patterns)
return filepath
| 846,689 |
Walk a directory tree with configurable depth.
Parameters:
path (str): A directory path to walk.
max_depth (int): The depth in the directory tree to walk.
A depth of '0' limits the walk to the top directory.
Default: No limit.
|
def walk_depth(path, max_depth=float('inf')):
start_level = os.path.abspath(path).count(os.path.sep)
for dir_entry in os.walk(path):
root, dirs, _ = dir_entry
level = root.count(os.path.sep) - start_level
yield dir_entry
if level >= max_depth:
dirs[:] = []
| 846,690 |
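A small check of the depth limiting; with max_depth=1 the walk yields the top directory and its immediate subdirectories, then prunes deeper levels:
import os

for root, dirs, files in walk_depth(os.path.expanduser('~'), max_depth=1):
    print(root)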
Check what kind of file the variant source is
Args:
variant_source (str): Path to variant source
Returns:
file_type (str): 'vcf', 'gemini' or 'unknown'
|
def get_file_type(variant_source):
file_type = 'unknown'
valid_vcf_suffixes = ('.vcf', '.vcf.gz')
if variant_source:
logger.debug("Check file type with file: {0}".format(variant_source))
if variant_source.endswith('.db'):
file_type = 'gemini'
logger.debug("File {0} is a gemini database".format(variant_source))
elif variant_source.endswith(valid_vcf_suffixes):
file_type = 'vcf'
logger.debug("File {0} is a vcf".format(variant_source))
else:
logger.debug("File is in a unknown format")
return file_type
| 846,726 |
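get_file_type only inspects the filename suffix (and logs through the module-level logger), so its behaviour is easy to illustrate:
get_file_type('samples.vcf.gz')  # -> 'vcf'
get_file_type('cohort.db')       # -> 'gemini'
get_file_type('variants.bcf')    # -> 'unknown'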
Try to find out what type of variants exist in a variant source
Args:
variant_source (str): Path to variant source
Returns:
variant_type (str): 'sv' or 'snv'
|
def get_variant_type(variant_source):
file_type = get_file_type(variant_source)
variant_type = 'sv'
if file_type == 'vcf':
variants = VCF(variant_source)
elif file_type == 'gemini':
variants = GeminiQuery(variant_source)
gemini_query = "SELECT * from variants"
variants.run(gemini_query)
# Check the first 1000 variants; if any one is a snv we set the variant_type
# to 'snv'
for i,variant in enumerate(variants):
if file_type == 'vcf':
if variant.is_snp:
variant_type = 'snv'
elif file_type == 'gemini':
if variant['type'] == 'snp':
variant_type = 'snv'
if i > 1000:
break
return variant_type
| 846,727 |
Recognizes and claims Strelka VCFs from the set of all input VCFs.
Each defined caller has a chance to evaluate and claim all the incoming
files as something that it can process.
Args:
file_readers: the collection of currently unclaimed files
Returns:
A tuple of unclaimed readers and StrelkaVcfReaders.
|
def claim(self, file_readers):
(prefix_to_reader,
unclaimed_readers) = self._find_strelka_files(file_readers)
prefix_by_patients = self._split_prefix_by_patient(prefix_to_reader)
self._validate_vcf_readers(prefix_by_patients)
vcf_readers = self._create_vcf_readers(prefix_to_reader)
return (unclaimed_readers, vcf_readers)
| 846,797 |
Add a frequency that will be displayed on the variant level
Args:
name (str): The name of the frequency field
value (float): The value of the frequency field
|
def add_frequency(self, name, value):
logger.debug("Adding frequency {0} with value {1} to variant {2}".format(
name, value, self['variant_id']))
self['frequencies'].append({'label': name, 'value': value})
| 846,825 |
Set the max frequency for the variant
If max_freq is given, use it; otherwise go through all frequencies and
set the highest as self['max_freq']
Args:
max_freq (float): The max frequency
|
def set_max_freq(self, max_freq=None):
if max_freq:
self['max_freq'] = max_freq
else:
for frequency in self['frequencies']:
if self['max_freq']:
if frequency['value'] > self['max_freq']:
self['max_freq'] = frequency['value']
else:
self['max_freq'] = frequency['value']
return
| 846,826 |
Add a severity to the variant
Args:
name (str): The name of the severity
value : The value of the severity
|
def add_severity(self, name, value):
logger.debug("Adding severity {0} with value {1} to variant {2}".format(
name, value, self['variant_id']))
self['severities'].append({name: value})
| 846,827 |
Add the information for an individual
This adds a genotype dict to variant['individuals']
Args:
genotype (dict): A genotype dictionary
|
def add_individual(self, genotype):
logger.debug("Adding genotype {0} to variant {1}".format(
genotype, self['variant_id']))
self['individuals'].append(genotype)
| 846,828 |
Add the information of a transcript
This adds a transcript dict to variant['transcripts']
Args:
transcript (dict): A transcript dictionary
|
def add_transcript(self, transcript):
logger.debug("Adding transcript {0} to variant {1}".format(
transcript, self['variant_id']))
self['transcripts'].append(transcript)
| 846,829 |
Add the information of a gene
This adds a gene dict to variant['genes']
Args:
gene (dict): A gene dictionary
|
def add_gene(self, gene):
logger.debug("Adding gene {0} to variant {1}".format(
gene, self['variant_id']))
self['genes'].append(gene)
| 846,830 |
Add the information of a compound variant
This adds a compound dict to variant['compounds']
Args:
compound (dict): A compound dictionary
|
def add_compound(self, compound):
logger.debug("Adding compound {0} to variant {1}".format(
compound, self['variant_id']))
self['compounds'].append(compound)
| 846,831 |
Generates parsed VcfRecord objects.
Typically called in a for loop to process each vcf record in a
VcfReader. VcfReader must be opened in advance and closed when
complete. Skips all headers.
Args:
qualified: When True, sample names are prefixed with file name
Returns:
Parsed VcfRecord
Raises:
StopIteration: when reader is exhausted.
TypeError: if reader is closed.
|
def vcf_records(self, qualified=False):
if qualified:
sample_names = self.qualified_sample_names
else:
sample_names = self.sample_names
for line in self._file_reader.read_lines():
if line.startswith("#"):
continue
yield VcfRecord.parse_record(line, sample_names)
| 846,852 |
Adds new info field (flag or key=value pair).
Args:
field: String flag (e.g. "SOMATIC") or key-value ("NEW_DP=42")
Raises:
KeyError: if info field already exists
|
def add_info_field(self, field):
if field in self.info_dict:
msg = "New info field [{}] already exists.".format(field)
raise KeyError(msg)
if "=" in field:
key, value = field.split("=")
self.info_dict[key] = value
else:
self.info_dict[field] = field
self._join_info_fields()
| 846,860 |
Appends a new format tag-value for all samples.
Args:
tag_name: string tag name; must not already exist
new_sample_values: dict mapping each existing sample name to the new tag value
Raises:
KeyError: if tag_name to be added already exists
|
def add_sample_tag_value(self, tag_name, new_sample_values):
if tag_name in self.format_tags:
msg = "New format value [{}] already exists.".format(tag_name)
raise KeyError(msg)
if not self._samples_match(new_sample_values):
raise KeyError("Sample name values must match "
"existing sample names")
for sample in self.sample_tag_values.keys():
value = str(new_sample_values[sample])
self.sample_tag_values[sample][tag_name] = value
| 846,867 |
Add all transcripts for a variant
Go through all transcripts found for the variant
Args:
variant_obj (puzzle.models.Variant): The variant to annotate
gemini_variant (GeminiQueryRow): The gemini variant
|
def _add_transcripts(self, variant_obj, gemini_variant):
query = "SELECT * from variant_impacts WHERE variant_id = {0}".format(
gemini_variant['variant_id']
)
gq = GeminiQuery(self.db)
gq.run(query)
for gemini_transcript in gq:
transcript = Transcript(
hgnc_symbol=gemini_transcript['gene'],
transcript_id=gemini_transcript['transcript'],
consequence=gemini_transcript['impact_so'],
biotype=gemini_transcript['biotype'],
polyphen=gemini_transcript['polyphen_pred'],
sift=gemini_transcript['sift_pred'],
HGVSc=gemini_transcript['codon_change'],
HGVSp=', '.join([gemini_transcript['aa_change'] or '', gemini_transcript['aa_length'] or ''])
)
variant_obj.add_transcript(transcript)
| 846,872 |
Return a specific variant.
Args:
case_id (str): Path to vcf file
variant_id (str): A variant id
Returns:
variant (Variant): The variant object for the given id
|
def variant(self, case_id, variant_id):
case_obj = self.case(case_id=case_id)
vcf_file_path = case_obj.variant_source
self.head = get_header(vcf_file_path)
self.vep_header = self.head.vep_columns
self.snpeff_header = self.head.snpeff_columns
handle = VCF(vcf_file_path)
for index, variant in enumerate(handle):
index += 1
line_id = get_variant_id(variant_line=str(variant)).lstrip('chrCHR')
if line_id == variant_id:
return self._format_variants(
variant=variant,
index=index,
case_obj=case_obj,
add_all_info=True
)
return None
| 846,960 |
Check if variants follow the filters
This function will try to make filters faster for the vcf adapter
Args:
vcf_file_path(str): Path to vcf
filters (dict): A dictionary with filters
Yields:
variant (cyvcf2.Variant): A variant that passes the filters
|
def _get_filtered_variants(self, vcf_file_path, filters={}):
genes = set()
consequences = set()
sv_types = set()
if filters.get('gene_ids'):
genes = set([gene_id.strip() for gene_id in filters['gene_ids']])
if filters.get('consequence'):
consequences = set(filters['consequence'])
if filters.get('sv_types'):
sv_types = set(filters['sv_types'])
logger.info("Get variants from {0}".format(vcf_file_path))
if filters.get('range'):
range_str = "{0}:{1}-{2}".format(
filters['range']['chromosome'],
filters['range']['start'],
filters['range']['end'])
vcf = VCF(vcf_file_path)
handle = vcf(range_str)
else:
handle = VCF(vcf_file_path)
for variant in handle:
variant_line = str(variant)
keep_variant = True
if genes and keep_variant:
keep_variant = False
for gene in genes:
if "{0}".format(gene) in variant_line:
keep_variant = True
break
if consequences and keep_variant:
keep_variant = False
for consequence in consequences:
if consequence in variant_line:
keep_variant = True
break
if sv_types and keep_variant:
keep_variant = False
for sv_type in sv_types:
if sv_type in variant_line:
keep_variant = True
break
if keep_variant:
yield variant
| 846,962 |
Return a Variant object
Format variant make a variant that includes enough information for
the variant view.
If add_all_info then all transcripts will be parsed
Args:
variant (cyvcf2.Variant): A variant object
index (int): The index of the variant
case_obj (puzzle.models.Case): A case object
|
def _format_variants(self, variant, index, case_obj, add_all_info=False):
header_line = self.head.header
# Get the individual ids for individuals in vcf file
vcf_individuals = set([ind_id for ind_id in self.head.individuals])
#Create a info dict:
info_dict = dict(variant.INFO)
chrom = variant.CHROM
if chrom.startswith('chr') or chrom.startswith('CHR'):
chrom = chrom[3:]
variant_obj = Variant(
CHROM=chrom,
POS=variant.POS,
ID=variant.ID,
REF=variant.REF,
ALT=variant.ALT[0],
QUAL=variant.QUAL,
FILTER=variant.FILTER,
)
variant_obj._set_variant_id()
logger.debug("Creating a variant object of variant {0}".format(
variant_obj.variant_id))
variant_obj.index = index
logger.debug("Updating index to: {0}".format(
index))
########### Get the coordinates for the variant ##############
variant_obj.start = variant.start
variant_obj.stop = variant.end
# SV variants need to be handled a bit differently since they can be huge;
# it would take too much power to parse all vep/snpeff entries for these.
if self.variant_type == 'sv':
variant_obj.stop = int(info_dict.get('END', variant_obj.POS))
self._add_sv_coordinates(variant_obj)
variant_obj.sv_type = info_dict.get('SVTYPE')
# Special for FindSV software:
# SV specific tag for number of occurrences
occurances = info_dict.get('OCC')
if occurances:
logger.debug("Updating occurances to: {0}".format(
occurances))
variant_obj['occurances'] = float(occurances)
variant_obj.add_frequency('OCC', occurances)
else:
self._add_thousand_g(variant_obj, info_dict)
self._add_cadd_score(variant_obj, info_dict)
self._add_genetic_models(variant_obj, info_dict)
self._add_transcripts(variant_obj, info_dict)
self._add_exac(variant_obj, info_dict)
self._add_hgnc_symbols(variant_obj)
if add_all_info:
self._add_genotype_calls(variant_obj, str(variant), case_obj)
self._add_compounds(variant_obj, info_dict)
self._add_gmaf(variant_obj, info_dict)
self._add_genes(variant_obj)
##### Add consequences ####
self._add_consequences(variant_obj, str(variant))
self._add_most_severe_consequence(variant_obj)
self._add_impact_severity(variant_obj)
self._add_rank_score(variant_obj, info_dict)
variant_obj.set_max_freq()
return variant_obj
| 846,963 |
Get the genes for a variant
Collect the ensembl ids and hgnc symbols from all transcripts and
look up the corresponding genes
Args:
variant (dict): A variant dictionary
Returns:
genes (list): A list of Genes
|
def _get_genes(self, variant):
ensembl_ids = []
hgnc_symbols = []
for transcript in variant.transcripts:
if transcript.ensembl_id:
ensembl_ids.append(transcript.ensembl_id)
if transcript.hgnc_symbol:
hgnc_symbols.append(transcript.hgnc_symbol)
genes = get_gene_info(
ensembl_ids=ensembl_ids,
hgnc_symbols=hgnc_symbols
)
return genes
| 847,000 |
Add the necessary sv coordinates for a variant
Args:
variant (puzzle.models.variant)
|
def _add_sv_coordinates(self, variant):
variant.stop_chrom = variant.CHROM
variant.start = int(variant.POS)
# If we have a translocation:
if ':' in variant.ALT:
other_coordinates = variant.ALT.strip('ACGTN[]').split(':')
variant.stop_chrom = other_coordinates[0].lstrip('chrCHR')
other_position = other_coordinates[1]
# variant.stop = other_position
# Set the length to 'infinity' for translocations
variant.sv_len = float('inf')
variant.sv_type = 'BND'
else:
variant.sv_len = variant.stop - variant.start
variant['cytoband_start'] = get_cytoband_coord(
chrom=variant.CHROM,
pos=variant.start
)
variant['cytoband_stop'] = get_cytoband_coord(
chrom=variant.stop_chrom,
pos=variant.stop
)
| 847,001 |
Initialize a vcf adapter.
When instantiating, all cases are found.
Args:
variant_type(str) : 'snv' or 'sv'
|
def __init__(self, variant_type='snv'):
super(VcfPlugin, self).__init__()
self.individual_objs = []
self.case_objs = []
self.variant_type = variant_type
logger.info("Setting variant type to {0}".format(variant_type))
self.variant_columns = ['CHROM', 'POS', 'ID', 'REF', 'ALT', 'QUAL', 'FILTER']
self.head = None
self.vep_header = None
self.snpeff_header = None
self.filters.can_filter_gene = True
self.filters.can_filter_frequency = True
self.filters.can_filter_cadd = True
self.filters.can_filter_consequence = True
self.filters.can_filter_impact_severity = True
self.filters.can_filter_sv = True
self.filters.can_filter_sv_len = True
self.filters.can_filter_inheritance = True
| 847,002 |
Parse the header and return a header object
Args:
vcf_file_path(str): Path to vcf
Returns:
head: A HeaderParser object
|
def get_header(vcf_file_path):
logger.info("Parsing header of file {0}".format(vcf_file_path))
head = HeaderParser()
handle = get_vcf_handle(infile=vcf_file_path)
# Parse the header
for line in handle:
line = line.rstrip()
if line.startswith('#'):
if line.startswith('##'):
head.parse_meta_data(line)
else:
head.parse_header_line(line)
else:
break
handle.close()
return head
| 847,021 |
Calculate the 2D integral of the 1D surface brightness profile
(i.e., the flux) between rmin and rmax (elliptical radii).
Parameters:
-----------
rmin : minimum integration radius (deg)
rmax : maximum integration radius (deg)
Returns:
--------
integral : Solid angle integral (deg^2)
|
def integrate(self, rmin=0, rmax=np.inf):
if rmin < 0: raise Exception('rmin must be >= 0')
integrand = lambda r: self._pdf(r) * 2*np.pi * r
return scipy.integrate.quad(integrand,rmin,rmax,full_output=True,epsabs=0)[0]
| 847,034 |
Calculate Jenks natural breaks.
Adapted from http://danieljlewis.org/files/2010/06/Jenks.pdf
Credit: Daniel Lewis
Arguments:
data -- Array of values to classify.
num_breaks -- Number of breaks to perform.
|
def jenks(data, num_breaks):
data = numpy.ma.compressed(data)
if len(data) > 1000:
data.sort()
ls = numpy.linspace(0, len(data)-1, 1000)
ls = [int(round(x)) for x in ls]
data_list = data[ls]
else:
data_list = data
data_list.sort()
mat1 = []
for i in range(0, len(data_list) + 1):
temp = []
for j in range(0, num_breaks + 1):
temp.append(0)
mat1.append(temp)
mat2 = []
for i in range(0, len(data_list) + 1):
temp = []
for j in range(0, num_breaks + 1):
temp.append(0)
mat2.append(temp)
for i in range(1, num_breaks + 1):
mat1[1][i] = 1
mat2[1][i] = 0
for j in range(2, len(data_list) + 1):
mat2[j][i] = float('inf')
v = 0.0
for l in range(2, len(data_list) + 1):
s1 = 0.0
s2 = 0.0
w = 0.0
for m in range(1, l + 1):
i3 = l - m + 1
val = float(data_list[i3-1])
s2 += val * val
s1 += val
w += 1
v = s2 - (s1 * s1) / w
i4 = i3 - 1
if i4 != 0:
for j in range(2, num_breaks + 1):
if mat2[l][j] >= (v + mat2[i4][j - 1]):
mat1[l][j] = i3
mat2[l][j] = v + mat2[i4][j - 1]
mat1[l][1] = 1
mat2[l][1] = v
k = len(data_list)
kclass = []
for i in range(0, num_breaks + 1):
kclass.append(0)
kclass[num_breaks] = float(data_list[len(data_list) - 1])
count_num = num_breaks
while count_num >= 2:
id = int((mat1[k][count_num]) - 2)
kclass[count_num - 1] = data_list[id]
k = int((mat1[k][count_num] - 1))
count_num -= 1
return [float(x) for x in kclass][1:]
| 847,171 |
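jenks accepts any numpy-compatible array; a short sanity check on an obviously clustered dataset:
import numpy

data = numpy.array([1, 2, 3, 10, 11, 12, 100, 101, 102])
print(jenks(data, 3))  # upper bounds of the three natural classes, expected [3.0, 12.0, 102.0]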
Calculate quantile breaks.
Arguments:
data -- Array of values to classify.
num_breaks -- Number of breaks to perform.
|
def quantile(data, num_breaks):
def scipy_mquantiles(a, prob=list([.25,.5,.75]), alphap=.4, betap=.4, axis=None, limit=()):
def _quantiles1D(data,m,p):
x = numpy.sort(data.compressed())
n = len(x)
if n == 0:
return numpy.ma.array(numpy.empty(len(p), dtype=float), mask=True)
elif n == 1:
return numpy.ma.array(numpy.resize(x, p.shape), mask=numpy.ma.nomask)
aleph = (n*p + m)
k = numpy.floor(aleph.clip(1, n-1)).astype(int)
gamma = (aleph-k).clip(0,1)
return (1.-gamma)*x[(k-1).tolist()] + gamma*x[k.tolist()]
# Initialization & checks ---------
data = numpy.ma.array(a, copy=False)
if data.ndim > 2:
raise TypeError("Array should be 2D at most !")
#
if limit:
condition = (limit[0] < data) & (data < limit[1])
data[~condition.filled(True)] = numpy.ma.masked
#
p = numpy.array(prob, copy=False, ndmin=1)
m = alphap + p*(1.-alphap-betap)
# Computes quantiles along axis (or globally)
if (axis is None):
return _quantiles1D(data, m, p)
return numpy.ma.apply_along_axis(_quantiles1D, axis, data, m, p)
return scipy_mquantiles(data, numpy.linspace(1.0 / num_breaks, 1, num_breaks))
| 847,172 |
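quantile wraps an inlined copy of scipy's mquantiles; usage mirrors the other classifiers:
import numpy

data = numpy.arange(1, 101)
print(quantile(data, 4))  # breaks near the 25th, 50th, 75th and 100th percentiles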
Calculate equal interval breaks.
Arguments:
data -- Array of values to classify.
num_breaks -- Number of breaks to perform.
|
def equal(data, num_breaks):
step = (numpy.amax(data) - numpy.amin(data)) / num_breaks
return numpy.linspace(numpy.amin(data) + step, numpy.amax(data), num_breaks)
| 847,173 |
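equal is a one-liner over numpy; for example:
import numpy

print(equal(numpy.array([0.0, 10.0]), 5))  # -> [ 2.  4.  6.  8. 10.]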
Read a generic input file into a recarray.
Accepted file formats: [.fits,.fz,.npy,.csv,.txt,.dat]
Parameters:
filename : input file name
kwargs : keyword arguments for the reader
Returns:
recarray : data array
|
def read(filename,**kwargs):
base,ext = os.path.splitext(filename)
if ext in ('.fits','.fz'):
# Abstract fits here...
return fitsio.read(filename,**kwargs)
elif ext in ('.npy',):
return np.load(filename,**kwargs)
elif ext in ('.csv',):
return np.recfromcsv(filename,**kwargs)
elif ext in ('.txt','.dat'):
return np.genfromtxt(filename,**kwargs)
msg = "Unrecognized file type: %s"%filename
raise ValueError(msg)
| 847,196 |
Write a recarray to a specific format.
Accepted file formats: [.fits,.fz,.npy,.csv,.txt,.dat]
Parameters:
filename : output file name
data : the recarray data
kwargs : keyword arguments for the writer
Returns:
ret : writer return (usually None)
|
def write(filename,data,**kwargs):
base,ext = os.path.splitext(filename)
if ext in ('.fits','.fz'):
# Abstract fits here...
return fitsio.write(filename,data,**kwargs)
elif ext in ('.npy',):
return np.save(filename,data,**kwargs)
elif ext in ('.csv',):
return np.savetxt(filename,data,header=','.join(data.dtype.names),delimiter=',',**kwargs)
elif ext in ('.txt','.dat'):
return np.savetxt(filename,data,**kwargs)
msg = "Unrecognized file type: %s"%filename
raise ValueError(msg)
| 847,197 |
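The two helpers are symmetric for the numpy-native formats; a round trip through a .npy file (the FITS branches additionally require fitsio):
import numpy as np

arr = np.array([(1, 2.5), (2, 3.5)], dtype=[('id', 'i4'), ('val', 'f8')])
write('example.npy', arr)
restored = read('example.npy')
assert (restored == arr).all()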
Write a catalog file of the likelihood region including
membership properties.
Parameters:
-----------
loglike : input loglikelihood object
filename : output filename
Returns:
--------
None
|
def write_membership(loglike,filename):
ra,dec = gal2cel(loglike.catalog.lon,loglike.catalog.lat)
name_objid = loglike.config['catalog']['objid_field']
name_mag_1 = loglike.config['catalog']['mag_1_field']
name_mag_2 = loglike.config['catalog']['mag_2_field']
name_mag_err_1 = loglike.config['catalog']['mag_err_1_field']
name_mag_err_2 = loglike.config['catalog']['mag_err_2_field']
# Angular and isochrone separations
sep = angsep(loglike.source.lon,loglike.source.lat,
loglike.catalog.lon,loglike.catalog.lat)
isosep = loglike.isochrone.separation(loglike.catalog.mag_1,loglike.catalog.mag_2)
data = odict()
data[name_objid] = loglike.catalog.objid
data['GLON'] = loglike.catalog.lon
data['GLAT'] = loglike.catalog.lat
data['RA'] = ra
data['DEC'] = dec
data[name_mag_1] = loglike.catalog.mag_1
data[name_mag_err_1] = loglike.catalog.mag_err_1
data[name_mag_2] = loglike.catalog.mag_2
data[name_mag_err_2] = loglike.catalog.mag_err_2
data['COLOR'] = loglike.catalog.color
data['ANGSEP'] = sep
data['ISOSEP'] = isosep
data['PROB'] = loglike.p
# HIERARCH allows header keywords longer than 8 characters
header = []
for param,value in loglike.source.params.items():
card = dict(name='HIERARCH %s'%param.upper(),
value=value.value,
comment=param)
header.append(card)
card = dict(name='HIERARCH %s'%'TS',value=loglike.ts(),
comment='test statistic')
header.append(card)
card = dict(name='HIERARCH %s'%'TIMESTAMP',value=time.asctime(),
comment='creation time')
header.append(card)
fitsio.write(filename,data,header=header,clobber=True)
| 847,204 |
Take the value from a two-dimensional histogram from the bin corresponding to (x, y).
Parameters:
-----------
histogram : The values in the histogram (n,m) (ADW: is this ordering right?)
x : the x-value to take from the hist
y : the y-value to take from the hist
bins_x : the xbin edges, including upper edge (n-dim)
bins_y : the ybin edges, including upper edge (m-dim)
|
def take2D(histogram, x, y, bins_x, bins_y):
histogram = np.array(histogram)
if np.isscalar(x):
x = [x]
if np.isscalar(y):
y = [y]
bins_x[-1] += 1.e-10 * (bins_x[-1] - bins_x[-2]) # Numerical stability
bins_y[-1] += 1.e-10 * (bins_y[-1] - bins_y[-2])
#return np.take(histogram, (histogram.shape[1] * (np.digitize(y, bins_y) - 1)) + (np.digitize(x, bins_x) - 1))
# Return np.nan for entries which are outside the binning range on either axis
index = (histogram.shape[1] * (np.digitize(y, bins_y) - 1)) + (np.digitize(x, bins_x) - 1)
index_clipped = np.clip(index, 0, (histogram.shape[0] * histogram.shape[1]) - 1)
val = np.take(histogram, index_clipped)
outlier_x = np.logical_or(x < bins_x[0], x > bins_x[-1])
outlier_y = np.logical_or(y < bins_y[0], y > bins_y[-1])
outlier = np.logical_or(outlier_x, outlier_y)
val[outlier] = np.nan
return val
| 847,205 |
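Given the flattened-index formula above, rows of the histogram correspond to y-bins and columns to x-bins; a self-contained check (a float histogram is needed so out-of-range entries can be set to NaN):
import numpy as np

bins_x = np.linspace(0.0, 1.0, 5)               # 4 x-bins
bins_y = np.linspace(0.0, 2.0, 3)               # 2 y-bins
hist = np.arange(8, dtype=float).reshape(2, 4)  # hist[y_bin, x_bin]
vals = take2D(hist, [0.1, 0.9, 5.0], [0.5, 1.5, 0.5], bins_x, bins_y)
# -> [0., 7., nan]; the last point lies outside bins_x and maps to NaN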
Numerical Riemann integral of the IMF (stupid simple).
Parameters:
-----------
mass_min: minimum mass bound for integration (solar masses)
mass_max: maximum mass bound for integration (solar masses)
log_mode[True]: use logarithmic steps in stellar mass as opposed to linear
weight[False]: weight the integral by stellar mass
steps: number of numerical integration steps
Returns:
--------
result of integral
|
def integrate(self, mass_min, mass_max, log_mode=True, weight=False, steps=1e4):
if log_mode:
d_log_mass = (np.log10(mass_max) - np.log10(mass_min)) / float(steps)
log_mass = np.linspace(np.log10(mass_min), np.log10(mass_max), int(steps))
mass = 10.**log_mass
if weight:
return np.sum(mass * d_log_mass * self.pdf(mass, log_mode=True))
else:
return np.sum(d_log_mass * self.pdf(mass, log_mode=True))
else:
d_mass = (mass_max - mass_min) / float(steps)
mass = np.linspace(mass_min, mass_max, int(steps))
if weight:
return np.sum(mass * d_mass * self.pdf(mass, log_mode=False))
else:
return np.sum(d_mass * self.pdf(mass, log_mode=False))
| 847,320 |
New method.
Args:
cls (str): class name.
bases (tuple): base classes to inherit from.
dct (dict): class attributes.
Returns:
class: the new created class.
|
def __new__(mcs, cls, bases, dct):
super_new = super(_Metaclass, mcs).__new__
# Also ensure initialization is only performed for subclasses
# of AppSettings (excluding AppSettings class itself).
parents = [b for b in bases if isinstance(b, _Metaclass)]
if not parents:
return super_new(mcs, cls, bases, dct)
new_attr = {}
_meta = dct.pop("Meta", type("Meta", (), {"setting_prefix": ""}))()
_meta.settings = {}
for name, setting in dct.items():
if isinstance(setting, Setting):
_meta.settings[name] = setting
# populate name
if setting.name == "":
setting.name = name
# populate prefix
if setting.prefix == "":
setting.prefix = _meta.setting_prefix
else:
new_attr[name] = setting
new_attr["_meta"] = _meta
new_attr["settings"] = _meta.settings
return super_new(mcs, cls, bases, new_attr)
| 847,362 |
Return a setting object if it is in the ``_meta.settings`` dictionary.
Args:
item (str):
the name of the setting variable (not the setting's name).
Returns:
``Setting``: the setting object.
Raises:
AttributeError if the setting does not exist.
|
def __getattr__(cls, item):
if item in cls._meta.settings.keys():
return cls._meta.settings[item]
raise AttributeError("'%s' class has no attribute '%s'" % (cls.__name__, item))
| 847,363 |
r"""Wrap a string (tyically a regex) with a prefix and suffix (usually a nonconuming word break)
Arguments:
prefix, suffix (str): strings to append to the front and back of the provided string
grouper (2-len str or 2-tuple): characters or strings to separate prefix and suffix from the middle
>>> wrap(r'\w*')
'\\b(\\w*)\\b'
>>> wrap(r'middle', prefix=None)
'(middle)\\b'
|
def wrap(s, prefix=r'\b', suffix=r'\b', grouper='()'):
return ((prefix or '') + try_get(grouper, 0, '') + (s or '') +
try_get(grouper, 1, try_get(grouper, 0, '')) + (suffix or ''))
| 847,449 |
Merge a list of Catalogs.
Parameters:
-----------
catalog_list : List of Catalog objects.
Returns:
--------
catalog : Combined Catalog object
|
def mergeCatalogs(catalog_list):
# Check the columns
for c in catalog_list:
if c.data.dtype.names != catalog_list[0].data.dtype.names:
msg = "Catalog data columns not the same."
raise Exception(msg)
data = np.concatenate([c.data for c in catalog_list])
config = catalog_list[0].config
return Catalog(config,data=data)
| 847,457 |
Class to store information about detected objects. This class
augments the raw data array with several aliases and derived
quantities.
Parameters:
-----------
config : Configuration object
roi : Region of Interest to load catalog data for
data : Data array object
filenames : FITS filenames to read catalog from
Returns:
--------
catalog : The Catalog object
|
def __init__(self, config, roi=None, data=None, filenames=None):
self.config = Config(config)
if data is None:
self._parse(roi,filenames)
else:
self.data = data
self._defineVariables()
| 847,458 |
Write the current object catalog to FITS file.
Parameters:
-----------
outfile : the FITS file to write.
clobber : remove existing file
kwargs : passed to fitsio.write
Returns:
--------
None
|
def write(self, outfile, clobber=True, **kwargs):
fitsio.write(outfile,self.data,clobber=clobber,**kwargs)
| 847,463 |
Parse catalog FITS files into recarray.
Parameters:
-----------
roi : The region of interest; if 'roi=None', read all catalog files
Returns:
--------
None
|
def _parse(self, roi=None, filenames=None):
if (roi is not None) and (filenames is not None):
msg = "Cannot take both roi and filenames"
raise Exception(msg)
if roi is not None:
pixels = roi.getCatalogPixels()
filenames = self.config.getFilenames()['catalog'][pixels]
elif filenames is None:
filenames = self.config.getFilenames()['catalog'].compressed()
else:
filenames = np.atleast_1d(filenames)
if len(filenames) == 0:
msg = "No catalog files found."
raise Exception(msg)
# Load the data
self.data = load_infiles(filenames)
# Apply a selection cut
self._applySelection()
# Cast data to recarray (historical reasons)
self.data = self.data.view(np.recarray)
| 847,464 |
Calculate the surface intensity for each pixel in the interior
region of the ROI. Pixels are adaptively subsampled around the
kernel centroid out to a radius of 'factor * max_pixrad'.
Parameters:
-----------
factor : the radius of the oversample region in units of max_pixrad
Returns:
--------
surface_intensity : the surface intensity at each pixel
|
def calc_surface_intensity(self, factor=10):
# First we calculate the surface intensity at native resolution
pixels = self.roi.pixels_interior
nside_in = self.config['coords']['nside_pixel']
surface_intensity = self.kernel.pdf(pixels.lon,pixels.lat)
# Then we recalculate the surface intensity around the kernel
# centroid at higher resolution
for i in np.arange(1,5):
# Select pixels within the region of interest
nside_out = 2**i * nside_in
radius = factor*np.degrees(hp.max_pixrad(nside_out))
pix = ang2disc(nside_in,self.kernel.lon,self.kernel.lat,
radius,inclusive=True)
# Select pix within the interior region of the ROI
idx = ugali.utils.healpix.index_pix_in_pixels(pix,pixels)
pix = pix[(idx >= 0)]; idx = idx[(idx >= 0)]
# Reset the surface intensity for the subsampled pixels
subpix = ugali.utils.healpix.ud_grade_ipix(pix,nside_in,nside_out)
pix_lon,pix_lat = pix2ang(nside_out,subpix)
surface_intensity[idx]=np.mean(self.kernel.pdf(pix_lon,pix_lat),axis=1)
return surface_intensity
| 847,555 |
Calculate the spatial signal probability for each catalog object.
Parameters:
-----------
None
Returns:
--------
u_spatial : array of spatial probabilities per object
|
def calc_signal_spatial(self):
# Calculate the surface intensity
self.surface_intensity_sparse = self.calc_surface_intensity()
# Calculate the probability per object-by-object level
self.surface_intensity_object = self.kernel.pdf(self.catalog.lon,
self.catalog.lat)
# Spatial component of signal probability
u_spatial = self.surface_intensity_object
return u_spatial
| 847,556 |
Maximize the log-likelihood as a function of richness.
ADW 2018-06-04: Does it make sense to set the richness to the mle?
Parameters:
-----------
atol : absolute tolerance for convergence
maxiter : maximum number of iterations
Returns:
--------
loglike, richness, parabola : the maximum loglike, the mle, and the parabola
|
def fit_richness(self, atol=1.e-3, maxiter=50):
# Check whether the signal probability for all objects are zero
# This can occur for finite kernels on the edge of the survey footprint
if np.isnan(self.u).any():
logger.warning("NaN signal probability found")
return 0., 0., None
if not np.any(self.u):
logger.warning("Signal probability is zero for all objects")
return 0., 0., None
if self.f == 0:
logger.warning("Observable fraction is zero")
return 0., 0., None
# Richness corresponding to 0, 1, and 10 observable stars
richness = np.array([0., 1./self.f, 10./self.f])
loglike = np.array([self.value(richness=r) for r in richness])
found_maximum = False
iteration = 0
while not found_maximum:
parabola = ugali.utils.parabola.Parabola(richness, 2.*loglike)
if parabola.vertex_x < 0.:
found_maximum = True
else:
richness = np.append(richness, parabola.vertex_x)
loglike = np.append(loglike, self.value(richness=richness[-1]))
if np.fabs(loglike[-1] - np.max(loglike[0: -1])) < atol:
found_maximum = True
iteration+=1
if iteration > maxiter:
logger.warning("Maximum number of iterations reached")
break
index = np.argmax(loglike)
return loglike[index], richness[index], parabola
| 847,557 |
Write a catalog file of the likelihood region including
membership properties.
Parameters:
-----------
filename : output filename
Returns:
--------
None
|
def write_membership(self,filename):
# Column names
name_objid = self.config['catalog']['objid_field']
name_mag_1 = self.config['catalog']['mag_1_field']
name_mag_2 = self.config['catalog']['mag_2_field']
name_mag_err_1 = self.config['catalog']['mag_err_1_field']
name_mag_err_2 = self.config['catalog']['mag_err_2_field']
# Coordinate conversion
#ra,dec = gal2cel(self.catalog.lon,self.catalog.lat)
glon,glat = self.catalog.glon_glat
ra,dec = self.catalog.ra_dec
# Angular and isochrone separations
sep = angsep(self.source.lon,self.source.lat,
self.catalog.lon,self.catalog.lat)
isosep = self.isochrone.separation(self.catalog.mag_1,self.catalog.mag_2)
# If size becomes an issue we can make everything float32
data = odict()
data[name_objid] = self.catalog.objid
data['GLON'] = glon
data['GLAT'] = glat
data['RA'] = ra
data['DEC'] = dec
data[name_mag_1] = self.catalog.mag_1
data[name_mag_err_1] = self.catalog.mag_err_1
data[name_mag_2] = self.catalog.mag_2
data[name_mag_err_2] = self.catalog.mag_err_2
data['COLOR'] = self.catalog.color
data['ANGSEP'] = sep.astype(np.float32)
data['ISOSEP'] = isosep.astype(np.float32)
data['PROB'] = self.p.astype(np.float32)
# HIERARCH allows header keywords longer than 8 characters
header = []
for param,value in self.source.params.items():
card = dict(name='HIERARCH %s'%param.upper(),
value=value.value,
comment=param)
header.append(card)
card = dict(name='HIERARCH %s'%'TS',value=self.ts(),
comment='test statistic')
header.append(card)
card = dict(name='HIERARCH %s'%'TIMESTAMP',value=time.asctime(),
comment='creation time')
header.append(card)
fitsio.write(filename,data,header=header,clobber=True)
| 847,559 |
Decorator that stores the result of the decorated function in the
user's results cache until the batch completes. Keyword arguments are
not yet supported.
Arguments:
func (callable(*a)): The function whose results we want
to store. The positional arguments, ``a``, are used as cache
keys.
Returns:
callable(*a): The memoising version of ``func``.
|
def memoise(cls, func):
@functools.wraps(func)
def f(*a):
for arg in a:
if isinstance(arg, User):
user = arg
break
else:
raise ValueError("One position argument must be a User")
func_key = (func, tuple(a))
cache = cls.get_cache(user)
if func_key not in cache:
cache[func_key] = func(*a)
return cache[func_key]
return f
| 847,565 |
Call method.
Args:
name (str): the value's name.
value (object): the value to check.
Raises:
ValueError: if value is not type base_type.
|
def __call__(self, name, value):
if not isinstance(value, self.base_type):
raise ValueError("%s must be %s, not %s" % (name, self.base_type, value.__class__))
| 847,806 |
Initialization method.
Args:
minimum (int): a minimum value (included).
maximum (int): a maximum value (included).
|
def __init__(self, minimum=None, maximum=None):
super(IntegerTypeChecker, self).__init__(base_type=int)
self.minimum = minimum
self.maximum = maximum
| 847,807 |
Call method.
Args:
name (str): the value's name.
value (int): the value to check.
Raises:
ValueError: if value is not type int.
ValueError: if value is less than minimum.
ValueError: if value is more than maximum.
|
def __call__(self, name, value):
super(IntegerTypeChecker, self).__call__(name, value)
if isinstance(self.minimum, int):
if value < self.minimum:
raise ValueError("%s must be greater or equal %s" % (name, self.minimum))
if isinstance(self.maximum, int):
if value > self.maximum:
raise ValueError("%s must be less or equal %s" % (name, self.maximum))
| 847,808 |
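A sketch of how these checker objects are used; the call raises on violation and returns None otherwise:
checker = IntegerTypeChecker(minimum=0, maximum=10)
checker("retries", 3)   # passes silently
checker("retries", 42)  # raises ValueError: retries must be less or equal 10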
Initialization method.
Args:
minimum (float): a minimum value (included).
maximum (float): a maximum value (included).
|
def __init__(self, minimum=None, maximum=None):
super(FloatTypeChecker, self).__init__(base_type=float)
self.minimum = minimum
self.maximum = maximum
| 847,809 |
Call method.
Args:
name (str): the value's name.
value (float): the value to check.
Raises:
ValueError: if value is not type float.
ValueError: if value is less than minimum.
ValueError: if value is more than maximum.
|
def __call__(self, name, value):
super(FloatTypeChecker, self).__call__(name, value)
if isinstance(self.minimum, float):
if value < self.minimum:
raise ValueError("%s must be greater or equal %s" % (name, self.minimum))
if isinstance(self.maximum, float):
if value > self.maximum:
raise ValueError("%s must be less or equal %s" % (name, self.maximum))
| 847,810 |
Initialization method.
Args:
iter_type (type): the type of the iterable object.
item_type (type): the type of the items inside the object.
min_length (int): a minimum length (included).
max_length (int): a maximum length (included).
empty (bool): whether emptiness is allowed.
|
def __init__(self, iter_type, item_type=None, min_length=None, max_length=None, empty=True):
super(IterableTypeChecker, self).__init__(base_type=iter_type)
self.item_type = item_type
self.min_length = min_length
self.max_length = max_length
self.empty = empty
| 847,811 |
Call method.
Args:
name (str): the value's name.
value (iterable): the value to check.
Raises:
ValueError: if value is not type iter_type.
ValueError: if any item in value is not type item_type.
ValueError: if value's length is less than min_length.
ValueError: if value's length is more than max_length.
ValueError: if value's length is 0 and emptiness is not allowed.
|
def __call__(self, name, value):
super(IterableTypeChecker, self).__call__(name, value)
if isinstance(self.item_type, type):
if not all(isinstance(o, self.item_type) for o in value):
raise ValueError("All elements of %s must be %s" % (name, self.item_type))
if isinstance(self.min_length, int):
if len(value) < self.min_length:
raise ValueError("%s must be longer than %s (or equal)" % (name, self.min_length))
if isinstance(self.max_length, int):
if len(value) > self.max_length:
raise ValueError("%s must be shorter than %s (or equal)" % (name, self.max_length))
if len(value) == 0 and not self.empty:
raise ValueError("%s must not be empty" % name)
| 847,812 |
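The iterable checker combines the same pieces; a hedged usage sketch with the same assumed import path:

from typecheckers import IterableTypeChecker

check_names = IterableTypeChecker(iter_type=list, item_type=str,
                                  min_length=1, max_length=5, empty=False)
check_names("names", ["alice", "bob"])    # passes
try:
    check_names("names", ["alice", 42])   # 42 is not a str
except ValueError as error:
    print(error)   # "All elements of names must be <class 'str'>"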
Initialization method.
Args:
min_length (int): minimum length of the string (included).
max_length (int): maximum length of the string (included).
empty (bool): whether empty string is allowed.
|
def __init__(self, min_length=None, max_length=None, empty=True):
super(StringTypeChecker, self).__init__(
iter_type=str, min_length=min_length, max_length=max_length, empty=empty
)
| 847,813 |
Initialization method.
Args:
item_type (type): the type of the items inside the list.
min_length (int): minimum length of the list (included).
max_length (int): maximum length of the list (included).
empty (bool): whether empty list is allowed.
|
def __init__(self, item_type=None, min_length=None, max_length=None, empty=True):
super(ListTypeChecker, self).__init__(
iter_type=list, item_type=item_type, min_length=min_length, max_length=max_length, empty=empty
)
| 847,814 |
Initialization method.
Args:
item_type (type): the type of the items inside the set.
min_length (int): minimum length of the set (included).
max_length (int): maximum length of the set (included).
empty (bool): whether empty set is allowed.
|
def __init__(self, item_type=None, min_length=None, max_length=None, empty=True):
super(SetTypeChecker, self).__init__(
iter_type=set, item_type=item_type, min_length=min_length, max_length=max_length, empty=empty
)
| 847,815 |
Initialization method.
Args:
item_type (type): the type of the items inside the tuple.
min_length (int): minimum length of the tuple (included).
max_length (int): maximum length of the tuple (included).
empty (bool): whether empty tuple is allowed.
|
def __init__(self, item_type=None, min_length=None, max_length=None, empty=True):
super(TupleTypeChecker, self).__init__(
iter_type=tuple, item_type=item_type, min_length=min_length, max_length=max_length, empty=empty
)
| 847,816 |
Initialization method.
Args:
key_type (type): the type of the dict keys.
value_type (type): the type of the dict values.
min_length (int): minimum length of the dict (included).
max_length (int): maximum length of the dict (included).
empty (bool): whether empty dict is allowed.
|
def __init__(self, key_type=None, value_type=None, min_length=None, max_length=None, empty=True):
super(DictTypeChecker, self).__init__(base_type=dict)
self.key_type = key_type
self.value_type = value_type
self.min_length = min_length
self.max_length = max_length
self.empty = empty
| 847,817 |
Initialization method.
Args:
empty (bool): whether an empty value is allowed.
|
def __init__(self, empty=True):
super(ObjectTypeChecker, self).__init__(empty=empty)
| 847,819 |
Call method.
Args:
name (str): the value's name.
value (str): the value to check.
Raises:
ValueError: if value is not type str.
|
def __call__(self, name, value):
super(ObjectTypeChecker, self).__call__(name, value)
| 847,820 |
Sum an array of magnitudes in flux space.
Parameters:
-----------
mags : array of magnitudes
weights : array of weights for each magnitude (i.e. from a pdf)
Returns:
--------
sum_mag : the summed magnitude of all the stars
|
def sum_mags(mags, weights=None):
flux = 10**(-np.asarray(mags) / 2.5)
if weights is None:
return -2.5 * np.log10(np.sum(flux))
else:
return -2.5 * np.log10(np.sum(weights*flux))
| 847,844 |
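A self-contained sanity check of the flux-space summation (the function body is repeated verbatim so the snippet runs on its own): two equal magnitudes combine to a value -2.5*log10(2) ≈ 0.753 mag brighter.

import numpy as np

def sum_mags(mags, weights=None):
    # Convert to flux, sum (optionally weighted), convert back to a magnitude.
    flux = 10**(-np.asarray(mags) / 2.5)
    if weights is None:
        return -2.5 * np.log10(np.sum(flux))
    return -2.5 * np.log10(np.sum(weights * flux))

print(sum_mags([20.0, 20.0]))           # ~19.247: two equal stars, 0.753 mag brighter
print(sum_mags([20.0, 25.0]))           # ~19.989: the faint star barely contributes
print(sum_mags([20.0, 20.0], [1, 0]))   # 20.0: a zero weight removes the second star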
Compute the stellar mass (Msun; average per star). The PDF comes
from the IMF, but the weighting uses the actual (present-day) stellar mass.
Parameters:
-----------
mass_min : Minimum mass to integrate the IMF
steps : Number of steps to sample the isochrone
Returns:
--------
mass : Stellar mass [Msun]
|
def stellar_mass(self, mass_min=0.1, steps=10000):
mass_max = self.mass_init_upper_bound
d_log_mass = (np.log10(mass_max) - np.log10(mass_min)) / float(steps)
log_mass = np.linspace(np.log10(mass_min), np.log10(mass_max), steps)
mass = 10.**log_mass
if mass_min < np.min(self.mass_init):
mass_act_interpolation = scipy.interpolate.interp1d(np.insert(self.mass_init, 0, mass_min),
np.insert(self.mass_act, 0, mass_min))
else:
mass_act_interpolation = scipy.interpolate.interp1d(self.mass_init, self.mass_act)
mass_act = mass_act_interpolation(mass)
return np.sum(mass_act * d_log_mass * self.imf.pdf(mass, log_mode=True))
| 847,851 |
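The integration scheme above (mass × log-mass bin width × IMF pdf, summed over a log-spaced grid) can be illustrated with a stand-alone stand-in. The Salpeter-like power-law pdf and the assumption that the actual mass equals the initial mass are illustrative only; they replace `self.imf.pdf` and the isochrone interpolation.

import numpy as np

def mean_stellar_mass(mass_min=0.1, mass_max=100.0, steps=10000, alpha=2.35):
    # Integrate m * pdf(log m) over a log-spaced grid, mirroring the method above.
    # The Salpeter-like pdf (alpha=2.35) stands in for self.imf.pdf(mass, log_mode=True)
    # and the actual mass is taken equal to the initial mass -- both are assumptions.
    d_log_mass = (np.log10(mass_max) - np.log10(mass_min)) / float(steps)
    log_mass = np.linspace(np.log10(mass_min), np.log10(mass_max), steps)
    mass = 10.**log_mass
    pdf = mass**(1.0 - alpha)          # dN/dlog(m) ~ m^(1 - alpha)
    pdf /= np.sum(pdf * d_log_mass)    # normalize so the pdf integrates to one
    return np.sum(mass * d_log_mass * pdf)

print(mean_stellar_mass())  # ~0.35 Msun for a Salpeter IMF truncated at 0.1 Msun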
Calculate the absolute visual magnitude (Mv) from the richness
by transforming the isochrone in the SDSS system and using the
g,r -> V transform equations from Jester 2005
[astro-ph/0506022].
Parameters:
-----------
richness : isochrone normalization parameter
steps : number of isochrone sampling steps
Returns:
--------
abs_mag : Absolute magnitude (Mv)
|
def absolute_magnitude(self, richness=1, steps=1e4):
# Using the SDSS g,r -> V from Jester 2005 [astro-ph/0506022]
# for stars with R-I < 1.15
# V = g_sdss - 0.59*(g_sdss - r_sdss) - 0.01
# Create a copy of the isochrone in the SDSS system
params = {k:v.value for k,v in self._params.items()}
params.update(band_1='g',band_2='r',survey='sdss')
iso = self.__class__(**params)
# g, r are absolute magnitude
mass_init, mass_pdf, mass_act, sdss_g, sdss_r = iso.sample(mass_steps=steps)
V = jester_mag_v(sdss_g,sdss_r)
# Sum the V-band absolute magnitudes
return sum_mags(V,weights=mass_pdf*richness)
| 847,854 |
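The `jester_mag_v` helper is assumed to implement the transform quoted in the comments (V = g_sdss - 0.59*(g_sdss - r_sdss) - 0.01); a stand-alone version is handy for checking values.

import numpy as np

def jester_mag_v(g, r):
    # SDSS g,r -> Johnson V for stars with R-I < 1.15 (Jester et al. 2005).
    return g - 0.59 * (g - r) - 0.01

g = np.array([5.0, 6.0])
r = np.array([4.7, 5.6])
print(jester_mag_v(g, r))  # [4.813 5.754]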
Simulate a set of stellar magnitudes (no uncertainty) for a
satellite of a given stellar mass and distance.
Parameters:
-----------
stellar_mass : the total stellar mass of the system (Msun)
distance_modulus : distance modulus of the system (if None takes from isochrone)
kwargs : passed to iso.imf.sample
Returns:
--------
mag_1, mag_2 : simulated magnitudes with length stellar_mass/iso.stellar_mass()
|
def simulate(self, stellar_mass, distance_modulus=None, **kwargs):
if distance_modulus is None: distance_modulus = self.distance_modulus
# Total number of stars in system
n = int(round(stellar_mass / self.stellar_mass()))
f_1 = scipy.interpolate.interp1d(self.mass_init, self.mag_1)
f_2 = scipy.interpolate.interp1d(self.mass_init, self.mag_2)
mass_init_sample = self.imf.sample(n, np.min(self.mass_init), np.max(self.mass_init), **kwargs)
mag_1_sample, mag_2_sample = f_1(mass_init_sample), f_2(mass_init_sample)
return mag_1_sample + distance_modulus, mag_2_sample + distance_modulus
| 847,856 |
Return a 2D histogram of the isochrone in mag-mag space.
Parameters:
-----------
distance_modulus : distance modulus to calculate histogram at
delta_mag : magnitude bin size
steps : number of steps at which to sample the isochrone
Returns:
--------
isochrone_pdf : weighted pdf of the isochrone in each bin
bins_mag_1 : bin edges for the first magnitude
bins_mag_2 : bin edges for the second magnitude
|
def histogram2d(self,distance_modulus=None,delta_mag=0.03,steps=10000):
if distance_modulus is not None:
self.distance_modulus = distance_modulus
# Isochrone will be binned, so might as well sample lots of points
mass_init,mass_pdf,mass_act,mag_1,mag_2 = self.sample(mass_steps=steps)
        #logger.warning("Fudging intrinsic dispersion in isochrone.")
#mag_1 += np.random.normal(scale=0.02,size=len(mag_1))
#mag_2 += np.random.normal(scale=0.02,size=len(mag_2))
# We cast to np.float32 to save memory
bins_mag_1 = np.arange(self.mod+mag_1.min() - (0.5*delta_mag),
self.mod+mag_1.max() + (0.5*delta_mag),
delta_mag).astype(np.float32)
bins_mag_2 = np.arange(self.mod+mag_2.min() - (0.5*delta_mag),
self.mod+mag_2.max() + (0.5*delta_mag),
delta_mag).astype(np.float32)
# ADW: Completeness needs to go in mass_pdf here...
isochrone_pdf = np.histogram2d(self.mod + mag_1,
self.mod + mag_2,
bins=[bins_mag_1, bins_mag_2],
weights=mass_pdf)[0].astype(np.float32)
return isochrone_pdf, bins_mag_1, bins_mag_2
| 847,862 |
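A self-contained sketch of the same binning idea: `np.histogram2d` with per-point pdf weights turns a magnitude-magnitude scatter into a weighted density. Synthetic inputs stand in for the sampled isochrone.

import numpy as np

rng = np.random.default_rng(0)
mag_1 = rng.uniform(18.0, 24.0, size=1000)
mag_2 = mag_1 - rng.uniform(0.0, 1.0, size=1000)   # crude color offset
mass_pdf = rng.uniform(size=1000)
mass_pdf /= mass_pdf.sum()

delta_mag = 0.03
bins_mag_1 = np.arange(mag_1.min() - 0.5*delta_mag, mag_1.max() + 0.5*delta_mag, delta_mag)
bins_mag_2 = np.arange(mag_2.min() - 0.5*delta_mag, mag_2.max() + 0.5*delta_mag, delta_mag)

# With pdf weights, each bin holds summed probability rather than raw counts.
pdf, _, _ = np.histogram2d(mag_1, mag_2, bins=[bins_mag_1, bins_mag_2], weights=mass_pdf)
print(pdf.shape, pdf.sum())  # total is ~1, up to points falling outside the bin edges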
Calculate the separation between a specific point and the
isochrone in magnitude-magnitude space, using 1D interpolations
of the isochrone (mag_2 as a function of mag_1 and vice versa).
ADW: Could speed this up...
Parameters:
-----------
mag_1 : The magnitude of the test points in the first band
mag_2 : The magnitude of the test points in the second band
Returns:
--------
sep : Minimum separation between test points and isochrone interpolation
|
def separation(self, mag_1, mag_2):
iso_mag_1 = self.mag_1 + self.distance_modulus
iso_mag_2 = self.mag_2 + self.distance_modulus
def interp_iso(iso_mag_1,iso_mag_2,mag_1,mag_2):
interp_1 = scipy.interpolate.interp1d(iso_mag_1,iso_mag_2,bounds_error=False)
interp_2 = scipy.interpolate.interp1d(iso_mag_2,iso_mag_1,bounds_error=False)
dy = interp_1(mag_1) - mag_2
dx = interp_2(mag_2) - mag_1
dmag_1 = np.fabs(dx*dy) / (dx**2 + dy**2) * dy
dmag_2 = np.fabs(dx*dy) / (dx**2 + dy**2) * dx
return dmag_1, dmag_2
# Separate the various stellar evolution stages
if np.issubdtype(self.stage.dtype,np.number):
sel = (self.stage < self.hb_stage)
else:
sel = (self.stage != self.hb_stage)
# First do the MS/RGB
rgb_mag_1 = iso_mag_1[sel]
rgb_mag_2 = iso_mag_2[sel]
dmag_1,dmag_2 = interp_iso(rgb_mag_1,rgb_mag_2,mag_1,mag_2)
# Then do the HB (if it exists)
if not np.all(sel):
hb_mag_1 = iso_mag_1[~sel]
hb_mag_2 = iso_mag_2[~sel]
hb_dmag_1,hb_dmag_2 = interp_iso(hb_mag_1,hb_mag_2,mag_1,mag_2)
dmag_1 = np.nanmin([dmag_1,hb_dmag_1],axis=0)
dmag_2 = np.nanmin([dmag_2,hb_dmag_2],axis=0)
#return dmag_1,dmag_2
return np.sqrt(dmag_1**2 + dmag_2**2)
| 847,866 |
Aggregates the items that this user has purchased.
Arguments:
cart_status (int or Iterable(int)): the cart status (or statuses) to match against.
category (Optional[models.inventory.Category]): the category
of items to restrict to.
Returns:
[ProductAndQuantity, ...]: A list of product-quantity pairs,
aggregating like products from across multiple invoices.
|
def _items(self, cart_status, category=None):
if not isinstance(cart_status, Iterable):
cart_status = [cart_status]
status_query = (
Q(productitem__cart__status=status) for status in cart_status
)
in_cart = Q(productitem__cart__user=self.user)
in_cart = in_cart & reduce(operator.__or__, status_query)
quantities_in_cart = When(
in_cart,
then="productitem__quantity",
)
quantities_or_zero = Case(
quantities_in_cart,
default=Value(0),
)
products = inventory.Product.objects
if category:
products = products.filter(category=category)
products = products.select_related("category")
products = products.annotate(quantity=Sum(quantities_or_zero))
products = products.filter(quantity__gt=0)
out = []
for prod in products:
out.append(ProductAndQuantity(prod, prod.quantity))
return out
| 847,881 |
Aggregates the items that this user has purchased.
Arguments:
category (Optional[models.inventory.Category]): the category
of items to restrict to.
Returns:
[ProductAndQuantity, ...]: A list of product-quantity pairs,
aggregating like products from across multiple invoices.
|
def items_purchased(self, category=None):
return self._items(commerce.Cart.STATUS_PAID, category=category)
| 847,883 |
Render a color map (image) of a matrix or sequence of Matrix objects.
A color map is like a contour map, except that the "height" or "value" of each matrix element
is used to select a color from a continuous spectrum of colors (for a heatmap, white is the maximum and red is intermediate).
Arguments:
mat (np.matrix or np.array or list of list): the matrix to be rendered as a color map
|
def __init__(self, mat, **kwargs):
# try:
# self.colormaps = [ColorMap(m, cmap=cmap, pixelspervalue=pixelspervalue,
# minvalue=minvalue, maxvalue=maxvalue) for m in mat]
# except:
# pass
# # raise ValueError("Don't know how to display ColorMaps for a sequence of type {}".format(type(mat)))
try:
mat = np.array(mat.values)
except AttributeError:
try:
mat = np.array(mat)
except ValueError:
pass
if not isinstance(mat, np.ndarray):
raise ValueError("Don't know how to display a ColorMap for a matrix of type {}".format(type(mat)))
kwargs['vmin'] = kwargs.get('vmin', np.amin(mat))
kwargs['vmax'] = kwargs.get('vmax', np.amax(mat))
kwargs['cmap'] = kwargs.get('cmap', 'bone') # 'hot', 'greens', 'blues'
kwargs['linewidths'] = kwargs.get('linewidths', 0.25)
kwargs['square'] = kwargs.get('square', True)
sb.heatmap(mat, **kwargs)
| 848,326 |
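A minimal rendering of the same call, using the keyword defaults chosen above (seaborn and matplotlib assumed installed):

import numpy as np
import seaborn as sb
import matplotlib.pyplot as plt

mat = np.random.rand(8, 8)
sb.heatmap(mat, vmin=mat.min(), vmax=mat.max(), cmap='bone',
           linewidths=0.25, square=True)
plt.show()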
Initialize a configuration object from a filename or a dictionary.
Provides functionality to merge with a default configuration.
Parameters:
config: filename, dict, or Config object (deep copied)
default: default configuration to merge
Returns:
config
|
def __init__(self, config, default=None):
self.update(self._load(default))
self.update(self._load(config))
self._formatFilepaths()
# For back-compatibility...
self.params = self
# Run some basic validation
# ADW: This should be run after creating filenames
self._validate()
# Filenames from this config (masked by existence)
# ADW: We should not recreate filenames if they already exist
# in the input config
if not hasattr(self,'filenames'):
try:
self.filenames = self._createFilenames()
except:
exc_type,exc_value,exc_traceback = sys.exc_info()
logger.warning("%s %s"%(exc_type,exc_value))
logger.warning("Filenames could not be created for config.")
| 848,369 |
Load this config from an existing config
Parameters:
-----------
config : filename, config object, or dict to load
Returns:
--------
params : configuration parameters
|
def _load(self, config):
if isstring(config):
self.filename = config
params = yaml.load(open(config))
elif isinstance(config, Config):
# This is the copy constructor...
self.filename = config.filename
params = copy.deepcopy(config)
elif isinstance(config, dict):
params = copy.deepcopy(config)
elif config is None:
params = {}
else:
raise Exception('Unrecognized input')
return params
| 848,370 |
Write a copy of this config object.
Parameters:
-----------
outfile : output filename
Returns:
--------
None
|
def write(self, filename):
ext = os.path.splitext(filename)[1]
writer = open(filename, 'w')
if ext == '.py':
writer.write(pprint.pformat(self))
elif ext == '.yaml':
writer.write(yaml.dump(self))
else:
writer.close()
raise Exception('Unrecognized config format: %s'%ext)
writer.close()
| 848,373 |
Create a masked records array of all filenames for the given set of
pixels and store the existence of those files in the mask values.
Parameters:
-----------
None
Returns:
--------
recarray : masked record array of filenames ('pix', 'catalog', 'mask_1', 'mask_2')
|
def _createFilenames(self):
nside_catalog = self['coords']['nside_catalog']
npix = hp.nside2npix(nside_catalog)
pixels = np.arange(npix)
catalog_dir = self['catalog']['dirname']
catalog_base = self['catalog']['basename']
catalog_path = os.path.join(catalog_dir,catalog_base)
mask_dir = self['mask']['dirname']
mask_base_1 = self['mask']['basename_1']
mask_base_2 = self['mask']['basename_2']
mask_path_1 = os.path.join(mask_dir,mask_base_1)
mask_path_2 = os.path.join(mask_dir,mask_base_2)
data = np.ma.empty(npix,dtype=[('pix',int), ('catalog',object),
('mask_1',object), ('mask_2',object)])
mask = np.ma.empty(npix,dtype=[('pix',bool), ('catalog',bool),
('mask_1',bool), ('mask_2',bool)])
# Build the filenames
data['pix'] = pixels
data['catalog'] = np.char.mod(catalog_path,pixels)
data['mask_1'] = np.char.mod(mask_path_1,pixels)
data['mask_2'] = np.char.mod(mask_path_2,pixels)
# Build the mask of existing files using glob
mask['catalog'] = ~np.in1d(data['catalog'],glob.glob(catalog_dir+'/*'))
mask['mask_1'] = ~np.in1d(data['mask_1'],glob.glob(mask_dir+'/*'))
mask['mask_2'] = ~np.in1d(data['mask_2'],glob.glob(mask_dir+'/*'))
for name in ['catalog','mask_1','mask_2']:
if np.all(mask[name]): logger.warn("All '%s' files masked"%name)
# mask 'pix' if all files not present
mask['pix'] = mask['catalog'] | mask['mask_1'] | mask['mask_2']
if np.all(mask['pix']): logger.warn("All pixels masked")
return np.ma.MaskedArray(data, mask, fill_value=[-1,'','',''])
| 848,375 |
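The filename-templating and existence-masking pattern can be exercised on its own; the `%i`-style basename and directory names below are assumptions standing in for the config entries.

import glob
import os
import numpy as np

# Assumed template: the config basenames contain a %i slot for the pixel index.
pixels = np.arange(12)
catalog_path = os.path.join("catalog", "catalog_hpx%04i.fits")
filenames = np.char.mod(catalog_path, pixels)

# True in the mask means "file missing", mirroring the ~np.in1d usage above.
existing = glob.glob(os.path.join("catalog", "*"))
missing = ~np.in1d(filenames, existing)
print(filenames[:3], missing[:3])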
Return the requested filenames.
Parameters:
-----------
pixels : requested pixels
Returns:
--------
filenames : recarray
|
def getFilenames(self,pixels=None):
logger.debug("Getting filenames...")
if pixels is None:
return self.filenames
else:
return self.filenames[np.in1d(self.filenames['pix'],pixels)]
| 848,376 |
Return the indices of the super-pixels which contain each of the
sub-pixels (nside_in > nside_out).
Parameters:
-----------
ipix : index of the input subpixels
nside_in : nside of the input subpix
nside_out : nside of the desired superpixels
Returns:
--------
ipix_out : superpixels for each subpixel
|
def d_grade_ipix(ipix, nside_in, nside_out, nest=False):
if nside_in==nside_out: return ipix
if not (nside_in > nside_out):
raise ValueError("nside_out must be less than nside_in")
return hp.vec2pix(nside_out, *hp.pix2vec(nside_in, ipix, nest), nest=nest)
| 848,387 |
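A quick check of the degrade direction (healpy assumed installed); the core mapping is copied from above, with the nside guards omitted for brevity.

import healpy as hp
import numpy as np

def d_grade_ipix(ipix, nside_in, nside_out, nest=False):
    # Map each subpixel center to the superpixel that contains it.
    return hp.vec2pix(nside_out, *hp.pix2vec(nside_in, ipix, nest), nest=nest)

# In NEST ordering the children of superpixel p are 4*p ... 4*p+3, so the first
# four NSIDE=2 pixels should all degrade to NSIDE=1 pixel 0.
print(d_grade_ipix(np.arange(4), nside_in=2, nside_out=1, nest=True))  # [0 0 0 0]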
Return the indices of the sub-pixels (resolution nside_out) contained within
the super-pixel(s) (resolution nside_in).
Parameters:
-----------
ipix : index of the input superpixel(s)
nside_in : nside of the input superpixel
nside_out : nside of the desired subpixels
Returns:
--------
ipix_out : subpixels for each superpixel
|
def u_grade_ipix(ipix, nside_in, nside_out, nest=False):
if nside_in==nside_out: return ipix
if not (nside_in < nside_out):
raise ValueError("nside_in must be less than nside_out")
if nest: nest_ipix = ipix
else: nest_ipix = hp.ring2nest(nside_in, ipix)
factor = (nside_out//nside_in)**2
if np.isscalar(ipix):
nest_ipix_out = factor*nest_ipix + np.arange(factor)
else:
nest_ipix_out = factor*np.asarray(nest_ipix)[:,np.newaxis]+np.arange(factor)
if nest: return nest_ipix_out
else: return hp.nest2ring(nside_out, nest_ipix_out)
| 848,388 |
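And the inverse direction: up-grading a super-pixel returns exactly its nested children (healpy assumed installed; the nside checks are omitted for brevity).

import numpy as np
import healpy as hp

def u_grade_ipix(ipix, nside_in, nside_out, nest=False):
    # Core logic copied from above: expand each superpixel into its children.
    nest_ipix = ipix if nest else hp.ring2nest(nside_in, ipix)
    factor = (nside_out // nside_in)**2
    if np.isscalar(ipix):
        nest_ipix_out = factor * nest_ipix + np.arange(factor)
    else:
        nest_ipix_out = factor * np.asarray(nest_ipix)[:, np.newaxis] + np.arange(factor)
    return nest_ipix_out if nest else hp.nest2ring(nside_out, nest_ipix_out)

# Super-pixel 0 at NSIDE=1 contains nested children 0-3 at NSIDE=2.
print(u_grade_ipix(0, nside_in=1, nside_out=2, nest=True))  # [0 1 2 3]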