<SYSTEM_TASK:>
Decipher force field atom ids.
<END_TASK>
<USER_TASK:>
Description:
def decipher_atom_keys(self, forcefield='DLF', dict_key='atom_ids'):
"""
Decipher force field atom ids.
This takes all values in :attr:`MolecularSystem.system['atom_ids']`
that match force field type criteria and creates
:attr:`MolecularSystem.system['elements']` with the corresponding
periodic table of elements equivalents.
If a forcefield is not supported by this method,
:func:`MolecularSystem.swap_atom_keys()` can be used instead.
DLF stands for DL_F notation.
See: C. W. Yong, Descriptions and Implementations of DL_F Notation: A
Natural Chemical Expression System of Atom Types for Molecular
Simulations, J. Chem. Inf. Model., 2016, 56, 1405–1409.
Parameters
----------
forcefield : :class:`str`
The forcefield used to decipher atom ids. Allowed (not case
sensitive): 'OPLS', 'OPLS2005', 'OPLSAA', 'OPLS3', 'DLF', 'DL_F'.
(default='DLF')
dict_key : :class:`str`
The :attr:`MolecularSystem.system` dictionary key to the array
containing the force field atom ids. (default='atom_ids')
Returns
-------
None : :class:`NoneType`
""" |
# In case there is no 'atom_ids' key we try 'elements'. This is for
# XYZ and MOL files mostly. But, we keep the dict_key keyword for
# someone who would want to decipher 'elements' even if 'atom_ids' key
# is present in the system's dictionary.
if 'atom_ids' not in self.system.keys():
dict_key = 'elements'
# I do it on temporary object so that it only finishes when successful
temp = deepcopy(self.system[dict_key])
for element in range(len(temp)):
temp[element] = "{0}".format(
decipher_atom_key(
temp[element], forcefield=forcefield))
self.system['elements'] = temp |
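A minimal usage sketch (the loader call and file name are hypothetical; only decipher_atom_keys above comes from the source):

# Hypothetical setup -- the constructor/loader names are illustrative.
molsys = MolecularSystem.load_file('cage_OPLS.pdb')
molsys.decipher_atom_keys(forcefield='OPLS')  # populates system['elements']
print(molsys.system['elements'][:5])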
<SYSTEM_TASK:>
Get value from SQLAlchemy instance by column name
<END_TASK>
<USER_TASK:>
Description:
def get_attrname_by_colname(instance, name):
""" Get value from SQLAlchemy instance by column name
:Parameters:
- `instance`: SQLAlchemy model instance.
- `name`: Column name
:Examples:
>>> from sqlalchemy import Column, Integer
>>> from sqlalchemy.ext.declarative import declarative_base
>>> Base = declarative_base()
>>> class MPTTPages(Base):
... __tablename__ = "mptt_pages"
... id = Column(Integer, primary_key=True)
... left = Column("lft", Integer, nullable=False)
>>> get_attrname_by_colname(MPTTPages(), 'lft')
'left'
""" |
for attr, column in list(sqlalchemy.inspect(instance.__class__).c.items()):
if column.name == name:
return attr |
<SYSTEM_TASK:>
Return primary key name by model class or instance.
<END_TASK>
<USER_TASK:>
Description:
def get_pk(obj):
""" Return primary key name by model class or instance.
:Parameters:
- `obj`: SQLAlchemy model instance or class.
:Examples:
>>> from sqlalchemy import Column, Integer
>>> from sqlalchemy.ext.declarative import declarative_base
>>> Base = declarative_base()
>>> class User(Base):
... __tablename__ = 'users'
... id = Column(Integer, primary_key=True)
>>> get_pk(User())
(Column('id', Integer(), table=<users>, primary_key=True, nullable=False),)
>>> get_pk(User)
(Column('id', Integer(), table=<users>, primary_key=True, nullable=False),)
""" |
if inspect.isclass(obj):
pk_list = sqlalchemy.inspect(obj).primary_key
else:
pk_list = obj.__mapper__.primary_key
return pk_list |
<SYSTEM_TASK:>
Clear fault injection rules from all known service proxies.
<END_TASK>
<USER_TASK:>
Description:
def clear_rules_from_all_proxies(self):
"""
Clear fault injection rules from all known service proxies.
""" |
self._queue = []
if self.debug:
print('Clearing rules')
for service in self.app.get_services():
for instance in self.app.get_service_instances(service):
if self.debug:
print('Clearing rules for %s - instance %s' % (service, instance))
resp = requests.delete("http://{}/gremlin/v1/rules".format(instance))
if resp.status_code != 200:
print('Failed to clear rules for %s - instance %s' % (service, instance)) |
<SYSTEM_TASK:>
Add a given failure scenario
<END_TASK>
<USER_TASK:>
Description:
def setup_failure(self, scenario=None, **args):
"""Add a given failure scenario
@param scenario: string 'delayrequests' or 'crash'
""" |
assert scenario is not None and scenario in self.functiondict
self.functiondict[scenario](**args) |
<SYSTEM_TASK:>
Add a vertex by coordinate and unique name.
<END_TASK>
<USER_TASK:>
Description:
def add_vertex(self, x, y, z, name):
"""add vertex by coordinate and uniq name
x y z is coordinates of vertex
name is uniq name to refer the vertex
returns Vertex object whici is added.
""" |
self.vertices[name] = Vertex(x, y, z, name)
return self.vertices[name] |
<SYSTEM_TASK:>
Treat name1, name2, ... as the same point.
<END_TASK>
<USER_TASK:>
Description:
def reduce_vertex(self, name1, *names):
"""treat name1, name2, ... as same point.
name2.alias, name3.alias, ... are merged with name1.alias
the key name2, name3, ... in self.vertices are kept and mapped to
same Vertex instance as name1
""" |
v = self.vertices[name1]
for n in names:
w = self.vertices[n]
v.alias.update(w.alias)
# remap n from w to v
self.vertices[n] = v |
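A sketch of the intended aliasing behaviour (the `Mesh` container is assumed, holding `self.vertices` plus the two methods above):

m = Mesh()  # hypothetical container exposing add_vertex/reduce_vertex
m.add_vertex(0.0, 0.0, 0.0, 'a')
m.add_vertex(0.0, 0.0, 0.0, 'b')
m.reduce_vertex('a', 'b')
assert m.vertices['a'] is m.vertices['b']  # both keys now map to one Vertex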
<SYSTEM_TASK:>
Unpack the tar or zip file at the specified path to the directory
<END_TASK>
<USER_TASK:>
Description:
def extract(path, to_path='', ext='', **kwargs):
"""
Unpack the tar or zip file at the specified path to the directory
specified by to_path.
""" |
Archive(path, ext=ext).extract(to_path, **kwargs) |
<SYSTEM_TASK:>
Return the proper Archive implementation class, based on the file type.
<END_TASK>
<USER_TASK:>
Description:
def _archive_cls(file, ext=''):
"""
Return the proper Archive implementation class, based on the file type.
""" |
cls = None
filename = None
if is_string(file):
filename = file
else:
try:
filename = file.name
except AttributeError:
raise UnrecognizedArchiveFormat(
"File object not a recognized archive format.")
lookup_filename = filename + ext
base, tail_ext = os.path.splitext(lookup_filename.lower())
cls = extension_map.get(tail_ext)
if not cls:
base, ext = os.path.splitext(base)
cls = extension_map.get(ext)
if not cls:
raise UnrecognizedArchiveFormat(
"Path not a recognized archive format: %s" % filename)
return cls |
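The second splitext pass is what lets compound extensions such as `.tar.gz` resolve. The lookup in isolation (toy extension_map, illustrative class names):

import os

extension_map = {'.zip': 'ZipArchive', '.tar': 'TarArchive'}  # toy mapping

def lookup(filename):
    base, ext = os.path.splitext(filename.lower())
    cls = extension_map.get(ext)       # first pass: '.gz' -> miss
    if not cls:
        base, ext = os.path.splitext(base)
        cls = extension_map.get(ext)   # second pass: '.tar' -> hit
    return cls

print(lookup('data.tar.gz'))  # -> TarArchive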
<SYSTEM_TASK:>
Check that all of the files contained in the archive are within the
<END_TASK>
<USER_TASK:>
Description:
def check_files(self, to_path=None):
"""
Check that all of the files contained in the archive are within the
target directory.
""" |
if to_path:
target_path = os.path.normpath(os.path.realpath(to_path))
else:
target_path = os.getcwd()
for filename in self.filenames():
extract_path = os.path.join(target_path, filename)
extract_path = os.path.normpath(os.path.realpath(extract_path))
if not extract_path.startswith(target_path):
raise UnsafeArchive(
"Archive member destination is outside the target"
" directory. member: %s" % filename) |
<SYSTEM_TASK:>
Some methods are handled individually to improve efficiency with huge files
<END_TASK>
<USER_TASK:>
Description:
def meanstack(infn: Path, Navg: int, ut1: Optional[datetime]=None,
method: str='mean') -> Tuple[np.ndarray, Optional[datetime]]:
infn = Path(infn).expanduser()
# %% parse indices to load
if isinstance(Navg, slice):
key = Navg
elif isinstance(Navg, int):
key = slice(0, Navg)
elif len(Navg) == 1:
key = slice(0, Navg[0])
elif len(Navg) == 2:
key = slice(Navg[0], Navg[1])
else:
raise ValueError(f'not sure what you mean by Navg={Navg}')
# %% load images
"""
Some methods are handled individually to improve efficiency with huge files
""" |
if infn.suffix == '.h5':
img, ut1 = _h5mean(infn, ut1, key, method)
elif infn.suffix == '.fits':
with fits.open(infn, mode='readonly', memmap=False) as f: # mmap doesn't work with BZERO/BSCALE/BLANK
img = collapsestack(f[0].data, key, method)
elif infn.suffix == '.mat':
img = loadmat(infn)
img = collapsestack(img['data'].T, key, method) # matlab is fortran order
else: # .tif etc.
img = imageio.imread(infn, as_gray=True)
if img.ndim in (3, 4) and img.shape[-1] == 3: # assume RGB
img = collapsestack(img, key, method)
return img, ut1 |
<SYSTEM_TASK:>
Runs the specified request at the specified object_name and
<END_TASK>
<USER_TASK:>
Description:
def _run_search_request(
self, protocol_request, object_name, protocol_response_class):
"""
Runs the specified request at the specified object_name and
instantiates an object of the specified class. We yield each object in
listAttr. If pages of results are present, repeat this process
until the pageToken is null.
""" |
not_done = True
while not_done:
response_object = self._run_search_page_request(
protocol_request, object_name, protocol_response_class)
value_list = getattr(
response_object,
protocol.getValueListName(protocol_response_class))
for extract in value_list:
yield extract
not_done = bool(response_object.next_page_token)
protocol_request.page_token = response_object.next_page_token |
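Stripped of the protocol layer, this is the standard page-token loop; a generic sketch (names hypothetical):

def paginate(fetch_page, request):
    """Yield items from successive pages until no page token is returned."""
    while True:
        response = fetch_page(request)
        for item in response.items:  # 'items' stands in for the value list
            yield item
        if not response.next_page_token:
            break
        request.page_token = response.next_page_token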
<SYSTEM_TASK:>
Returns the bases from the server as a single string.
<END_TASK>
<USER_TASK:>
Description:
def list_reference_bases(self, id_, start=0, end=None):
"""
Returns the bases from the server concatenated into a single
string. This command does not conform to the
patterns of the other search and get requests, and is implemented
differently.
""" |
request = protocol.ListReferenceBasesRequest()
request.start = pb.int(start)
request.end = pb.int(end)
request.reference_id = id_
not_done = True
# TODO We should probably use a StringIO here to make string buffering
# a bit more efficient.
bases_list = []
while not_done:
response = self._run_list_reference_bases_page_request(request)
bases_list.append(response.sequence)
not_done = bool(response.next_page_token)
request.page_token = response.next_page_token
return "".join(bases_list) |
<SYSTEM_TASK:>
Returns an iterator over the Variants fulfilling the specified
<END_TASK>
<USER_TASK:>
Description:
def search_variants(
self, variant_set_id, start=None, end=None, reference_name=None,
call_set_ids=None):
"""
Returns an iterator over the Variants fulfilling the specified
conditions from the specified VariantSet.
:param str variant_set_id: The ID of the
:class:`ga4gh.protocol.VariantSet` of interest.
:param int start: Required. The beginning of the window (0-based,
inclusive) for which overlapping variants should be returned.
Genomic positions are non-negative integers less than reference
length. Requests spanning the join of circular genomes are
represented as two requests one on each side of the join
(position 0).
:param int end: Required. The end of the window (0-based, exclusive)
for which overlapping variants should be returned.
:param str reference_name: The name of the
:class:`ga4gh.protocol.Reference` we wish to return variants from.
:param list call_set_ids: Only return variant calls which belong to
call sets with these IDs. If an empty array, returns variants
without any call objects. If null, returns all variant calls.
:return: An iterator over the :class:`ga4gh.protocol.Variant` objects
defined by the query parameters.
:rtype: iter
""" |
request = protocol.SearchVariantsRequest()
request.reference_name = pb.string(reference_name)
request.start = pb.int(start)
request.end = pb.int(end)
request.variant_set_id = variant_set_id
request.call_set_ids.extend(pb.string(call_set_ids))
request.page_size = pb.int(self._page_size)
return self._run_search_request(
request, "variants", protocol.SearchVariantsResponse) |
<SYSTEM_TASK:>
Returns an iterator over the Variant Annotations fulfilling
<END_TASK>
<USER_TASK:>
Description:
def search_variant_annotations(
self, variant_annotation_set_id, reference_name="",
reference_id="", start=0, end=0, effects=[]):
"""
Returns an iterator over the Variant Annotations fulfilling
the specified conditions from the specified VariantSet.
:param str variant_annotation_set_id: The ID of the
:class:`ga4gh.protocol.VariantAnnotationSet` of interest.
:param int start: Required. The beginning of the window (0-based,
inclusive) for which overlapping variants should be returned.
Genomic positions are non-negative integers less than reference
length. Requests spanning the join of circular genomes are
represented as two requests one on each side of the join
(position 0).
:param int end: Required. The end of the window (0-based, exclusive)
for which overlapping variants should be returned.
:param str reference_name: The name of the
:class:`ga4gh.protocol.Reference` we wish to return variants from.
:return: An iterator over the
:class:`ga4gh.protocol.VariantAnnotation` objects
defined by the query parameters.
:rtype: iter
""" |
request = protocol.SearchVariantAnnotationsRequest()
request.variant_annotation_set_id = variant_annotation_set_id
request.reference_name = reference_name
request.reference_id = reference_id
request.start = start
request.end = end
for effect in effects:
request.effects.add().CopyFrom(protocol.OntologyTerm(**effect))
for effect in request.effects:
if not effect.term_id:
raise exceptions.ErrantRequestException(
"Each ontology term should have an id set")
request.page_size = pb.int(self._page_size)
return self._run_search_request(
request, "variantannotations",
protocol.SearchVariantAnnotationsResponse) |
<SYSTEM_TASK:>
Returns the result of running a search_features method
<END_TASK>
<USER_TASK:>
Description:
def search_features(
self, feature_set_id=None, parent_id="", reference_name="",
start=0, end=0, feature_types=[], name="", gene_symbol=""):
"""
Returns the result of running a search_features method
on a request with the passed-in parameters.
:param str feature_set_id: ID of the feature Set being searched
:param str parent_id: ID (optional) of the parent feature
:param str reference_name: name of the reference to search
(ex: "chr1")
:param int start: search start position on reference
:param int end: end position on reference
:param feature_types: array of terms to limit search by (ex: "gene")
:param str name: only return features with this name
:param str gene_symbol: only return features on this gene
:return: an iterator over Features as returned in the
SearchFeaturesResponse object.
""" |
request = protocol.SearchFeaturesRequest()
request.feature_set_id = feature_set_id
request.parent_id = parent_id
request.reference_name = reference_name
request.name = name
request.gene_symbol = gene_symbol
request.start = start
request.end = end
request.feature_types.extend(feature_types)
request.page_size = pb.int(self._page_size)
return self._run_search_request(
request, "features",
protocol.SearchFeaturesResponse) |
<SYSTEM_TASK:>
Returns the result of running a search_continuous method
<END_TASK>
<USER_TASK:>
Description:
def search_continuous(
self, continuous_set_id=None, reference_name="", start=0, end=0):
"""
Returns the result of running a search_continuous method
on a request with the passed-in parameters.
:param str continuous_set_id: ID of the ContinuousSet being searched
:param str reference_name: name of the reference to search
(ex: "chr1")
:param int start: search start position on reference
:param int end: end position on reference
:return: an iterator over Continuous returned in the
SearchContinuousResponse object.
""" |
request = protocol.SearchContinuousRequest()
request.continuous_set_id = continuous_set_id
request.reference_name = reference_name
request.start = start
request.end = end
request.page_size = pb.int(self._page_size)
return self._run_search_request(
request, "continuous",
protocol.SearchContinuousResponse) |
<SYSTEM_TASK:>
Returns an iterator over the Datasets on the server.
<END_TASK>
<USER_TASK:>
Description:
def search_datasets(self):
"""
Returns an iterator over the Datasets on the server.
:return: An iterator over the :class:`ga4gh.protocol.Dataset`
objects on the server.
""" |
request = protocol.SearchDatasetsRequest()
request.page_size = pb.int(self._page_size)
return self._run_search_request(
request, "datasets", protocol.SearchDatasetsResponse) |
<SYSTEM_TASK:>
Returns an iterator over the VariantSets fulfilling the specified
<END_TASK>
<USER_TASK:>
Description:
def search_variant_sets(self, dataset_id):
"""
Returns an iterator over the VariantSets fulfilling the specified
conditions from the specified Dataset.
:param str dataset_id: The ID of the :class:`ga4gh.protocol.Dataset`
of interest.
:return: An iterator over the :class:`ga4gh.protocol.VariantSet`
objects defined by the query parameters.
""" |
request = protocol.SearchVariantSetsRequest()
request.dataset_id = dataset_id
request.page_size = pb.int(self._page_size)
return self._run_search_request(
request, "variantsets", protocol.SearchVariantSetsResponse) |
<SYSTEM_TASK:>
Returns an iterator over the Annotation Sets fulfilling the specified
<END_TASK>
<USER_TASK:>
Description:
def search_variant_annotation_sets(self, variant_set_id):
"""
Returns an iterator over the Annotation Sets fulfilling the specified
conditions from the specified variant set.
:param str variant_set_id: The ID of the
:class:`ga4gh.protocol.VariantSet` of interest.
:return: An iterator over the :class:`ga4gh.protocol.AnnotationSet`
objects defined by the query parameters.
""" |
request = protocol.SearchVariantAnnotationSetsRequest()
request.variant_set_id = variant_set_id
request.page_size = pb.int(self._page_size)
return self._run_search_request(
request, "variantannotationsets",
protocol.SearchVariantAnnotationSetsResponse) |
<SYSTEM_TASK:>
Returns an iterator over the FeatureSets fulfilling the specified
<END_TASK>
<USER_TASK:>
Description:
def search_feature_sets(self, dataset_id):
"""
Returns an iterator over the FeatureSets fulfilling the specified
conditions from the specified Dataset.
:param str dataset_id: The ID of the
:class:`ga4gh.protocol.Dataset` of interest.
:return: An iterator over the :class:`ga4gh.protocol.FeatureSet`
objects defined by the query parameters.
""" |
request = protocol.SearchFeatureSetsRequest()
request.dataset_id = dataset_id
request.page_size = pb.int(self._page_size)
return self._run_search_request(
request, "featuresets", protocol.SearchFeatureSetsResponse) |
<SYSTEM_TASK:>
Returns an iterator over the ContinuousSets fulfilling the specified
<END_TASK>
<USER_TASK:>
Description:
def search_continuous_sets(self, dataset_id):
"""
Returns an iterator over the ContinuousSets fulfilling the specified
conditions from the specified Dataset.
:param str dataset_id: The ID of the
:class:`ga4gh.protocol.Dataset` of interest.
:return: An iterator over the :class:`ga4gh.protocol.ContinuousSet`
objects defined by the query parameters.
""" |
request = protocol.SearchContinuousSetsRequest()
request.dataset_id = dataset_id
request.page_size = pb.int(self._page_size)
return self._run_search_request(
request, "continuoussets", protocol.SearchContinuousSetsResponse) |
<SYSTEM_TASK:>
Returns an iterator over the ReferenceSets fulfilling the specified
<END_TASK>
<USER_TASK:>
Description:
def search_reference_sets(
self, accession=None, md5checksum=None, assembly_id=None):
"""
Returns an iterator over the ReferenceSets fulfilling the specified
conditions.
:param str accession: If not null, return the reference sets for which
the `accession` matches this string (case-sensitive, exact match).
:param str md5checksum: If not null, return the reference sets for
which the `md5checksum` matches this string (case-sensitive, exact
match). See :class:`ga4gh.protocol.ReferenceSet::md5checksum` for
details.
:param str assembly_id: If not null, return the reference sets for
which the `assembly_id` matches this string (case-sensitive,
exact match).
:return: An iterator over the :class:`ga4gh.protocol.ReferenceSet`
objects defined by the query parameters.
""" |
request = protocol.SearchReferenceSetsRequest()
request.accession = pb.string(accession)
request.md5checksum = pb.string(md5checksum)
request.assembly_id = pb.string(assembly_id)
request.page_size = pb.int(self._page_size)
return self._run_search_request(
request, "referencesets", protocol.SearchReferenceSetsResponse) |
<SYSTEM_TASK:>
Returns an iterator over the References fulfilling the specified
<END_TASK>
<USER_TASK:>
Description:
def search_references(
self, reference_set_id, accession=None, md5checksum=None):
"""
Returns an iterator over the References fulfilling the specified
conditions from the specified Dataset.
:param str reference_set_id: The ReferenceSet to search.
:param str accession: If not None, return the references for which the
`accession` matches this string (case-sensitive, exact match).
:param str md5checksum: If not None, return the references for which
the `md5checksum` matches this string (case-sensitive, exact
match).
:return: An iterator over the :class:`ga4gh.protocol.Reference`
objects defined by the query parameters.
""" |
request = protocol.SearchReferencesRequest()
request.reference_set_id = reference_set_id
request.accession = pb.string(accession)
request.md5checksum = pb.string(md5checksum)
request.page_size = pb.int(self._page_size)
return self._run_search_request(
request, "references", protocol.SearchReferencesResponse) |
<SYSTEM_TASK:>
Returns an iterator over the CallSets fulfilling the specified
<END_TASK>
<USER_TASK:>
Description:
def search_call_sets(self, variant_set_id, name=None, biosample_id=None):
"""
Returns an iterator over the CallSets fulfilling the specified
conditions from the specified VariantSet.
:param str variant_set_id: Find callsets belonging to the
provided variant set.
:param str name: Only CallSets matching the specified name will
be returned.
:param str biosample_id: Only CallSets matching this id will
be returned.
:return: An iterator over the :class:`ga4gh.protocol.CallSet`
objects defined by the query parameters.
""" |
request = protocol.SearchCallSetsRequest()
request.variant_set_id = variant_set_id
request.name = pb.string(name)
request.biosample_id = pb.string(biosample_id)
request.page_size = pb.int(self._page_size)
return self._run_search_request(
request, "callsets", protocol.SearchCallSetsResponse) |
<SYSTEM_TASK:>
Returns an iterator over the Biosamples fulfilling the specified
<END_TASK>
<USER_TASK:>
Description:
def search_biosamples(self, dataset_id, name=None, individual_id=None):
"""
Returns an iterator over the Biosamples fulfilling the specified
conditions.
:param str dataset_id: The dataset to search within.
:param str name: Only Biosamples matching the specified name will
be returned.
:param str individual_id: Only Biosamples matching this
id will be returned.
:return: An iterator over the :class:`ga4gh.protocol.Biosample`
objects defined by the query parameters.
""" |
request = protocol.SearchBiosamplesRequest()
request.dataset_id = dataset_id
request.name = pb.string(name)
request.individual_id = pb.string(individual_id)
request.page_size = pb.int(self._page_size)
return self._run_search_request(
request, "biosamples", protocol.SearchBiosamplesResponse) |
<SYSTEM_TASK:>
Returns an iterator over the Individuals fulfilling the specified
<END_TASK>
<USER_TASK:>
Description:
def search_individuals(self, dataset_id, name=None):
"""
Returns an iterator over the Individuals fulfilling the specified
conditions.
:param str dataset_id: The dataset to search within.
:param str name: Only Individuals matching the specified name will
be returned.
:return: An iterator over the :class:`ga4gh.protocol.Individual`
objects defined by the query parameters.
""" |
request = protocol.SearchIndividualsRequest()
request.dataset_id = dataset_id
request.name = pb.string(name)
request.page_size = pb.int(self._page_size)
return self._run_search_request(
request, "individuals", protocol.SearchIndividualsResponse) |
<SYSTEM_TASK:>
Returns an iterator over the ReadGroupSets fulfilling the specified
<END_TASK>
<USER_TASK:>
Description:
def search_read_group_sets(
self, dataset_id, name=None, biosample_id=None):
"""
Returns an iterator over the ReadGroupSets fulfilling the specified
conditions from the specified Dataset.
:param str name: Only ReadGroupSets matching the specified name
will be returned.
:param str biosample_id: Only ReadGroups matching the specified
biosample will be included in the response.
:return: An iterator over the :class:`ga4gh.protocol.ReadGroupSet`
objects defined by the query parameters.
:rtype: iter
""" |
request = protocol.SearchReadGroupSetsRequest()
request.dataset_id = dataset_id
request.name = pb.string(name)
request.biosample_id = pb.string(biosample_id)
request.page_size = pb.int(self._page_size)
return self._run_search_request(
request, "readgroupsets", protocol.SearchReadGroupSetsResponse) |
<SYSTEM_TASK:>
Returns an iterator over the Reads fulfilling the specified
<END_TASK>
<USER_TASK:>
Description:
def search_reads(
self, read_group_ids, reference_id=None, start=None, end=None):
"""
Returns an iterator over the Reads fulfilling the specified
conditions from the specified read_group_ids.
:param str read_group_ids: The IDs of the
:class:`ga4gh.protocol.ReadGroup` of interest.
:param str reference_id: The name of the
:class:`ga4gh.protocol.Reference` we wish to return reads
mapped to.
:param int start: The start position (0-based) of this query. If a
reference is specified, this defaults to 0. Genomic positions are
non-negative integers less than reference length. Requests spanning
the join of circular genomes are represented as two requests one on
each side of the join (position 0).
:param int end: The end position (0-based, exclusive) of this query.
If a reference is specified, this defaults to the reference's
length.
:return: An iterator over the
:class:`ga4gh.protocol.ReadAlignment` objects defined by
the query parameters.
:rtype: iter
""" |
request = protocol.SearchReadsRequest()
request.read_group_ids.extend(read_group_ids)
request.reference_id = pb.string(reference_id)
request.start = pb.int(start)
request.end = pb.int(end)
request.page_size = pb.int(self._page_size)
return self._run_search_request(
request, "reads", protocol.SearchReadsResponse) |
<SYSTEM_TASK:>
Returns an iterator over the PhenotypeAssociationSets on the server.
<END_TASK>
<USER_TASK:>
Description:
def search_phenotype_association_sets(self, dataset_id):
"""
Returns an iterator over the PhenotypeAssociationSets on the server.
""" |
request = protocol.SearchPhenotypeAssociationSetsRequest()
request.dataset_id = dataset_id
request.page_size = pb.int(self._page_size)
return self._run_search_request(
request, "phenotypeassociationsets",
protocol.SearchPhenotypeAssociationSetsResponse) |
<SYSTEM_TASK:>
Returns an iterator over the GenotypePhenotype associations from
<END_TASK>
<USER_TASK:>
Description:
def search_genotype_phenotype(
self, phenotype_association_set_id=None, feature_ids=None,
phenotype_ids=None, evidence=None):
"""
Returns an iterator over the GenotypePhenotype associations from
the server.
""" |
request = protocol.SearchGenotypePhenotypeRequest()
request.phenotype_association_set_id = phenotype_association_set_id
if feature_ids:
request.feature_ids.extend(feature_ids)
if phenotype_ids:
request.phenotype_ids.extend(phenotype_ids)
if evidence:
request.evidence.extend(evidence)
request.page_size = pb.int(self._page_size)
self._logger.debug("search_genotype_phenotype {}".format(request))
return self._run_search_request(
request, "featurephenotypeassociations",
protocol.SearchGenotypePhenotypeResponse) |
<SYSTEM_TASK:>
Returns an iterator over the Phenotypes from the server
<END_TASK>
<USER_TASK:>
Description:
def search_phenotype(
self, phenotype_association_set_id=None, phenotype_id=None,
description=None, type_=None, age_of_onset=None):
"""
Returns an iterator over the Phenotypes from the server
""" |
request = protocol.SearchPhenotypesRequest()
request.phenotype_association_set_id = phenotype_association_set_id
if phenotype_id:
request.id = phenotype_id
if description:
request.description = description
if type_:
request.type.mergeFrom(type_)
if age_of_onset:
request.age_of_onset = age_of_onset
request.page_size = pb.int(self._page_size)
return self._run_search_request(
request, "phenotypes",
protocol.SearchPhenotypesResponse) |
<SYSTEM_TASK:>
Returns an iterator over the RnaQuantificationSet objects from the
<END_TASK>
<USER_TASK:>
Description:
def search_rna_quantification_sets(self, dataset_id):
"""
Returns an iterator over the RnaQuantificationSet objects from the
server
""" |
request = protocol.SearchRnaQuantificationSetsRequest()
request.dataset_id = dataset_id
request.page_size = pb.int(self._page_size)
return self._run_search_request(
request, "rnaquantificationsets",
protocol.SearchRnaQuantificationSetsResponse) |
<SYSTEM_TASK:>
Returns an iterator over the RnaQuantification objects from the server
<END_TASK>
<USER_TASK:>
Description:
def search_rna_quantifications(self, rna_quantification_set_id=""):
"""
Returns an iterator over the RnaQuantification objects from the server
:param str rna_quantification_set_id: The ID of the
:class:`ga4gh.protocol.RnaQuantificationSet` of interest.
""" |
request = protocol.SearchRnaQuantificationsRequest()
request.rna_quantification_set_id = rna_quantification_set_id
request.page_size = pb.int(self._page_size)
return self._run_search_request(
request, "rnaquantifications",
protocol.SearchRnaQuantificationsResponse) |
<SYSTEM_TASK:>
Returns an iterator over the ExpressionLevel objects from the server
<END_TASK>
<USER_TASK:>
Description:
def search_expression_levels(
self, rna_quantification_id="", names=[], threshold=0.0):
"""
Returns an iterator over the ExpressionLevel objects from the server
:param str feature_ids: The IDs of the
:class:`ga4gh.protocol.Feature` of interest.
:param str rna_quantification_id: The ID of the
:class:`ga4gh.protocol.RnaQuantification` of interest.
:param float threshold: Minimum expression of responses to return.
""" |
request = protocol.SearchExpressionLevelsRequest()
request.rna_quantification_id = rna_quantification_id
request.names.extend(names)
request.threshold = threshold
request.page_size = pb.int(self._page_size)
return self._run_search_request(
request, "expressionlevels",
protocol.SearchExpressionLevelsResponse) |
<SYSTEM_TASK:>
Sets up the common HTTP session parameters used by requests.
<END_TASK>
<USER_TASK:>
Description:
def _setup_http_session(self):
"""
Sets up the common HTTP session parameters used by requests.
""" |
headers = {"Content-type": "application/json"}
if (self._id_token):
headers.update({"authorization": "Bearer {}".format(
self._id_token)})
self._session.headers.update(headers)
# TODO is this unsafe????
self._session.verify = False |
<SYSTEM_TASK:>
Checks the specified HTTP response from the requests package and
<END_TASK>
<USER_TASK:>
Description:
def _check_response_status(self, response):
"""
Checks the specified HTTP response from the requests package and
raises an exception if a non-200 HTTP code was returned by the
server.
""" |
if response.status_code != requests.codes.ok:
self._logger.error("%s %s", response.status_code, response.text)
raise exceptions.RequestNonSuccessException(
"Url {0} had status_code {1}".format(
response.url, response.status_code)) |
<SYSTEM_TASK:>
Initialize the captcha extension to the given app object.
<END_TASK>
<USER_TASK:>
Description:
def init_app(self, app):
"""
Initialize the captcha extension to the given app object.
""" |
self.enabled = app.config.get("CAPTCHA_ENABLE", True)
self.digits = app.config.get("CAPTCHA_LENGTH", 4)
self.max = 10**self.digits
self.image_generator = ImageCaptcha()
self.rand = SystemRandom()
def _generate():
if not self.enabled:
return ""
base64_captcha = self.generate()
return Markup("<img src='{}'>".format("data:image/png;base64, {}".format(base64_captcha)))
app.jinja_env.globals['captcha'] = _generate
# Check for sessions that do not persist on the server. Issue a warning because they are most likely open to replay attacks.
# This addon is built upon flask-session.
session_type = app.config.get('SESSION_TYPE', None)
if session_type is None or session_type == "null":
raise RuntimeWarning("Flask-Sessionstore is not set to use a server persistent storage type. This likely means that captchas are vulnerable to replay attacks.")
elif session_type == "sqlalchemy":
# I have to do this as of version 0.3.1 of flask-session if using sqlalchemy as the session type in order to create the initial database.
# Flask-sessionstore seems to have the same problem.
app.session_interface.db.create_all() |
<SYSTEM_TASK:>
Parses the IKE_INIT response packet received from Responder.
<END_TASK>
<USER_TASK:>
Description:
def init_recv(self):
"""
Parses the IKE_INIT response packet received from Responder.
Assigns the correct values of rSPI and Nr
Calculates Diffie-Hellman exchange and assigns all keys to self.
""" |
assert len(self.packets) == 2
packet = self.packets[-1]
for p in packet.payloads:
if p._type == payloads.Type.Nr:
self.Nr = p._data
logger.debug(u"Responder nonce {}".format(binascii.hexlify(self.Nr)))
elif p._type == payloads.Type.KE:
int_from_bytes = int.from_bytes(p.kex_data, 'big')
self.diffie_hellman.derivate(int_from_bytes)
else:
logger.debug('Ignoring: {}'.format(p))
logger.debug('Nonce I: {}\nNonce R: {}'.format(binascii.hexlify(self.Ni), binascii.hexlify(self.Nr)))
logger.debug('DH shared secret: {}'.format(binascii.hexlify(self.diffie_hellman.shared_secret)))
SKEYSEED = prf(self.Ni + self.Nr, self.diffie_hellman.shared_secret)
logger.debug(u"SKEYSEED is: {0!r:s}\n".format(binascii.hexlify(SKEYSEED)))
keymat = prfplus(SKEYSEED, (self.Ni + self.Nr +
to_bytes(self.iSPI) + to_bytes(self.rSPI)),
32 * 7)
#3 * 32 + 2 * 32 + 2 * 32)
logger.debug("Got %d bytes of key material" % len(keymat))
# get keys from material
( self.SK_d,
self.SK_ai,
self.SK_ar,
self.SK_ei,
self.SK_er,
self.SK_pi,
self.SK_pr ) = unpack("32s" * 7, keymat) # XXX: Should support other than 256-bit algorithms, really.
logger.debug("SK_ai: {}".format(dump(self.SK_ai)))
logger.debug("SK_ei: {}".format(dump(self.SK_ei))) |
<SYSTEM_TASK:>
Verifies the peers authentication.
<END_TASK>
<USER_TASK:>
Description:
def authenticate_peer(self, auth_data, peer_id, message):
"""
Verifies the peers authentication.
""" |
logger.debug('message: {}'.format(dump(message)))
signed_octets = message + self.Ni + prf(self.SK_pr, peer_id._data)
auth_type = const.AuthenticationType(struct.unpack(const.AUTH_HEADER, auth_data[:4])[0])
assert auth_type == const.AuthenticationType.RSA
logger.debug(dump(auth_data))
try:
return pubkey.verify(signed_octets, auth_data[4:], 'tests/peer.pem')
except pubkey.VerifyError:
raise IkeError("Remote peer authentication failed.") |
<SYSTEM_TASK:>
Returns the name of the caller's module as a string.
<END_TASK>
<USER_TASK:>
Description:
def get_caller_module():
"""
Returns the name of the caller's module as a string.
>>> get_caller_module()
'__main__'
""" |
stack = inspect.stack()
assert len(stack) > 1
caller = stack[2][0]
return caller.f_globals['__name__'] |
<SYSTEM_TASK:>
Returns a random number between min_value and max_value
<END_TASK>
<USER_TASK:>
Description:
def uniform(self, key, min_value=0., max_value=1.):
"""Returns a random number between min_value and max_value""" |
return min_value + self._random(key) * (max_value - min_value) |
<SYSTEM_TASK:>
Return perlin noise seeded with the specified key.
<END_TASK>
<USER_TASK:>
Description:
def perlin(self, key, **kwargs):
"""Return perlin noise seede with the specified key.
For parameters, check the PerlinNoise class.""" |
if hasattr(key, "encode"):
key = key.encode('ascii')
value = zlib.adler32(key, self.seed)
return PerlinNoise(value, **kwargs) |
<SYSTEM_TASK:>
Helper method to determine if the proxies logged any major errors related to the functioning of the proxy itself
<END_TASK>
<USER_TASK:>
Description:
def check_no_proxy_errors(self, **kwargs):
"""
Helper method to determine if the proxies logged any major errors related to the functioning of the proxy itself
""" |
data = self._es.search(body={
"size": max_query_results,
"query": {
"filtered": {
"query": {
"match_all": {}
},
"filter": {
"term": {
"level": "error"
}
}
}
}
})
# if self.debug:
# print(data)
return GremlinTestResult(data["hits"]["total"] == 0, data) |
<SYSTEM_TASK:>
Helper method to determine if proxies logged any error related to the requests passing through
<END_TASK>
<USER_TASK:>
Description:
def get_requests_with_errors(self):
""" Helper method to determine if proxies logged any error related to the requests passing through""" |
data = self._es.search(body={
"size": max_query_results,
"query": {
"filtered": {
"query": {
"match_all": {}
},
"filter": {
"exists": {
"field": "errmsg"
}
}
}
}
})
return GremlinTestResult(False, data) |
<SYSTEM_TASK:>
Check a set of assertions
<END_TASK>
<USER_TASK:>
Description:
def check_assertions(self, checklist, all=False):
"""Check a set of assertions
@param all boolean if False, stop at first failure
@return: False if any assertion fails.
""" |
assert isinstance(checklist, dict) and 'checks' in checklist
retval = None
retlist = []
for assertion in checklist['checks']:
retval = self.check_assertion(**assertion)
retlist.append(retval)
if not retval.success and not all:
print "Error message:", retval[3]
return retlist
return retlist |
<SYSTEM_TASK:>
Download star index files.
<END_TASK>
<USER_TASK:>
Description:
def download(odir: Path, source_url: str, irng: Sequence[int]):
"""Download star index files.
The default range was useful for my cameras.
""" |
assert len(irng) == 2, 'specify start, stop indices'
odir = Path(odir).expanduser()
odir.mkdir(parents=True, exist_ok=True)
ri = int(source_url.split('/')[-2][:2])
for i in range(*irng):
fn = f'index-{ri:2d}{i:02d}.fits'
url = f'{source_url}{fn}'
ofn = odir / fn
if ofn.is_file(): # no clobber
print('skipping', ofn)
continue
print(f'{url} => {ofn}', end='\r')
urlretrieve(url, ofn) |
<SYSTEM_TASK:>
Check excluded value against the link's current value
<END_TASK>
<USER_TASK:>
Description:
def exclude_match(exclude, link_value):
""" Check excluded value against the link's current value """ |
if hasattr(exclude, "search") and exclude.search(link_value):
return True
if exclude == link_value:
return True
return False |
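Both branches in action (exclude_match as defined above):

import re

print(exclude_match(re.compile(r'^https?://'), 'http://example.com'))  # True: regex branch
print(exclude_match('/about', '/about'))  # True: exact-match branch
print(exclude_match('/about', '/team'))   # False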
<SYSTEM_TASK:>
Modify a hyperlink to make it SEO-friendly by replacing
<END_TASK>
<USER_TASK:>
Description:
def seoify_hyperlink(hyperlink):
"""Modify a hyperlink to make it SEO-friendly by replacing
hyphens with spaces and trimming multiple spaces.
:param hyperlink: URL to attempt to grab SEO from """ |
last_slash = hyperlink.rfind('/')
return re.sub(r' +|-', ' ', hyperlink[last_slash + 1:]) |
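Example input and output:

print(seoify_hyperlink('https://example.com/blog/my-awesome-page'))
# -> 'my awesome page'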
<SYSTEM_TASK:>
Using filters and sorts, this finds all hyperlinks
<END_TASK>
<USER_TASK:>
Description:
def find(self, limit=None, reverse=False, sort=None,
exclude=None, duplicates=True, pretty=False, **filters):
""" Using filters and sorts, this finds all hyperlinks
on a web page
:param limit: Crop results down to limit specified
:param reverse: Reverse the list of links, useful for before limiting
:param sort: Key function used to sort the resulting links
:param exclude: Remove matching links from the results
:param duplicates: Determines if identical URLs should be displayed
:param pretty: Quick and pretty formatting using pprint
:param filters: Keyword filters applied to the anchor search """ |
if exclude is None:
exclude = []
if 'href' not in filters:
filters['href'] = True
search = self._soup.findAll('a', **filters)
if reverse:
search.reverse()
links = []
for anchor in search:
build_link = anchor.attrs
try:
build_link[u'seo'] = seoify_hyperlink(anchor['href'])
except KeyError:
pass
try:
build_link[u'text'] = anchor.string or build_link['seo']
except KeyError:
pass
ignore_link = False
for nixd in exclude:
for key, value in six.iteritems(nixd):
if key in build_link:
if (isinstance(build_link[key], collections.abc.Iterable)
        and not isinstance(build_link[key], six.string_types)):
for item in build_link[key]:
ignore_link = exclude_match(value, item)
else:
ignore_link = exclude_match(value, build_link[key])
if not duplicates:
for link in links:
if link['href'] == anchor['href']:
ignore_link = True
if not ignore_link:
links.append(build_link)
if limit is not None and len(links) == limit:
break
if sort is not None:
links = sorted(links, key=sort, reverse=reverse)
if pretty:
pp = pprint.PrettyPrinter(indent=4)
return pp.pprint(links)
else:
return links |
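For instance (a sketch; `scraper` stands in for an instance of the class defining find()):

import re

links = scraper.find(limit=5, duplicates=False,
                     exclude=[{'href': re.compile(r'\.pdf$')}])
for link in links:
    print(link['href'], link.get('text', ''))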
<SYSTEM_TASK:>
Astrometry.net makes file ".new" with the image and the WCS SIP 2-D polynomial fit coefficients in the FITS header
<END_TASK>
<USER_TASK:>
Description:
def add_plot(fn: Path, cm, ax, alpha=1):
"""Astrometry.net makes file ".new" with the image and the WCS SIP 2-D polynomial fit coefficients in the FITS header
We use DECL as "x" and RA as "y".
pcolormesh() is used as it handles arbitrary pixel shapes.
Note that pcolormesh() cannot tolerate NaN in X or Y (NaN in C is OK).
This is handled in https://github.com/scivision/pcolormesh_nan.py.
""" |
with fits.open(fn, mode='readonly', memmap=False) as f:
img = f[0].data
yPix, xPix = f[0].shape[-2:]
x, y = np.meshgrid(range(xPix), range(yPix)) # pixel indices to find RA/dec of
xy = np.column_stack((x.ravel(order='C'), y.ravel(order='C')))
radec = wcs.WCS(f[0].header).all_pix2world(xy, 0)
ra = radec[:, 0].reshape((yPix, xPix), order='C')
dec = radec[:, 1].reshape((yPix, xPix), order='C')
ax.set_title(fn.name)
ax.pcolormesh(ra, dec, img, alpha=alpha, cmap=cm, norm=LogNorm())
ax.set_ylabel('Right Ascension [deg.]')
ax.set_xlabel('Declination [deg.]') |
<SYSTEM_TASK:>
Returns data in hex format in groups of 4 octets delimited by spaces for debugging purposes.
<END_TASK>
<USER_TASK:>
Description:
def dump(src):
"""
Returns data in hex format in groups of 4 octets delimited by spaces for debugging purposes.
""" |
return b' '.join(binascii.hexlify(bytes(x)) for x in zip(src[::4], src[1::4], src[2::4], src[3::4])) |
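For example:

print(dump(b'\x01\x02\x03\x04\xaa\xbb\xcc\xdd'))
# -> b'01020304 aabbccdd'

Note that zip() silently drops any trailing bytes that do not fill a complete 4-octet group.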
<SYSTEM_TASK:>
Display productpage with normal user and test user buttons
<END_TASK>
<USER_TASK:>
Description:
def index():
""" Display productpage with normal user and test user buttons""" |
global productpage
table = json2html.convert(json = json.dumps(productpage),
table_attributes="class=\"table table-condensed table-bordered table-hover\"")
return render_template('index.html', serviceTable=table) |
<SYSTEM_TASK:>
A range implementation which can handle floats
<END_TASK>
<USER_TASK:>
Description:
def frange(start, end, step):
"""A range implementation which can handle floats""" |
if start <= end:
    step = abs(step)
    while start < end:
        yield start
        start += step
else:
    step = -abs(step)
    while start > end:
        yield start
        start += step |
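For example:

print(list(frange(0.0, 1.0, 0.25)))  # [0.0, 0.25, 0.5, 0.75]
print(list(frange(1.0, 0.0, 0.25)))  # [1.0, 0.75, 0.5, 0.25]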
<SYSTEM_TASK:>
Generate a grid of complex numbers
<END_TASK>
<USER_TASK:>
Description:
def gridrange(start, end, step):
"""Generate a grid of complex numbers""" |
for x in frange(start.real, end.real, step.real):
for y in frange(start.imag, end.imag, step.imag):
yield x + y * 1j |
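For example, a 2x2 grid:

print(list(gridrange(0 + 0j, 2 + 2j, 1 + 1j)))
# -> [0j, 1j, (1+0j), (1+1j)]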
<SYSTEM_TASK:>
Pads data to blocksize according to RFC 4303. Pad length field is included in output.
<END_TASK>
<USER_TASK:>
Description:
def pad(data, blocksize=16):
"""
Pads data to blocksize according to RFC 4303. Pad length field is included in output.
""" |
padlen = blocksize - len(data) % blocksize
return bytes(data + bytearray(range(1, padlen)) + bytearray((padlen - 1,))) |
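A worked example: 13 bytes of payload in a 16-byte block leave 3 bytes of padding, so pad bytes 1 and 2 are appended, followed by a Pad Length field of 2:

padded = pad(bytearray(b'\xaa' * 13))
print(len(padded))  # 16
print(padded[-3:])  # b'\x01\x02\x02'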
<SYSTEM_TASK:>
Astrometry.net from at least version 0.67 is OK with Python 3.
<END_TASK>
<USER_TASK:>
Description:
def doSolve(fitsfn: Path, args: str=None):
"""
Astrometry.net from at least version 0.67 is OK with Python 3.
""" |
# binpath = Path(find_executable('solve-field')).parent
opts = args.split(' ') if args else []
# %% build command
cmd = ['solve-field', '--overwrite', str(fitsfn)]
cmd += opts
print('\n', ' '.join(cmd), '\n')
# %% execute
ret = subprocess.check_output(cmd, universal_newlines=True)
# solve-field returns 0 even if it didn't solve!
print(ret)
if 'Did not solve' in ret:
raise RuntimeError(f'could not solve {fitsfn}')
print('\n\n *** done with astrometry.net ***\n ') |
<SYSTEM_TASK:>
Converts a color from HSV to a hex RGB.
<END_TASK>
<USER_TASK:>
Description:
def to_rgb(hsv):
"""Converts a color from HSV to a hex RGB.
HSV should be in range 0..1, though hue wraps around. Output is a
hexadecimal color value as used by CSS, HTML and SVG""" |
r, g, b = [int(min(255, max(0, component * 256)))
for component in colorsys.hsv_to_rgb(*hsv)]
return "%02x%02x%02x" % (r, g, b) |
<SYSTEM_TASK:>
Returns the specified verbosity level interpreted as a logging level.
<END_TASK>
<USER_TASK:>
Description:
def verbosityToLogLevel(verbosity):
"""
Returns the specified verbosity level interpreted as a logging level.
""" |
ret = 0
if verbosity == 1:
ret = logging.INFO
elif verbosity >= 2:
ret = logging.DEBUG
return ret |
<SYSTEM_TASK:>
Adds common options to a variant search command line parser.
<END_TASK>
<USER_TASK:>
Description:
def addVariantSearchOptions(parser):
"""
Adds common options to a variant search command line parser.
""" |
addVariantSetIdArgument(parser)
addReferenceNameArgument(parser)
addCallSetIdsArgument(parser)
addStartArgument(parser)
addEndArgument(parser)
addPageSizeArgument(parser) |
<SYSTEM_TASK:>
Adds common options to an annotation search command line parser.
<END_TASK>
<USER_TASK:>
Description:
def addAnnotationsSearchOptions(parser):
"""
Adds common options to an annotation search command line parser.
""" |
addAnnotationSetIdArgument(parser)
addReferenceNameArgument(parser)
addReferenceIdArgument(parser)
addStartArgument(parser)
addEndArgument(parser)
addEffectsArgument(parser)
addPageSizeArgument(parser) |
<SYSTEM_TASK:>
Adds common options to a features search command line parser.
<END_TASK>
<USER_TASK:>
Description:
def addFeaturesSearchOptions(parser):
"""
Adds common options to a features search command line parser.
""" |
addFeatureSetIdArgument(parser)
addFeaturesReferenceNameArgument(parser)
addStartArgument(parser)
addEndArgument(parser)
addParentFeatureIdArgument(parser)
addFeatureTypesArgument(parser) |
<SYSTEM_TASK:>
Adds common options to a continuous search command line parser.
<END_TASK>
<USER_TASK:>
Description:
def addContinuousSearchOptions(parser):
"""
Adds common options to a continuous search command line parser.
""" |
addContinuousSetIdArgument(parser)
addContinuousReferenceNameArgument(parser)
addStartArgument(parser)
addEndArgument(parser) |
<SYSTEM_TASK:>
Adds options to a phenotype search command line parser.
<END_TASK>
<USER_TASK:>
Description:
def addPhenotypeSearchOptions(parser):
"""
Adds options to a phenotype search command line parser.
""" |
parser.add_argument(
"--phenotype_association_set_id", "-s", default=None,
help="Only return phenotypes from this phenotype_association_set.")
parser.add_argument(
"--phenotype_id", "-p", default=None,
help="Only return this phenotype.")
parser.add_argument(
"--description", "-d", default=None,
help="Only return phenotypes matching this description.")
parser.add_argument(
"--age_of_onset", "-a", default=None,
help="Only return phenotypes with this age_of_onset.")
parser.add_argument(
"--type", "-T", default=None,
help="Only return phenotypes with this type.") |
<SYSTEM_TASK:>
Outputs a text summary of the specified protocol objects, one
<END_TASK>
<USER_TASK:>
Description:
def _textOutput(self, gaObjects):
"""
Outputs a text summary of the specified protocol objects, one
per line.
""" |
for gaObject in gaObjects:
if hasattr(gaObject, 'name'):
print(gaObject.id, gaObject.name, sep="\t")
else:
print(gaObject.id, sep="\t") |
<SYSTEM_TASK:>
Returns all continuous sets on the server.
<END_TASK>
<USER_TASK:>
Description:
def getAllContinuousSets(self):
"""
Returns all continuous sets on the server.
""" |
for dataset in self.getAllDatasets():
iterator = self._client.search_continuous_sets(
dataset_id=dataset.id)
for continuousSet in iterator:
yield continuousSet |
<SYSTEM_TASK:>
Get all read groups in a read group set
<END_TASK>
<USER_TASK:>
Description:
def getAllReadGroups(self):
"""
Get all read groups in a read group set
""" |
for dataset in self.getAllDatasets():
iterator = self._client.search_read_group_sets(
dataset_id=dataset.id)
for readGroupSet in iterator:
readGroupSet = self._client.get_read_group_set(
readGroupSet.id)
for readGroup in readGroupSet.read_groups:
yield readGroup.id |
<SYSTEM_TASK:>
Automatically guess the reference id if it is not passed.
<END_TASK>
<USER_TASK:>
Description:
def _run(self, referenceGroupId, referenceId=None):
"""
Automatically guess the reference id if it is not passed.
""" |
# check if we can get reference id from rg
if referenceId is None:
referenceId = self._referenceId
if referenceId is None:
rg = self._client.get_read_group(
read_group_id=referenceGroupId)
iterator = self._client.search_references(rg.reference_set_id)
for reference in iterator:
self._run(referenceGroupId, reference.id)
else:
iterator = self._client.search_reads(
read_group_ids=[referenceGroupId],
reference_id=referenceId,
start=self._start, end=self._end)
self._output(iterator) |
<SYSTEM_TASK:>
Iterate passed read group ids, or go through all available read groups
<END_TASK>
<USER_TASK:>
Description:
def run(self):
"""
Iterate passed read group ids, or go through all available read groups
""" |
if not self._readGroupIds:
for referenceGroupId in self.getAllReadGroups():
self._run(referenceGroupId)
else:
for referenceGroupId in self._readGroupIds:
self._run(referenceGroupId) |
<SYSTEM_TASK:>
Merge the doc-sections of the parent's and child's attribute into a single docstring.
<END_TASK>
<USER_TASK:>
Description:
def merge_all_sections(prnt_sctns, child_sctns, style):
""" Merge the doc-sections of the parent's and child's attribute into a single docstring.
Parameters
----------
prnt_sctns: OrderedDict[str, Union[None,str]]
child_sctns: OrderedDict[str, Union[None,str]]
style: str
    The docstring style, e.g. "numpy" or "google".
Returns
-------
str
Output docstring of the merged docstrings.""" |
doc = []
prnt_only_raises = prnt_sctns["Raises"] and not (prnt_sctns["Returns"] or prnt_sctns["Yields"])
if prnt_only_raises and (child_sctns["Returns"] or child_sctns["Yields"]):
prnt_sctns["Raises"] = None
for key in prnt_sctns:
sect = merge_section(key, prnt_sctns[key], child_sctns[key], style)
if sect is not None:
doc.append(sect)
return "\n\n".join(doc) if doc else None |
<SYSTEM_TASK:>
Merge two numpy-style docstrings into a single docstring, according to napoleon docstring sections.
<END_TASK>
<USER_TASK:>
Description:
def merge_numpy_napoleon_docs(prnt_doc=None, child_doc=None):
""" Merge two numpy-style docstrings into a single docstring, according to napoleon docstring sections.
Given the numpy-style docstrings from a parent and child's attributes, merge the docstring
sections such that the child's section is used, wherever present, otherwise the parent's
section is used.
Any whitespace that can be uniformly removed from a docstring's second line and onwards is
removed. Sections will be separated by a single blank line.
Aliased docstring sections are normalized, e.g. Args, Arguments -> Parameters.
Parameters
----------
prnt_doc: Optional[str]
The docstring from the parent.
child_doc: Optional[str]
The docstring from the child.
Returns
-------
Union[str, None]
The merged docstring. """ |
style = "numpy"
return merge_all_sections(parse_napoleon_doc(prnt_doc, style), parse_napoleon_doc(child_doc, style), style) |
<SYSTEM_TASK:>
Merge two google-style docstrings into a single docstring, according to napoleon docstring sections.
<END_TASK>
<USER_TASK:>
Description:
def merge_google_napoleon_docs(prnt_doc=None, child_doc=None):
""" Merge two google-style docstrings into a single docstring, according to napoleon docstring sections.
Given the google-style docstrings from a parent and child's attributes, merge the docstring
sections such that the child's section is used, wherever present, otherwise the parent's
section is used.
Any whitespace that can be uniformly removed from a docstring's second line and onwards is
removed. Sections will be separated by a single blank line.
Aliased docstring sections are normalized, e.g. Args, Arguments -> Parameters.
Parameters
----------
prnt_doc: Optional[str]
The docstring from the parent.
child_doc: Optional[str]
The docstring from the child.
Returns
-------
Union[str, None]
The merged docstring. """ |
style = "google"
return merge_all_sections(parse_napoleon_doc(prnt_doc, style), parse_napoleon_doc(child_doc, style), style) |
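A usage sketch with the google-style merger above (the docstrings are illustrative):

parent_doc = """Args:
    x (int): the input value
"""
child_doc = """Returns:
    int: the doubled value
"""
# The child contributes Returns; Args falls back to the parent.
print(merge_google_napoleon_docs(parent_doc, child_doc))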
<SYSTEM_TASK:>
Merge two meshes in common coordinates found in x1, x2.
<END_TASK>
<USER_TASK:>
Description:
def merge_mesh( x1, ngroups1, conns1, x2, ngroups2, conns2, cmap, eps = 1e-8 ):
"""Merge two meshes in common coordinates found in x1, x2.""" |
n1 = x1.shape[0]
n2 = x2.shape[0]
err = nm.sum( nm.sum( nm.abs( x1[cmap[:,0],:-1] - x2[cmap[:,1],:-1] ) ) )
if abs( err ) > (10.0 * eps):
print('nonmatching meshes!', err)
raise ValueError
mask = nm.ones( (n2,), dtype = nm.int32 )
mask[cmap[:,1]] = 0
# print mask, nm.cumsum( mask )
remap = nm.cumsum( mask ) + n1 - 1
remap[cmap[:,1]] = cmap[:,0]
# print remap
i2 = nm.setdiff1d( nm.arange( n2, dtype = nm.int32 ),
cmap[:,1] )
xx = nm.r_[x1, x2[i2]]
ngroups = nm.r_[ngroups1, ngroups2[i2]]
conns = []
for ii in range( len( conns1 ) ):
conn = nm.vstack( (conns1[ii], remap[conns2[ii]]) )
conns.append( conn )
return xx, ngroups, conns |
<SYSTEM_TASK:>
Detect and attempt fixing double nodes in a mesh.
<END_TASK>
<USER_TASK:>
Description:
def fix_double_nodes(coor, ngroups, conns, eps):
"""
Detect and attempt fixing double nodes in a mesh.
The double nodes are nodes having the same coordinates
w.r.t. precision given by `eps`.
""" |
n_nod, dim = coor.shape
cmap = find_map( coor, nm.zeros( (0,dim) ), eps = eps, allow_double = True )
if cmap.size:
output('double nodes in input mesh!')
output('trying to fix...')
while cmap.size:
print(cmap.size)
# Just like in Variable.equation_mapping()...
ii = nm.argsort( cmap[:,1] )
scmap = cmap[ii]
eq = nm.arange( n_nod )
eq[scmap[:,1]] = -1
eqi = eq[eq >= 0]
eq[eqi] = nm.arange( eqi.shape[0] )
remap = eq.copy()
remap[scmap[:,1]] = eq[scmap[:,0]]
print(coor.shape)
coor = coor[eqi]
ngroups = ngroups[eqi]
print(coor.shape)
ccs = []
for conn in conns:
ccs.append( remap[conn] )
conns = ccs
cmap = find_map( coor, nm.zeros( (0,dim) ), eps = eps,
allow_double = True )
output('...done')
return coor, ngroups, conns |
<SYSTEM_TASK:>
Get the smallest edge length.
<END_TASK>
<USER_TASK:>
Description:
def get_min_edge_size(coor, conns):
"""
Get the smallest edge length.
""" |
mes = 1e16
for conn in conns:
n_ep = conn.shape[1]
for ir in range( n_ep ):
x1 = coor[conn[:,ir]]
for ic in range( ir + 1, n_ep ):
x2 = coor[conn[:,ic]]
aux = nm.sqrt( nm.sum( (x2 - x1)**2.0, axis = 1 ).min() )
mes = min( mes, aux )
return mes |
<SYSTEM_TASK:>
Can miss the minimum, but is enough for our purposes.
<END_TASK>
<USER_TASK:>
Description:
def get_min_vertex_distance( coor, guess ):
"""Can miss the minimum, but is enough for our purposes.""" |
# Sort by x.
ix = nm.argsort( coor[:,0] )
scoor = coor[ix]
mvd = 1e16
# Get mvd in chunks potentially smaller than guess.
n_coor = coor.shape[0]
print(n_coor)
i0 = i1 = 0
x0 = scoor[i0,0]
while 1:
while ((scoor[i1,0] - x0) < guess) and (i1 < (n_coor - 1)):
i1 += 1
# print i0, i1, x0, scoor[i1,0]
aim, aa1, aa2, aux = get_min_vertex_distance_naive( scoor[i0:i1+1] )
if aux < mvd:
im, a1, a2 = aim, aa1 + i0, aa2 + i0
mvd = min( mvd, aux )
i0 = i1 = int( 0.5 * (i1 + i0 ) ) + 1
# i0 += 1
x0 = scoor[i0,0]
# print '-', i0
if i1 == n_coor - 1: break
print(im, ix[a1], ix[a2], a1, a2, scoor[a1], scoor[a2])
return mvd |
<SYSTEM_TASK:>
Create a mesh reusing mat_ids and descs of mesh_in.
<END_TASK>
<USER_TASK:>
Description:
def make_mesh( coor, ngroups, conns, mesh_in ):
"""Create a mesh reusing mat_ids and descs of mesh_in.""" |
mat_ids = []
for ii, conn in enumerate( conns ):
mat_id = nm.empty( (conn.shape[0],), dtype = nm.int32 )
mat_id.fill( mesh_in.mat_ids[ii][0] )
mat_ids.append( mat_id )
mesh_out = Mesh.from_data( 'merged mesh', coor, ngroups, conns,
mat_ids, mesh_in.descs )
return mesh_out |
<SYSTEM_TASK:>
For each mesh node referenced in the connectivity conns, make a list of
<END_TASK>
<USER_TASK:>
Description:
def make_inverse_connectivity(conns, n_nod, ret_offsets=True):
"""
For each mesh node referenced in the connectivity conns, make a list of
elements it belongs to.
""" |
from itertools import chain
iconn = [[] for ii in range( n_nod )]
n_els = [0] * n_nod
for ig, conn in enumerate( conns ):
for iel, row in enumerate( conn ):
for node in row:
iconn[node].extend([ig, iel])
n_els[node] += 1
n_els = nm.array(n_els, dtype=nm.int32)
iconn = nm.fromiter(chain(*iconn), nm.int32)
if ret_offsets:
offsets = nm.cumsum(nm.r_[0, n_els], dtype=nm.int32)
return offsets, iconn
else:
return n_els, iconn |
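A small worked example (numpy imported as nm, matching the source's convention):

import numpy as nm

conns = [nm.array([[0, 1, 2], [1, 2, 3]], dtype=nm.int32)]  # two triangles sharing an edge
offsets, iconn = make_inverse_connectivity(conns, n_nod=4)
print(offsets)  # [0 1 3 5 6] -- nodes 1 and 2 each belong to two elements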
<SYSTEM_TASK:>
Create a mesh given a set of surface faces and the original mesh.
<END_TASK>
<USER_TASK:>
Description:
def from_surface( surf_faces, mesh_in ):
"""
Create a mesh given a set of surface faces and the original mesh.
""" |
aux = nm.concatenate([faces.ravel() for faces in surf_faces])
inod = nm.unique(aux)
n_nod = len( inod )
n_nod_m, dim = mesh_in.coors.shape
aux = nm.arange( n_nod, dtype=nm.int32 )
remap = nm.zeros( (n_nod_m,), nm.int32 )
remap[inod] = aux
mesh = Mesh( mesh_in.name + "_surf" )
mesh.coors = mesh_in.coors[inod]
mesh.ngroups = mesh_in.ngroups[inod]
sfm = {3 : "2_3", 4 : "2_4"}
mesh.conns = []
mesh.descs = []
mesh.mat_ids = []
for ii, sf in enumerate( surf_faces ):
n_el, n_fp = sf.shape
conn = remap[sf]
mat_id = nm.empty( (conn.shape[0],), dtype = nm.int32 )
mat_id.fill( ii )
mesh.descs.append( sfm[n_fp] )
mesh.conns.append( conn )
mesh.mat_ids.append( mat_id )
mesh._set_shape_info()
return mesh |
<SYSTEM_TASK:>
Read a mesh from a file.
<END_TASK>
<USER_TASK:>
Description:
def from_file(filename=None, io='auto', prefix_dir=None,
omit_facets=False):
"""
Read a mesh from a file.
Parameters
----------
filename : string or function or MeshIO instance or Mesh instance
The name of file to read the mesh from. For convenience, a
mesh creation function or a MeshIO instance or directly a Mesh
instance can be passed in place of the file name.
io : MeshIO subclass instance or 'auto'
Passing a MeshIO subclass instance has precedence over `filename`.
prefix_dir : str
If not None, the filename is relative to that directory.
omit_facets : bool
If True, do not read cells of lower dimension than the space
dimension (faces and/or edges). Only some MeshIO subclasses
support this!
""" |
if isinstance(filename, Mesh):
return filename
if io == 'auto':
if filename is None:
output( 'filename or io must be specified!' )
raise ValueError
else:
io = MeshIO.any_from_filename(filename, prefix_dir=prefix_dir)
output('reading mesh (%s)...' % (io.filename))
tt = time.clock()
trunk = io.get_filename_trunk()
mesh = Mesh(trunk)
mesh = io.read(mesh, omit_facets=omit_facets)
output('...done in %.2f s' % (time.clock() - tt))
mesh._set_shape_info()
return mesh |
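Typical usage, with a hypothetical file name; any suffix known to the MeshIO table dispatches to the matching reader:
# 'cylinder.mesh' is an illustrative path; substitute a real mesh file.
mesh = Mesh.from_file('cylinder.mesh')
print(mesh.descs, mesh.coors.shape)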
<SYSTEM_TASK:>
Create a mesh corresponding to a given region.
<END_TASK>
<USER_TASK:>
Description:
def from_region(region, mesh_in, save_edges=False, save_faces=False,
localize=False, is_surface=False):
"""
Create a mesh corresponding to a given region.
""" |
mesh = Mesh( mesh_in.name + "_reg" )
mesh.coors = mesh_in.coors.copy()
mesh.ngroups = mesh_in.ngroups.copy()
mesh.conns = []
mesh.descs = []
mesh.mat_ids = []
if not is_surface:
if region.has_cells():
for ig in region.igs:
mesh.descs.append( mesh_in.descs[ig] )
els = region.get_cells( ig )
mesh.mat_ids.append( mesh_in.mat_ids[ig][els,:].copy() )
mesh.conns.append( mesh_in.conns[ig][els,:].copy() )
if save_edges:
ed = region.domain.ed
for ig in region.igs:
edges = region.get_edges( ig )
mesh.descs.append( '1_2' )
mesh.mat_ids.append( ed.data[edges,0] + 1 )
mesh.conns.append( ed.data[edges,-2:].copy() )
if save_faces:
mesh._append_region_faces(region)
if save_edges or save_faces:
mesh.descs.append( {2 : '2_3', 3 : '3_4'}[mesh_in.dim] )
mesh.mat_ids.append( -nm.ones_like( region.all_vertices ) )
mesh.conns.append(make_point_cells(region.all_vertices,
mesh_in.dim))
else:
mesh._append_region_faces(region, force_faces=True)
mesh._set_shape_info()
if localize:
mesh.localize( region.all_vertices )
return mesh |
<SYSTEM_TASK:>
Create a mesh from mesh data.
<END_TASK>
<USER_TASK:>
Description:
def from_data( name, coors, ngroups, conns, mat_ids, descs, igs = None ):
"""
Create a mesh from mesh data.
""" |
if igs is None:
igs = range( len( conns ) )
mesh = Mesh(name)
mesh._set_data(coors = coors,
ngroups = ngroups,
conns = [conns[ig] for ig in igs],
mat_ids = [mat_ids[ig] for ig in igs],
descs = [descs[ig] for ig in igs])
mesh._set_shape_info()
return mesh |
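A self-contained sketch building a two-triangle mesh directly from arrays; passing None for ngroups lets _set_data() fill in zero node groups:
import numpy as nm

coors = nm.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
conns = [nm.array([[0, 1, 2], [1, 3, 2]], dtype=nm.int32)]
mat_ids = [nm.zeros(2, dtype=nm.int32)]
mesh = Mesh.from_data('two_triangles', coors, None, conns, mat_ids, ['2_3'])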
<SYSTEM_TASK:>
Make a deep copy of self.
<END_TASK>
<USER_TASK:>
Description:
def copy(self, name=None):
"""Make a deep copy of self.
Parameters
----------
name : str
Name of the copied mesh.
""" |
return Struct.copy(self, deep=True, name=name) |
<SYSTEM_TASK:>
Set mesh data.
<END_TASK>
<USER_TASK:>
Description:
def _set_data(self, coors, ngroups, conns, mat_ids, descs, nodal_bcs=None):
"""
Set mesh data.
Parameters
----------
coors : array
Coordinates of mesh nodes.
ngroups : array
Node groups.
conns : list of arrays
The array of mesh elements (connectivities) for each element group.
mat_ids : list of arrays
The array of material ids for each element group.
descs: list of strings
The element type for each element group.
nodal_bcs : dict of arrays, optional
The nodes defining regions for boundary conditions referred
to by the dict keys in problem description files.
""" |
self.coors = nm.ascontiguousarray(coors)
if ngroups is None:
self.ngroups = nm.zeros((self.coors.shape[0],), dtype=nm.int32)
else:
self.ngroups = nm.ascontiguousarray(ngroups)
self.conns = [nm.asarray(conn, dtype=nm.int32) for conn in conns]
self.mat_ids = [nm.asarray(mat_id, dtype=nm.int32)
for mat_id in mat_ids]
self.descs = descs
self.nodal_bcs = get_default(nodal_bcs, {}) |
<SYSTEM_TASK:>
Write mesh + optional results in `out` to a file.
<END_TASK>
<USER_TASK:>
Description:
def write(self, filename=None, io=None,
coors=None, igs=None, out=None, float_format=None, **kwargs):
"""
Write mesh + optional results in `out` to a file.
Parameters
----------
filename : str, optional
The file name. If None, the mesh name is used instead.
io : MeshIO instance or 'auto', optional
Passing 'auto' respects the extension of `filename`.
coors : array, optional
The coordinates that can be used instead of the mesh coordinates.
igs : array_like, optional
Passing a list of group ids selects only those groups for writing.
out : dict, optional
The output data attached to the mesh vertices and/or cells.
float_format : str, optional
The format string used to print floats in case of a text file
format.
**kwargs : dict, optional
Additional arguments that can be passed to the `MeshIO` instance.
""" |
if filename is None:
filename = self.name + '.mesh'
if io is None:
io = self.io
if io is None:
io = 'auto'
if io == 'auto':
io = MeshIO.any_from_filename( filename )
if coors is None:
coors = self.coors
if igs is None:
igs = range( len( self.conns ) )
aux_mesh = Mesh.from_data( self.name, coors, self.ngroups,
self.conns, self.mat_ids, self.descs, igs )
io.set_float_format( float_format )
io.write( filename, aux_mesh, out, **kwargs ) |
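For instance, writing only the first element group to a VTK file (the output name is illustrative):
mesh.write('result.vtk', io='auto', igs=[0])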
<SYSTEM_TASK:>
Get the coordinates of vertices elements in group `ig`.
<END_TASK>
<USER_TASK:>
Description:
def get_element_coors(self, ig=None):
"""
Get the coordinates of the vertices of elements in group `ig`.
Parameters
----------
ig : int, optional
The element group. If None, the coordinates for all groups
are returned, filled with zeros at places of missing
vertices, i.e. where elements have fewer than the full number
of vertices (`n_ep_max`).
Returns
-------
coors : array
The coordinates in an array of shape `(n_el, n_ep_max, dim)`.
""" |
cc = self.coors
n_ep_max = self.n_e_ps.max()
coors = nm.empty((self.n_el, n_ep_max, self.dim), dtype=cc.dtype)
for ig_, conn in enumerate(self.conns):
i1, i2 = self.el_offsets[ig_], self.el_offsets[ig_ + 1]
coors[i1:i2, :conn.shape[1], :] = cc[conn]
if ig is not None:
# Restrict the result to the requested group only; the original loop
# variable shadowed `ig`, so the documented argument had no effect.
coors = coors[self.el_offsets[ig]:self.el_offsets[ig + 1]]
return coors |
<SYSTEM_TASK:>
Transform coordinates of the mesh by the given transformation matrix.
<END_TASK>
<USER_TASK:>
Description:
def transform_coors(self, mtx_t, ref_coors=None):
"""
Transform coordinates of the mesh by the given transformation matrix.
Parameters
----------
mtx_t : array
The transformation matrix `T` (2D array). It is applied
depending on its shape:
- `(dim, dim): x = T * x`
- `(dim, dim + 1): x = T[:, :-1] * x + T[:, -1]`
ref_coors : array, optional
Alternative coordinates to use for the transformation instead
of the mesh coordinates, with the same shape as `self.coors`.
""" |
if ref_coors is None:
ref_coors = self.coors
if mtx_t.shape[1] > self.coors.shape[1]:
self.coors[:] = nm.dot(ref_coors, mtx_t[:,:-1].T) + mtx_t[:,-1]
else:
self.coors[:] = nm.dot(ref_coors, mtx_t.T) |
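For example, a (dim, dim + 1) matrix combines a rotation with a translation; the sketch assumes mesh is an existing 2D Mesh instance:
import numpy as nm

# Rotate by 90 degrees, then shift by (1, 0): the last column is the offset.
mtx_t = nm.array([[0.0, -1.0, 1.0], [1.0, 0.0, 0.0]])
mesh.transform_coors(mtx_t)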
<SYSTEM_TASK:>
Explode the mesh element groups by `eps`, i.e. split group
<END_TASK>
<USER_TASK:>
Description:
def explode_groups(self, eps, return_emap=False):
"""
Explode the mesh element groups by `eps`, i.e. split group
interface nodes and shrink each group towards its centre by
`eps`.
Parameters
----------
eps : float in `[0.0, 1.0]`
The group shrinking factor.
return_emap : bool, optional
If True, also return the mapping against original mesh
coordinates that result in the exploded mesh coordinates.
The mapping can be used to map mesh vertex data to the
exploded mesh vertices.
Returns
-------
mesh : Mesh
The new mesh with exploded groups.
emap : spmatrix, optional
The mapping for exploding vertex values. Only provided if
`return_emap` is True.
""" |
assert_(0.0 <= eps <= 1.0)
remap = nm.empty((self.n_nod,), dtype=nm.int32)
offset = 0
if return_emap:
rows, cols = [], []
coors = []
ngroups = []
conns = []
mat_ids = []
descs = []
for ig, conn in enumerate(self.conns):
nodes = nm.unique(conn)
group_coors = self.coors[nodes]
n_nod = group_coors.shape[0]
centre = group_coors.sum(axis=0) / float(n_nod)
vectors = group_coors - centre[None, :]
new_coors = centre + (vectors * eps)
remap[nodes] = nm.arange(n_nod, dtype=nm.int32) + offset
new_conn = remap[conn]
coors.append(new_coors)
ngroups.append(self.ngroups[nodes])
conns.append(new_conn)
mat_ids.append(self.mat_ids[ig])
descs.append(self.descs[ig])
offset += n_nod
if return_emap:
cols.append(nodes)
rows.append(remap[nodes])
coors = nm.concatenate(coors, axis=0)
ngroups = nm.concatenate(ngroups, axis=0)
mesh = Mesh.from_data('exploded_' + self.name,
coors, ngroups, conns, mat_ids, descs)
if return_emap:
rows = nm.concatenate(rows)
cols = nm.concatenate(cols)
data = nm.ones(rows.shape[0], dtype=nm.float64)
emap = sp.coo_matrix((data, (rows, cols)),
shape=(mesh.n_nod, self.n_nod))
return mesh, emap
else:
return mesh |
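The returned sparse mapping can transfer per-vertex arrays onto the exploded mesh; vertex_data below is a hypothetical array with one value per original vertex:
exploded, emap = mesh.explode_groups(0.9, return_emap=True)
# emap has shape (exploded.n_nod, mesh.n_nod), so original vertex values
# are copied onto the duplicated interface vertices as well.
exploded_data = emap * vertex_data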
<SYSTEM_TASK:>
Join groups of the same element type.
<END_TASK>
<USER_TASK:>
Description:
def join_conn_groups( conns, descs, mat_ids, concat = False ):
"""Join groups of the same element type.""" |
el = dict_from_keys_init( descs, list )
for ig, desc in enumerate( descs ):
el[desc].append( ig )
groups = [ii for ii in el.values() if ii]
## print el, groups
descs_out, conns_out, mat_ids_out = [], [], []
for group in groups:
n_ep = conns[group[0]].shape[1]
conn = nm.zeros( (0, n_ep), nm.int32 )
mat_id = nm.zeros( (0,), nm.int32 )
for ig in group:
conn = nm.concatenate( (conn, conns[ig]) )
mat_id = nm.concatenate( (mat_id, mat_ids[ig]) )
if concat:
conn = nm.concatenate( (conn, mat_id[:,nm.newaxis]), 1 )
else:
mat_ids_out.append( mat_id )
conns_out.append( conn )
descs_out.append( descs[group[0]] )
if concat:
return conns_out, descs_out
else:
return conns_out, descs_out, mat_ids_out |
<SYSTEM_TASK:>
Convert complex values in the output dictionary `out_in` to pairs of
<END_TASK>
<USER_TASK:>
Description:
def convert_complex_output(out_in):
"""
Convert complex values in the output dictionary `out_in` to pairs of
real and imaginary parts.
""" |
out = {}
for key, val in out_in.iteritems():
if val.data.dtype in complex_types:
rval = copy(val)
rval.data = val.data.real
out['real(%s)' % key] = rval
ival = copy(val)
ival.data = val.data.imag
out['imag(%s)' % key] = ival
else:
out[key] = val
return out |
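A minimal check, assuming Struct and complex_types are imported from sfepy's base module as elsewhere in this file:
import numpy as nm

out = {'u': Struct(name='u', data=nm.array([1.0 + 2.0j]))}
converted = convert_complex_output(out)
print(sorted(converted.keys()))  # -> ['imag(u)', 'real(u)']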
<SYSTEM_TASK:>
Guess the format of filename, candidates are in formats.
<END_TASK>
<USER_TASK:>
Description:
def guess_format( filename, ext, formats, io_table ):
"""
Guess the format of filename, candidates are in formats.
""" |
ok = False
for format in formats:
output( 'guessing %s' % format )
try:
ok = io_table[format].guess( filename )
except AttributeError:
pass
if ok: break
else:
raise NotImplementedError('cannot guess format of a *%s file!' % ext)
return format |
<SYSTEM_TASK:>
Create a MeshIO instance according to the kind of `filename`.
<END_TASK>
<USER_TASK:>
Description:
def any_from_filename(filename, prefix_dir=None):
"""
Create a MeshIO instance according to the kind of `filename`.
Parameters
----------
filename : str, function or MeshIO subclass instance
The name of the mesh file. It can also be a user-supplied function
accepting two arguments (`mesh`, a Mesh instance, and `mode`, one of
'read' or 'write'), or directly a MeshIO subclass instance.
prefix_dir : str
The directory name to prepend to `filename`.
Returns
-------
io : MeshIO subclass instance
The MeshIO subclass instance corresponding to the kind of `filename`.
""" |
if not isinstance(filename, basestr):
if isinstance(filename, MeshIO):
return filename
else:
return UserMeshIO(filename)
ext = op.splitext(filename)[1].lower()
try:
format = supported_formats[ext]
except KeyError:
raise ValueError('unsupported mesh file suffix! (%s)' % ext)
if isinstance(format, tuple):
format = guess_format(filename, ext, format, io_table)
if prefix_dir is not None:
filename = op.normpath(op.join(prefix_dir, filename))
return io_table[format](filename) |
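Suffix-driven dispatch in action; the file name is illustrative:
io = MeshIO.any_from_filename('cylinder.vtk')
mesh = io.read(Mesh('cylinder'))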
<SYSTEM_TASK:>
Create a MeshIO instance for file `filename` with forced `format`.
<END_TASK>
<USER_TASK:>
Description:
def for_format(filename, format=None, writable=False, prefix_dir=None):
"""
Create a MeshIO instance for file `filename` with forced `format`.
Parameters
----------
filename : str
The name of the mesh file.
format : str
One of supported formats. If None,
:func:`MeshIO.any_from_filename()` is called instead.
writable : bool
If True, verify that the mesh format is writable.
prefix_dir : str
The directory name to prepend to `filename`.
Returns
-------
io : MeshIO subclass instance
The MeshIO subclass instance corresponding to the `format`.
""" |
ext = op.splitext(filename)[1].lower()
try:
_format = supported_formats[ext]
except KeyError:
_format = None
format = get_default(format, _format)
if format is None:
io = MeshIO.any_from_filename(filename, prefix_dir=prefix_dir)
else:
if not isinstance(format, basestr):
raise ValueError('ambiguous suffix! (%s -> %s)' % (ext, format))
if format not in io_table:
raise ValueError('unknown output mesh format! (%s)' % format)
if writable and ('w' not in supported_capabilities[format]):
output_writable_meshes()
msg = 'write support not implemented for output mesh format "%s",' \
' see above!' \
% format
raise ValueError(msg)
if prefix_dir is not None:
filename = op.normpath(op.join(prefix_dir, filename))
io = io_table[format](filename)
return io |
<SYSTEM_TASK:>
Reads one section from the mesh3d file.
<END_TASK>
<USER_TASK:>
Description:
def _read_section(self, f, integer=True):
"""
Reads one section from the mesh3d file.
integer ... if True, all numbers are passed to int(), otherwise to
float(), before returning
Some examples how a section can look like:
2
1 2 5 4 7 8 11 10
2 3 6 5 8 9 12 11
or
5
1 2 3 4 1
1 2 6 5 1
2 3 7 6 1
3 4 8 7 1
4 1 5 8 1
or
0
""" |
if integer:
dtype=int
else:
dtype=float
l = self._read_line(f)
N = int(l)
rows = []
for i in range(N):
l = self._read_line(f)
row = nm.fromstring(l, sep=" ", dtype=dtype)
rows.append(row)
return nm.array(rows) |
<SYSTEM_TASK:>
Return always the same instance of the backend class
<END_TASK>
<USER_TASK:>
Description:
def create(cls):
"""
Return always the same instance of the backend class
""" |
if cls not in cls._instances:
cls._instances[cls] = cls()
return cls._instances[cls] |
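A usage sketch with a hypothetical subclass; BaseBackend stands for whatever base class defines create() and the _instances cache:
class VtkBackend(BaseBackend): pass  # hypothetical concrete backend

b1 = VtkBackend.create()
b2 = VtkBackend.create()
assert b1 is b2  # the same instance is cached per class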
<SYSTEM_TASK:>
Returns PeekableIterator.Nothing when the iterator is exhausted.
<END_TASK>
<USER_TASK:>
Description:
def peek(self):
"""
Returns PeekableIterator.Nothing when the iterator is exhausted.
""" |
try:
v = next(self._iter)
self._iter = itertools.chain((v,), self._iter)
return v
except StopIteration:
return PeekableIterator.Nothing |
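A usage sketch, assuming the class wraps a plain iterator in self._iter via its constructor:
it = PeekableIterator(iter([1, 2, 3]))
print(it.peek())  # -> 1; the value is pushed back, not consumed
print(it.peek())  # -> 1 again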
<SYSTEM_TASK:>
Causes this ephemeral table to be persisted on the TOMLFile.
<END_TASK>
<USER_TASK:>
Description:
def _append_to_parent(self):
"""
Causes this ephemeral table to be persisted on the TOMLFile.
""" |
if self.__appended:
return
if self._parent is not None:
self._parent.append_fresh_table(self)
self.__appended = True |
<SYSTEM_TASK:>
Try to get the `AccessToken` associated with the provided token.
<END_TASK>
<USER_TASK:>
Description:
def access_token(self, value, request):
"""
Try to get the `AccessToken` associated with the provided token.
*The provided value must pass `BearerHandler.validate()`*
""" |
if self.validate(value, request) is not None:
return None
access_token = AccessToken.objects.for_token(value)
return access_token |
<SYSTEM_TASK:>
Try to get the `AccessToken` associated with the given token.
<END_TASK>
<USER_TASK:>
Description:
def validate(self, value, request):
"""
Try to get the `AccessToken` associated with the given token.
The return value is determined based on a few things:
- If no token is provided (`value` is None), a 400 response will be returned.
- If an invalid token is provided, a 401 response will be returned.
- If the token provided is valid, `None` will be returned.
""" |
from django.http import HttpResponseBadRequest
from doac.http import HttpResponseUnauthorized
if not value:
response = HttpResponseBadRequest()
response["WWW-Authenticate"] = request_error_header(CredentialsNotProvided)
return response
try:
access_token = AccessToken.objects.for_token(value)
except AccessToken.DoesNotExist:
response = HttpResponseUnauthorized()
response["WWW-Authenticate"] = request_error_header(InvalidToken)
return response
return None |
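A typical call pattern inside a view; token_value and request are hypothetical names for the extracted bearer token and the incoming Django request:
handler = BearerHandler()
error_response = handler.validate(token_value, request)
# validate() returns None for a valid token, otherwise a 400/401 response
# carrying a WWW-Authenticate header that the view should return as-is.
token = handler.access_token(token_value, request)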
<SYSTEM_TASK:>
fetches the friends from twitter using the
<END_TASK>
<USER_TASK:>
Description:
def fetch_friends(self, user):
"""
Fetches the friends from Twitter using the
information stored on the django-social-auth models.
`user` is an instance of UserSocialAuth.
Returns:
a collection of friend objects fetched from Twitter
""" |
# Fetch the token key and secret
if USING_ALLAUTH:
social_app = SocialApp.objects.get_current('twitter')
consumer_key = social_app.key
consumer_secret = social_app.secret
oauth_token = SocialToken.objects.get(account=user, app=social_app).token
oauth_token_secret = SocialToken.objects.get(account=user, app=social_app).token_secret
else:
t = TwitterBackend()
tokens = t.tokens(user)
oauth_token_secret = tokens['oauth_token_secret']
oauth_token = tokens['oauth_token']
# Consumer key and secret from settings
consumer_key = settings.TWITTER_CONSUMER_KEY
consumer_secret = settings.TWITTER_CONSUMER_SECRET
# now fetch the twitter friends using `python-twitter`
api = twitter.Api(
consumer_key=consumer_key,
consumer_secret=consumer_secret,
access_token_key=oauth_token,
access_token_secret=oauth_token_secret
)
return api.GetFriends() |