<SYSTEM_TASK:>
Cut a sphere specified by origin and radius.
<END_TASK>
<USER_TASK:>
Description:
def cut_sphere(
self,
radius=15.,
origin=None,
outside_sliced=True,
preserve_bonds=False):
"""Cut a sphere specified by origin and radius.
Args:
radius (float): Radius of the sphere.
origin (list): Please note that you can also pass an
integer. In this case it is interpreted as the
index of the atom which is taken as origin.
outside_sliced (bool): Atoms outside/inside the sphere
are cut out.
preserve_bonds (bool): Do not cut covalent bonds.
Returns:
Cartesian:
""" |
if origin is None:
origin = np.zeros(3)
elif pd.api.types.is_list_like(origin):
origin = np.array(origin, dtype='f8')
else:
origin = self.loc[origin, ['x', 'y', 'z']]
molecule = self.get_distance_to(origin)
if outside_sliced:
molecule = molecule[molecule['distance'] < radius]
else:
molecule = molecule[molecule['distance'] > radius]
if preserve_bonds:
molecule = self._preserve_bonds(molecule)
return molecule |
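A minimal usage sketch (assuming ``chemcoord`` is importable and a hypothetical ``molecule.xyz`` file exists):

import chemcoord as cc

molecule = cc.Cartesian.read_xyz('molecule.xyz')  # hypothetical input file
# keep atoms within 5 length units of atom 0 (integer origin = atom index)
cluster = molecule.cut_sphere(radius=5., origin=0)
# keep atoms *outside* a sphere around the point (0, 0, 0)
shell = molecule.cut_sphere(radius=5., origin=[0., 0., 0.], outside_sliced=False)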
<SYSTEM_TASK:>
Cut a cuboid specified by edges and origin.
<END_TASK>
<USER_TASK:>
Description:
def cut_cuboid(
self,
a=20,
b=None,
c=None,
origin=None,
outside_sliced=True,
preserve_bonds=False):
"""Cut a cuboid specified by edge and radius.
Args:
a (float): Value of the a edge.
b (float): Value of the b edge. Takes value of a if None.
c (float): Value of the c edge. Takes value of a if None.
origin (list): Please note that you can also pass an
integer. In this case it is interpreted as the index
of the atom which is taken as origin.
outside_sliced (bool): Atoms outside/inside the cuboid are
cut away.
preserve_bonds (bool): Do not cut covalent bonds.
Returns:
Cartesian:
""" |
if origin is None:
origin = np.zeros(3)
elif pd.api.types.is_list_like(origin):
origin = np.array(origin, dtype='f8')
else:
origin = self.loc[origin, ['x', 'y', 'z']]
b = a if b is None else b
c = a if c is None else c
sides = np.array([a, b, c])
pos = self.loc[:, ['x', 'y', 'z']]
# compare |position - origin| against half of each edge length
if outside_sliced:
molecule = self[((pos - origin).abs() / (sides / 2)).max(axis=1) < 1.]
else:
molecule = self[((pos - origin).abs() / (sides / 2)).max(axis=1) > 1.]
if preserve_bonds:
molecule = self._preserve_bonds(molecule)
return molecule |
<SYSTEM_TASK:>
Return the mass weighted average location.
<END_TASK>
<USER_TASK:>
Description:
def get_barycenter(self):
"""Return the mass weighted average location.
Args:
None
Returns:
:class:`numpy.ndarray`:
""" |
try:
mass = self['mass'].values
except KeyError:
mass = self.add_data('mass')['mass'].values
pos = self.loc[:, ['x', 'y', 'z']].values
return (pos * mass[:, None]).sum(axis=0) / self.get_total_mass() |
<SYSTEM_TASK:>
Return the distances between given atoms.
<END_TASK>
<USER_TASK:>
Description:
def get_bond_lengths(self, indices):
"""Return the distances between given atoms.
Calculates the distance between the atoms with
indices ``i`` and ``b``.
The indices can be given in three ways:
* As simple list ``[i, b]``
* As list of lists: ``[[i1, b1], [i2, b2]...]``
* As :class:`pd.DataFrame` where ``i`` is taken from the index and
``b`` from the respective column ``'b'``.
Args:
indices (list):
Returns:
:class:`numpy.ndarray`: Vector of distances in the same unit as the coordinates.
""" |
coords = ['x', 'y', 'z']
if isinstance(indices, pd.DataFrame):
i_pos = self.loc[indices.index, coords].values
b_pos = self.loc[indices.loc[:, 'b'], coords].values
else:
indices = np.array(indices)
if len(indices.shape) == 1:
indices = indices[None, :]
i_pos = self.loc[indices[:, 0], coords].values
b_pos = self.loc[indices[:, 1], coords].values
return np.linalg.norm(i_pos - b_pos, axis=1) |
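The three accepted index forms, as a sketch (assuming ``molecule`` is a ``Cartesian`` instance with atoms 0, 1 and 2):

import pandas as pd

d1 = molecule.get_bond_lengths([0, 1])                 # one pair -> length-1 array
d2 = molecule.get_bond_lengths([[0, 1], [1, 2]])       # several pairs at once
frame = pd.DataFrame({'b': [1, 2]}, index=[0, 1])      # i from index, partner from 'b'
d3 = molecule.get_bond_lengths(frame)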
<SYSTEM_TASK:>
Return the angles between given atoms.
<END_TASK>
<USER_TASK:>
Description:
def get_angle_degrees(self, indices):
"""Return the angles between given atoms.
Calculates the angle in degrees between the atoms with
indices ``i, b, a``.
The indices can be given in three ways:
* As simple list ``[i, b, a]``
* As list of lists: ``[[i1, b1, a1], [i2, b2, a2]...]``
* As :class:`pd.DataFrame` where ``i`` is taken from the index and
``b`` and ``a`` from the respective columns ``'b'`` and ``'a'``.
Args:
indices (list):
Returns:
:class:`numpy.ndarray`: Vector of angles in degrees.
""" |
coords = ['x', 'y', 'z']
if isinstance(indices, pd.DataFrame):
i_pos = self.loc[indices.index, coords].values
b_pos = self.loc[indices.loc[:, 'b'], coords].values
a_pos = self.loc[indices.loc[:, 'a'], coords].values
else:
indices = np.array(indices)
if len(indices.shape) == 1:
indices = indices[None, :]
i_pos = self.loc[indices[:, 0], coords].values
b_pos = self.loc[indices[:, 1], coords].values
a_pos = self.loc[indices[:, 2], coords].values
BI, BA = i_pos - b_pos, a_pos - b_pos
bi, ba = [v / np.linalg.norm(v, axis=1)[:, None] for v in (BI, BA)]
dot_product = np.sum(bi * ba, axis=1)
dot_product[dot_product > 1] = 1
dot_product[dot_product < -1] = -1
angles = np.degrees(np.arccos(dot_product))
return angles |
<SYSTEM_TASK:>
Return the dihedrals between given atoms.
<END_TASK>
<USER_TASK:>
Description:
def get_dihedral_degrees(self, indices, start_row=0):
"""Return the dihedrals between given atoms.
Calculates the dihedral angle in degrees between the atoms with
indices ``i, b, a, d``.
The indices can be given in three ways:
* As simple list ``[i, b, a, d]``
* As list of lists: ``[[i1, b1, a1, d1], [i2, b2, a2, d2]...]``
* As :class:`pandas.DataFrame` where ``i`` is taken from the index and
``b``, ``a`` and ``d`` from the respective columns
``'b'``, ``'a'`` and ``'d'``.
Args:
indices (list):
start_row (int):
Returns:
:class:`numpy.ndarray`: Vector of angles in degrees.
""" |
coords = ['x', 'y', 'z']
if isinstance(indices, pd.DataFrame):
i_pos = self.loc[indices.index, coords].values
b_pos = self.loc[indices.loc[:, 'b'], coords].values
a_pos = self.loc[indices.loc[:, 'a'], coords].values
d_pos = self.loc[indices.loc[:, 'd'], coords].values
else:
indices = np.array(indices)
if len(indices.shape) == 1:
indices = indices[None, :]
i_pos = self.loc[indices[:, 0], coords].values
b_pos = self.loc[indices[:, 1], coords].values
a_pos = self.loc[indices[:, 2], coords].values
d_pos = self.loc[indices[:, 3], coords].values
IB = b_pos - i_pos
BA = a_pos - b_pos
AD = d_pos - a_pos
N1 = np.cross(IB, BA, axis=1)
N2 = np.cross(BA, AD, axis=1)
n1, n2 = [v / np.linalg.norm(v, axis=1)[:, None] for v in (N1, N2)]
dot_product = np.sum(n1 * n2, axis=1)
dot_product[dot_product > 1] = 1
dot_product[dot_product < -1] = -1
dihedrals = np.degrees(np.arccos(dot_product))
# the next lines are to test the direction of rotation.
# is a dihedral really 90 or 270 degrees?
# Equivalent to direction of rotation of dihedral
where_to_modify = np.sum(BA * np.cross(n1, n2, axis=1), axis=1) > 0
where_to_modify = np.nonzero(where_to_modify)[0]
length = indices.shape[0] - start_row
sign = np.full(length, 1, dtype='float64')
to_add = np.full(length, 0, dtype='float64')
sign[where_to_modify] = -1
to_add[where_to_modify] = 360
dihedrals = to_add + sign * dihedrals
return dihedrals |
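The sign test above distinguishes, e.g., 90 from 270 degrees. A standalone numpy sketch of the same math for a single quadruple of points:

import numpy as np

def dihedral_degrees(i, b, a, d):
    IB, BA, AD = b - i, a - b, d - a
    n1 = np.cross(IB, BA)
    n1 = n1 / np.linalg.norm(n1)
    n2 = np.cross(BA, AD)
    n2 = n2 / np.linalg.norm(n2)
    angle = np.degrees(np.arccos(np.clip(n1 @ n2, -1, 1)))
    # pick the 180..360 branch when the sense of rotation is reversed
    if np.dot(BA, np.cross(n1, n2)) > 0:
        angle = 360 - angle
    return angle

print(dihedral_degrees(np.array([1., 0., 0.]), np.array([0., 0., 0.]),
                       np.array([0., 1., 0.]), np.array([0., 1., 1.])))  # 90.0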
<SYSTEM_TASK:>
Get the indices of non bonded parts in the molecule.
<END_TASK>
<USER_TASK:>
Description:
def fragmentate(self, give_only_index=False,
use_lookup=None):
"""Get the indices of non bonded parts in the molecule.
Args:
give_only_index (bool): If ``True`` a set of indices is returned.
Otherwise a new Cartesian instance.
use_lookup (bool): Use a lookup variable for
:meth:`~chemcoord.Cartesian.get_bonds`. The default is
specified in ``settings['defaults']['use_lookup']``
Returns:
list: A list of sets of indices or new Cartesian instances.
""" |
if use_lookup is None:
use_lookup = settings['defaults']['use_lookup']
fragments = []
pending = set(self.index)
self.get_bonds(use_lookup=use_lookup)
while pending:
index = self.get_coordination_sphere(
pending.pop(), use_lookup=True, n_sphere=float('inf'),
only_surface=False, give_only_index=True)
pending = pending - index
if give_only_index:
fragments.append(index)
else:
fragment = self.loc[index]
fragment._metadata['bond_dict'] = fragment.restrict_bond_dict(
self._metadata['bond_dict'])
try:
fragment._metadata['val_bond_dict'] = (
fragment.restrict_bond_dict(
self._metadata['val_bond_dict']))
except KeyError:
pass
fragments.append(fragment)
return fragments |
<SYSTEM_TASK:>
Restrict a bond dictionary to self.
<END_TASK>
<USER_TASK:>
Description:
def restrict_bond_dict(self, bond_dict):
"""Restrict a bond dictionary to self.
Args:
bond_dict (dict): Look into :meth:`~chemcoord.Cartesian.get_bonds`,
to see examples for a bond_dict.
Returns:
bond dictionary
""" |
return {j: bond_dict[j] & set(self.index) for j in self.index} |
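The restriction is plain set intersection. For a hypothetical bond dict:

bond_dict = {0: {1, 2}, 1: {0}, 2: {0, 3}, 3: {2}}
kept = {0, 1, 2}  # indices remaining after slicing
restricted = {j: bond_dict[j] & kept for j in kept}
# {0: {1, 2}, 1: {0}, 2: {0}} -- the bond 2-3 is dropped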
<SYSTEM_TASK:>
Get the indices of the atoms in a fragment.
<END_TASK>
<USER_TASK:>
Description:
def get_fragment(self, list_of_indextuples, give_only_index=False,
use_lookup=None):
"""Get the indices of the atoms in a fragment.
The list_of_indextuples contains all bonds from the
molecule to the fragment. ``[(1,3), (2,4)]`` means for example that the
fragment is connected over two bonds. The first bond is from atom 1 in
the molecule to atom 3 in the fragment. The second bond is from atom
2 in the molecule to atom 4 in the fragment.
Args:
list_of_indextuples (list):
give_only_index (bool): If ``True`` a set of indices
is returned. Otherwise a new Cartesian instance.
use_lookup (bool): Use a lookup variable for
:meth:`~chemcoord.Cartesian.get_bonds`. The default is
specified in ``settings['defaults']['use_lookup']``
Returns:
A set of indices or a new Cartesian instance.
""" |
if use_lookup is None:
use_lookup = settings['defaults']['use_lookup']
exclude = [t[0] for t in list_of_indextuples]
index_of_atom = list_of_indextuples[0][1]
fragment_index = self.get_coordination_sphere(
index_of_atom, exclude=set(exclude), n_sphere=float('inf'),
only_surface=False, give_only_index=True, use_lookup=use_lookup)
if give_only_index:
return fragment_index
else:
return self.loc[fragment_index, :] |
<SYSTEM_TASK:>
Return self without the specified fragments.
<END_TASK>
<USER_TASK:>
Description:
def get_without(self, fragments,
use_lookup=None):
"""Return self without the specified fragments.
Args:
fragments: Either a list of :class:`~chemcoord.Cartesian` or a
:class:`~chemcoord.Cartesian`.
use_lookup (bool): Use a lookup variable for
:meth:`~chemcoord.Cartesian.get_bonds`. The default is
specified in ``settings['defaults']['use_lookup']``
Returns:
list: List containing :class:`~chemcoord.Cartesian`.
""" |
if use_lookup is None:
use_lookup = settings['defaults']['use_lookup']
if pd.api.types.is_list_like(fragments):
for fragment in fragments:
try:
index_of_all_fragments |= fragment.index
except NameError:
index_of_all_fragments = fragment.index
else:
index_of_all_fragments = fragments.index
missing_part = self.loc[self.index.difference(index_of_all_fragments)]
missing_part = missing_part.fragmentate(use_lookup=use_lookup)
return sorted(missing_part, key=len, reverse=True) |
<SYSTEM_TASK:>
Optimized function for calculating the distance between each pair
<END_TASK>
<USER_TASK:>
Description:
def _jit_pairwise_distances(pos1, pos2):
"""Optimized function for calculating the distance between each pair
of points in positions1 and positions2.
Falls back to python (object) mode if a scalar instead of an array is
given.
""" |
n1 = pos1.shape[0]
n2 = pos2.shape[0]
D = np.empty((n1, n2))
for i in range(n1):
for j in range(n2):
D[i, j] = np.sqrt(((pos1[i] - pos2[j])**2).sum())
return D |
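The docstring's "python mode as fallback" is numba terminology, so this helper is presumably decorated with ``numba.jit`` in the library; a sketch of the decorated form (the automatic object-mode fallback of older numba releases is an assumption here):

import numba
import numpy as np

@numba.jit  # nopython where possible; older numba fell back to object mode
def _jit_pairwise_distances(pos1, pos2):
    n1, n2 = pos1.shape[0], pos2.shape[0]
    D = np.empty((n1, n2))
    for i in range(n1):
        for j in range(n2):
            D[i, j] = np.sqrt(((pos1[i] - pos2[j])**2).sum())
    return D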
<SYSTEM_TASK:>
Calculate the inertia tensor and transforms along
<END_TASK>
<USER_TASK:>
Description:
def get_inertia(self):
"""Calculate the inertia tensor and transforms along
rotation axes.
This function calculates the inertia tensor and returns
a 4-tuple.
The unit is ``amu * length-unit-of-xyz-file**2``
Args:
None
Returns:
dict: The returned dictionary has four possible keys:
``transformed_Cartesian``:
A :class:`~chemcoord.Cartesian`
that is transformed to the basis spanned by
the eigenvectors of the inertia tensor. The x-axis
is the axis with the lowest inertia moment, the
z-axis the one with the highest. Contains also a
column for the mass
``diag_inertia_tensor``:
A vector containing the ascendingly sorted inertia moments after
diagonalization.
``inertia_tensor``:
The inertia tensor in the old basis.
``eigenvectors``:
The eigenvectors of the inertia tensor in the old basis.
Since the inertia_tensor is hermitian, they are orthogonal and
are returned as an orthonormal righthanded basis.
The i-th eigenvector corresponds to the i-th eigenvalue in
``diag_inertia_tensor``.
""" |
def calculate_inertia_tensor(molecule):
masses = molecule.loc[:, 'mass'].values
pos = molecule.loc[:, ['x', 'y', 'z']].values
inertia = np.sum(
masses[:, None, None]
* ((pos**2).sum(axis=1)[:, None, None]
* np.identity(3)[None, :, :]
- pos[:, :, None] * pos[:, None, :]),
axis=0)
diag_inertia, eig_v = np.linalg.eig(inertia)
sorted_index = np.argsort(diag_inertia)
diag_inertia = diag_inertia[sorted_index]
eig_v = eig_v[:, sorted_index]
return inertia, eig_v, diag_inertia
molecule = self.add_data('mass')
molecule = molecule - molecule.get_barycenter()
inertia, eig_v, diag_inertia = calculate_inertia_tensor(molecule)
eig_v = xyz_functions.orthonormalize_righthanded(eig_v)
molecule = molecule.basistransform(eig_v)
return {'transformed_Cartesian': molecule, 'eigenvectors': eig_v,
'diag_inertia_tensor': diag_inertia, 'inertia_tensor': inertia} |
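A usage sketch (assuming ``molecule`` is a ``Cartesian`` instance):

inertia = molecule.get_inertia()
aligned = inertia['transformed_Cartesian']   # x = lowest moment, z = highest
I1, I2, I3 = inertia['diag_inertia_tensor']  # ascending principal moments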
<SYSTEM_TASK:>
Transform the frame to a new basis.
<END_TASK>
<USER_TASK:>
Description:
def basistransform(self, new_basis, old_basis=None,
orthonormalize=True):
"""Transform the frame to a new basis.
This function transforms the cartesian coordinates from an
old basis to a new one. Please note that old_basis and
new_basis are supposed to have full rank and consist of
three linearly independent vectors. If new_basis is a
rotation matrix, or orthonormalize is True so that it is
orthonormalized to a right-handed basis first, all involved
matrices are transposed instead of inverted.
Args:
new_basis (np.array):
old_basis (np.array):
orthonormalize (bool):
Returns:
Cartesian: The transformed molecule.
""" |
if old_basis is None:
old_basis = np.identity(3)
is_rotation_matrix = np.isclose(np.linalg.det(new_basis), 1)
if not is_rotation_matrix and orthonormalize:
new_basis = xyz_functions.orthonormalize_righthanded(new_basis)
is_rotation_matrix = True
if is_rotation_matrix:
return dot(np.dot(new_basis.T, old_basis), self)
else:
return dot(np.dot(np.linalg.inv(new_basis), old_basis), self) |
<SYSTEM_TASK:>
Return a Cartesian with a column for the distance from origin.
<END_TASK>
<USER_TASK:>
Description:
def get_distance_to(self, origin=None, other_atoms=None, sort=False):
"""Return a Cartesian with a column for the distance from origin.
""" |
if origin is None:
origin = np.zeros(3)
elif pd.api.types.is_list_like(origin):
origin = np.array(origin, dtype='f8')
else:
origin = self.loc[origin, ['x', 'y', 'z']]
if other_atoms is None:
other_atoms = self.index
new = self.loc[other_atoms, :].copy()
norm = np.linalg.norm
try:
new['distance'] = norm((new - origin).loc[:, ['x', 'y', 'z']],
axis=1)
except AttributeError:
# Happens if molecule consists of only one atom
new['distance'] = norm((new - origin).loc[:, ['x', 'y', 'z']])
if sort:
new.sort_values(by='distance', inplace=True)
return new |
<SYSTEM_TASK:>
Return the reindexed version of Cartesian.
<END_TASK>
<USER_TASK:>
Description:
def change_numbering(self, rename_dict, inplace=False):
"""Return the reindexed version of Cartesian.
Args:
rename_dict (dict): A dictionary mapping integers on integers.
Returns:
Cartesian: A renamed copy according to the dictionary passed.
""" |
output = self if inplace else self.copy()
new_index = [rename_dict.get(key, key) for key in self.index]
output.index = new_index
if not inplace:
return output |
<SYSTEM_TASK:>
This function partitions the molecule into subsets of the
<END_TASK>
<USER_TASK:>
Description:
def partition_chem_env(self, n_sphere=4,
use_lookup=None):
"""This function partitions the molecule into subsets of the
same chemical environment.
A chemical environment is specified by the number of
surrounding atoms of a certain kind around an atom with a
certain atomic number represented by a tuple of a string
and a frozenset of tuples.
The ``n_sphere`` option determines how many branches the
algorithm follows to determine the chemical environment.
Example:
A carbon atom in ethane has bonds with three hydrogen (atomic
number 1) and one carbon atom (atomic number 6).
If ``n_sphere=1`` these are the only atoms we are
interested in and the chemical environment is::
('C', frozenset([('H', 3), ('C', 1)]))
If ``n_sphere=2`` we follow every atom in the chemical
environment of ``n_sphere=1`` to their direct neighbours.
In the case of ethane this gives::
('C', frozenset([('H', 6), ('C', 1)]))
In the special case of ethane this is the whole molecule;
in other cases you can apply this operation recursively and
stop after ``n_sphere`` or after reaching the end of
branches.
Args:
n_sphere (int):
use_lookup (bool): Use a lookup variable for
:meth:`~chemcoord.Cartesian.get_bonds`. The default is
specified in ``settings['defaults']['use_lookup']``
Returns:
dict: The output will look like this::
{ (element_symbol, frozenset([tuples])) : set([indices]) }
A dictionary mapping from a chemical environment to
the set of indices of atoms in this environment.
""" |
if use_lookup is None:
use_lookup = settings['defaults']['use_lookup']
def get_chem_env(self, i, n_sphere):
env_index = self.get_coordination_sphere(
i, n_sphere=n_sphere, only_surface=False,
give_only_index=True, use_lookup=use_lookup)
env_index.remove(i)
atoms = self.loc[env_index, 'atom']
environment = frozenset(collections.Counter(atoms).most_common())
return (self.loc[i, 'atom'], environment)
chemical_environments = collections.defaultdict(set)
for i in self.index:
chemical_environments[get_chem_env(self, i, n_sphere)].add(i)
return dict(chemical_environments) |
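For the ethane example above (a hypothetical ``ethane`` Cartesian; atom indices are illustrative):

envs = ethane.partition_chem_env(n_sphere=1)
# {('C', frozenset({('H', 3), ('C', 1)})): {0, 1},
#  ('H', frozenset({('C', 1)})): {2, 3, 4, 5, 6, 7}}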
<SYSTEM_TASK:>
Align two Cartesians.
<END_TASK>
<USER_TASK:>
Description:
def align(self, other, indices=None, ignore_hydrogens=False):
"""Align two Cartesians.
Minimize the RMSD (root mean squared deviation) between
``self`` and ``other``.
Returns a tuple of copies of ``self`` and ``other`` where
both are centered around their centroid and
``other`` is rotated onto ``self``.
The rotation minimises the distances between the
atom pairs of the same label.
Uses the Kabsch algorithm implemented within
:func:`~.xyz_functions.get_kabsch_rotation`
.. note:: If ``indices is None``, then ``len(self) == len(other)``
must be true and the elements in each index have to be the same.
Args:
other (Cartesian):
indices (sequence): It is possible to specify a subset of indices
that is used for the determination of
the best rotation matrix::
[[i1, i2,...], [j1, j2,...]]
If ``indices`` is given in this form, the rotation matrix
minimises the distance between ``i1`` and ``j1``,
``i2`` and ``j2`` and so on.
ignore_hydrogens (bool):
Returns:
tuple:
""" |
m1 = (self - self.get_centroid()).sort_index()
m2 = (other - other.get_centroid()).sort_index()
if indices is not None and ignore_hydrogens:
message = 'Indices != None and ignore_hydrogens == True is invalid'
raise IllegalArgumentCombination(message)
elif ignore_hydrogens:
m1 = m1[m1['atom'] != 'H']
m2 = m2[m2['atom'] != 'H']
pos1 = m1.loc[:, ['x', 'y', 'z']].values
pos2 = m2.loc[m1.index, ['x', 'y', 'z']].values
elif indices is not None:
pos1 = m1.loc[indices[0], ['x', 'y', 'z']].values
pos2 = m2.loc[indices[1], ['x', 'y', 'z']].values
else:
pos1 = m1.loc[:, ['x', 'y', 'z']].values
pos2 = m2.loc[m1.index, ['x', 'y', 'z']].values
m2 = dot(xyz_functions.get_kabsch_rotation(pos1, pos2), m2)
return m1, m2 |
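A usage sketch (assuming ``m_ref`` and ``m_other`` are ``Cartesian`` instances of the same molecule):

aligned_ref, aligned_other = m_ref.align(m_other)
# or restrict the fit to chosen atom pairs:
aligned_ref, aligned_other = m_ref.align(m_other, indices=[[0, 1, 2], [0, 1, 2]])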
<SYSTEM_TASK:>
Reindex ``other`` to be similarly indexed as ``self``.
<END_TASK>
<USER_TASK:>
Description:
def reindex_similar(self, other, n_sphere=4):
"""Reindex ``other`` to be similarly indexed as ``self``.
Returns a reindexed copy of ``other`` that minimizes the
distance between each atom in ``self`` and its counterpart
in the same chemical environment in ``other``.
Read more about the definition of the chemical environment in
:func:`Cartesian.partition_chem_env`
.. note:: It is necessary to align ``self`` and other before
applying this method.
This can be done via :meth:`~Cartesian.align`.
.. note:: It is probably necessary to improve the result using
:meth:`~Cartesian.change_numbering()`.
Args:
other (Cartesian):
n_sphere (int): Wrapper around the argument for
:meth:`~Cartesian.partition_chem_env`.
Returns:
Cartesian: Reindexed version of other
""" |
def make_subset_similar(m1, subset1, m2, subset2, index_dct):
"""Changes index_dct INPLACE"""
coords = ['x', 'y', 'z']
index1 = list(subset1)
for m1_i in index1:
dist_m2_to_m1_i = m2.get_distance_to(m1.loc[m1_i, coords],
subset2, sort=True)
m2_i = dist_m2_to_m1_i.index[0]
dist_new = dist_m2_to_m1_i.loc[m2_i, 'distance']
m2_pos_i = dist_m2_to_m1_i.loc[m2_i, coords]
counter = itertools.count()
found = False
while not found:
if m2_i in index_dct.keys():
old_m1_pos = m1.loc[index_dct[m2_i], coords]
if dist_new < np.linalg.norm(m2_pos_i - old_m1_pos):
index1.append(index_dct[m2_i])
index_dct[m2_i] = m1_i
found = True
else:
m2_i = dist_m2_to_m1_i.index[next(counter)]
dist_new = dist_m2_to_m1_i.loc[m2_i, 'distance']
m2_pos_i = dist_m2_to_m1_i.loc[m2_i, coords]
else:
index_dct[m2_i] = m1_i
found = True
return index_dct
molecule1 = self.copy()
molecule2 = other.copy()
partition1 = molecule1.partition_chem_env(n_sphere)
partition2 = molecule2.partition_chem_env(n_sphere)
index_dct = {}
for key in partition1:
message = ('You have chemically different molecules, regarding '
'the topology of their connectivity.')
assert len(partition1[key]) == len(partition2[key]), message
index_dct = make_subset_similar(molecule1, partition1[key],
molecule2, partition2[key],
index_dct)
molecule2.index = [index_dct[i] for i in molecule2.index]
return molecule2.loc[molecule1.index] |
<SYSTEM_TASK:>
Return list of datetime fields for given schema.
<END_TASK>
<USER_TASK:>
Description:
def get_dates(schema):
"""Return list of datetime fields for given schema.""" |
dates = [config.LAST_UPDATED, config.DATE_CREATED]
for field, field_schema in schema.items():
if field_schema['type'] == 'datetime':
dates.append(field)
return dates |
<SYSTEM_TASK:>
Format given doc to match given schema.
<END_TASK>
<USER_TASK:>
Description:
def format_doc(hit, schema, dates):
"""Format given doc to match given schema.""" |
doc = hit.get('_source', {})
doc.setdefault(config.ID_FIELD, hit.get('_id'))
doc.setdefault('_type', hit.get('_type'))
if hit.get('highlight'):
doc['es_highlight'] = hit.get('highlight')
if hit.get('inner_hits'):
doc['_inner_hits'] = {}
for key, value in hit.get('inner_hits').items():
doc['_inner_hits'][key] = []
for item in value.get('hits', {}).get('hits', []):
doc['_inner_hits'][key].append(item.get('_source', {}))
for key in dates:
if key in doc:
doc[key] = parse_date(doc[key])
return doc |
<SYSTEM_TASK:>
Put together all filters we have and set them as 'and' filter
<END_TASK>
<USER_TASK:>
Description:
def set_filters(query, base_filters):
"""Put together all filters we have and set them as 'and' filter
within filtered query.
:param query: elastic query being constructed
:param base_filters: all filters set outside of query (eg. resource config, sub_resource_lookup)
""" |
filters = [f for f in base_filters if f is not None]
query_filter = query['query']['filtered'].get('filter', None)
if query_filter is not None:
if 'and' in query_filter:
filters.extend(query_filter['and'])
else:
filters.append(query_filter)
if filters:
query['query']['filtered']['filter'] = {'and': filters} |
<SYSTEM_TASK:>
Build a query which follows ElasticSearch syntax from doc.
<END_TASK>
<USER_TASK:>
Description:
def build_elastic_query(doc):
"""
Build a query which follows ElasticSearch syntax from doc.
1. Converts {"q":"cricket"} to the below elastic query::
{
"query": {
"filtered": {
"query": {
"query_string": {
"query": "cricket",
"lenient": false,
"default_operator": "AND"
}
}
}
}
}
2. Converts a faceted query::
{"q":"cricket", "type":['text'], "source": "AAP"}
to the below elastic query::
{
"query": {
"filtered": {
"filter": {
"and": [
{"terms": {"type": ["text"]}},
{"term": {"source": "AAP"}}
]
},
"query": {
"query_string": {
"query": "cricket",
"lenient": false,
"default_operator": "AND"
}
}
}
}
}
:param doc: A document object which is inline with the syntax specified in the examples.
It's the developer responsibility to pass right object.
:returns ElasticSearch query
""" |
elastic_query, filters = {"query": {"filtered": {}}}, []
for key in doc.keys():
if key == 'q':
elastic_query['query']['filtered']['query'] = _build_query_string(doc['q'])
else:
_value = doc[key]
filters.append({"terms": {key: _value}} if isinstance(_value, list) else {"term": {key: _value}})
set_filters(elastic_query, filters)
return elastic_query |
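For a faceted input this yields (dict iteration order assumed to be insertion order, i.e. Python 3.7+):

doc = {'q': 'cricket', 'type': ['text'], 'source': 'AAP'}
query = build_elastic_query(doc)
# {'query': {'filtered': {
#     'query': {'query_string': {'query': 'cricket',
#                                'default_operator': 'AND',
#                                'lenient': False}},
#     'filter': {'and': [{'terms': {'type': ['text']}},
#                        {'term': {'source': 'AAP'}}]}}}}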
<SYSTEM_TASK:>
Build ``query_string`` object from ``q``.
<END_TASK>
<USER_TASK:>
Description:
def _build_query_string(q, default_field=None, default_operator='AND'):
"""
Build ``query_string`` object from ``q``.
:param q: q of type String
:param default_field: default_field
:return: dictionary object.
""" |
def _is_phrase_search(query_string):
clean_query = query_string.strip()
return clean_query and clean_query.startswith('"') and clean_query.endswith('"')
def _get_phrase(query_string):
return query_string.strip().strip('"')
if _is_phrase_search(q):
query = {'match_phrase': {'_all': _get_phrase(q)}}
else:
query = {'query_string': {'query': q, 'default_operator': default_operator}}
query['query_string'].update({'default_field': default_field} if default_field else {'lenient': False})
return query |
<SYSTEM_TASK:>
Add extra info to response.
<END_TASK>
<USER_TASK:>
Description:
def extra(self, response):
"""Add extra info to response.""" |
if 'facets' in self.hits:
response['_facets'] = self.hits['facets']
if 'aggregations' in self.hits:
response['_aggregations'] = self.hits['aggregations'] |
<SYSTEM_TASK:>
Get mapping for given resource or item schema.
<END_TASK>
<USER_TASK:>
Description:
def _get_mapping(self, schema):
"""Get mapping for given resource or item schema.
:param schema: resource or dict/list type item schema
""" |
properties = {}
for field, field_schema in schema.items():
field_mapping = self._get_field_mapping(field_schema)
if field_mapping:
properties[field] = field_mapping
return {'properties': properties} |
<SYSTEM_TASK:>
Get mapping for single field schema.
<END_TASK>
<USER_TASK:>
Description:
def _get_field_mapping(self, schema):
"""Get mapping for single field schema.
:param schema: field schema
""" |
if 'mapping' in schema:
return schema['mapping']
elif schema['type'] == 'dict' and 'schema' in schema:
return self._get_mapping(schema['schema'])
elif schema['type'] == 'list' and 'schema' in schema.get('schema', {}):
return self._get_mapping(schema['schema']['schema'])
elif schema['type'] == 'datetime':
return {'type': 'date'}
elif schema['type'] == 'string' and schema.get('unique'):
return {'type': 'string', 'index': 'not_analyzed'} |
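A sketch of how a hypothetical Eve schema maps through these rules:

schema = {
    'firstcreated': {'type': 'datetime'},
    'guid': {'type': 'string', 'unique': True},
    'place': {'type': 'dict',
              'schema': {'name': {'type': 'string',
                                  'mapping': {'type': 'string'}}}},
}
# self._get_mapping(schema) would yield:
# {'properties': {
#     'firstcreated': {'type': 'date'},
#     'guid': {'type': 'string', 'index': 'not_analyzed'},
#     'place': {'properties': {'name': {'type': 'string'}}}}}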
<SYSTEM_TASK:>
Create new index and ignore if it exists already.
<END_TASK>
<USER_TASK:>
Description:
def create_index(self, index=None, settings=None, es=None):
"""Create new index and ignore if it exists already.""" |
if index is None:
index = self.index
if es is None:
es = self.es
try:
alias = index
index = generate_index_name(alias)
args = {'index': index}
if settings:
args['body'] = settings
es.indices.create(**args)
es.indices.put_alias(index, alias)
logger.info('created index alias=%s index=%s' % (alias, index))
except elasticsearch.TransportError: # index exists
pass |
<SYSTEM_TASK:>
Put mapping for elasticsearch for current schema.
<END_TASK>
<USER_TASK:>
Description:
def put_mapping(self, app, index=None):
"""Put mapping for elasticsearch for current schema.
It's not called automatically now, but rather left for the user to call whenever it makes sense.
""" |
for resource, resource_config in self._get_elastic_resources().items():
datasource = resource_config.get('datasource', {})
if not is_elastic(datasource):
continue
if datasource.get('source', resource) != resource: # only put mapping for core types
continue
properties = self._get_mapping_properties(resource_config)
kwargs = {
'index': index or self._resource_index(resource),
'doc_type': resource,
'body': properties,
}
try:
self.elastic(resource).indices.put_mapping(**kwargs)
except elasticsearch.exceptions.RequestError:
logger.exception('mapping error, updating settings resource=%s' % resource) |
<SYSTEM_TASK:>
Get mapping for index.
<END_TASK>
<USER_TASK:>
Description:
def get_mapping(self, index, doc_type=None):
"""Get mapping for index.
:param index: index name
""" |
mapping = self.es.indices.get_mapping(index=index, doc_type=doc_type)
return next(iter(mapping.values())) |
<SYSTEM_TASK:>
Get settings for index.
<END_TASK>
<USER_TASK:>
Description:
def get_settings(self, index):
"""Get settings for index.
:param index: index name
""" |
settings = self.es.indices.get_settings(index=index)
return next(iter(settings.values())) |
<SYSTEM_TASK:>
Get index name for given alias.
<END_TASK>
<USER_TASK:>
Description:
def get_index_by_alias(self, alias):
"""Get index name for given alias.
If there is no alias assume it's an index.
:param alias: alias name
""" |
try:
info = self.es.indices.get_alias(name=alias)
return next(iter(info.keys()))
except elasticsearch.exceptions.NotFoundError:
return alias |
<SYSTEM_TASK:>
Check the environment variable and the given argument parameter to decide if aggregations needed.
<END_TASK>
<USER_TASK:>
Description:
def should_aggregate(self, req):
"""Check the environment variable and the given argument parameter to decide if aggregations needed.
argument value is expected to be '0' or '1'
""" |
try:
return self.app.config.get('ELASTICSEARCH_AUTO_AGGREGATIONS') or \
bool(req.args and int(req.args.get('aggregations')))
except (AttributeError, TypeError):
return False |
<SYSTEM_TASK:>
Check the given argument parameter to decide if highlights needed.
<END_TASK>
<USER_TASK:>
Description:
def should_highlight(self, req):
"""
Check the given argument parameter to decide if highlights needed.
argument value is expected to be '0' or '1'
""" |
try:
return bool(req.args and int(req.args.get('es_highlight', 0)))
except (AttributeError, TypeError):
return False |
<SYSTEM_TASK:>
Check the given argument parameter to decide if projections needed.
<END_TASK>
<USER_TASK:>
Description:
def should_project(self, req):
"""
Check the given argument parameter to decide if projections needed.
argument value is expected to be a list of strings
""" |
try:
return req.args and json.loads(req.args.get('projections', []))
except (AttributeError, TypeError):
return False |
<SYSTEM_TASK:>
Returns the projected fields from request.
<END_TASK>
<USER_TASK:>
Description:
def get_projected_fields(self, req):
"""
Returns the projected fields from request.
""" |
try:
args = getattr(req, 'args', {})
return ','.join(json.loads(args.get('projections')))
except (AttributeError, TypeError):
return None |
<SYSTEM_TASK:>
Find single document, if there is _id in lookup use that, otherwise filter.
<END_TASK>
<USER_TASK:>
Description:
def find_one(self, resource, req, **lookup):
"""Find single document, if there is _id in lookup use that, otherwise filter.""" |
if config.ID_FIELD in lookup:
return self._find_by_id(resource=resource, _id=lookup[config.ID_FIELD], parent=lookup.get('parent'))
else:
args = self._es_args(resource)
filters = [{'term': {key: val}} for key, val in lookup.items()]
query = {'query': {'constant_score': {'filter': {'and': filters}}}}
try:
args['size'] = 1
hits = self.elastic(resource).search(body=query, **args)
docs = self._parse_hits(hits, resource)
return docs.first()
except elasticsearch.NotFoundError:
return |
<SYSTEM_TASK:>
Find the document by Id. If parent is not provided then on
<END_TASK>
<USER_TASK:>
Description:
def _find_by_id(self, resource, _id, parent=None):
"""Find the document by Id. If parent is not provided then on
routing exception try to find using search.
""" |
def is_found(hit):
if 'exists' in hit:
hit['found'] = hit['exists']
return hit.get('found', False)
args = self._es_args(resource)
try:
# set the parent if available
if parent:
args['parent'] = parent
hit = self.elastic(resource).get(id=_id, **args)
if not is_found(hit):
return
docs = self._parse_hits({'hits': {'hits': [hit]}}, resource)
return docs.first()
except elasticsearch.NotFoundError:
return
except elasticsearch.TransportError as tex:
if tex.error == 'routing_missing_exception' or 'RoutingMissingException' in tex.error:
# search for the item
args = self._es_args(resource)
query = {'query': {'bool': {'must': [{'term': {'_id': _id}}]}}}
try:
args['size'] = 1
hits = self.elastic(resource).search(body=query, **args)
docs = self._parse_hits(hits, resource)
return docs.first()
except elasticsearch.NotFoundError:
return |
<SYSTEM_TASK:>
Insert document, it must be new if there is ``_id`` in it.
<END_TASK>
<USER_TASK:>
Description:
def insert(self, resource, doc_or_docs, **kwargs):
"""Insert document, it must be new if there is ``_id`` in it.""" |
ids = []
kwargs.update(self._es_args(resource))
for doc in doc_or_docs:
self._update_parent_args(resource, kwargs, doc)
_id = doc.pop('_id', None)
res = self.elastic(resource).index(body=doc, id=_id, **kwargs)
doc.setdefault('_id', res.get('_id', _id))
ids.append(doc.get('_id'))
self._refresh_resource_index(resource)
return ids |
<SYSTEM_TASK:>
Test if there is no document for resource.
<END_TASK>
<USER_TASK:>
Description:
def is_empty(self, resource):
"""Test if there is no document for resource.
:param resource: resource name
""" |
args = self._es_args(resource)
res = self.elastic(resource).count(body={'query': {'match_all': {}}}, **args)
return res.get('count', 0) == 0 |
<SYSTEM_TASK:>
Get the Parent Id of the document
<END_TASK>
<USER_TASK:>
Description:
def get_parent_id(self, resource, document):
"""Get the Parent Id of the document
:param resource: resource name
:param document: document containing the parent id
""" |
parent_type = self._get_parent_type(resource)
if parent_type and document:
return document.get(parent_type.get('field'))
return None |
<SYSTEM_TASK:>
Get projection fields for given resource.
<END_TASK>
<USER_TASK:>
Description:
def _fields(self, resource):
"""Get projection fields for given resource.""" |
datasource = self.get_datasource(resource)
keys = datasource[2].keys()
return ','.join(list(keys) + [config.LAST_UPDATED, config.DATE_CREATED])
<SYSTEM_TASK:>
Get index for given resource.
<END_TASK>
<USER_TASK:>
Description:
def _resource_index(self, resource):
"""Get index for given resource.
by default it will be `self.index`, but it can be overridden via app.config
:param resource: resource name
""" |
datasource = self.get_datasource(resource)
indexes = self._resource_config(resource, 'INDEXES') or {}
default_index = self._resource_config(resource, 'INDEX')
return indexes.get(datasource[0], default_index) |
<SYSTEM_TASK:>
Get elastic prefix for given resource.
<END_TASK>
<USER_TASK:>
Description:
def _resource_prefix(self, resource=None):
"""Get elastic prefix for given resource.
Resource can specify ``elastic_prefix`` which behaves same like ``mongo_prefix``.
""" |
px = 'ELASTICSEARCH'
if resource and config.DOMAIN[resource].get('elastic_prefix'):
px = config.DOMAIN[resource].get('elastic_prefix')
return px |
<SYSTEM_TASK:>
Returns the MD5 checksum of a file.
<END_TASK>
<USER_TASK:>
Description:
def get_md5sum(fname, chunk_size=1024):
"""
Returns the MD5 checksum of a file.
Args:
fname (str): Filename
chunk_size (Optional[int]): Size (in Bytes) of the chunks that should be
read in at once. Increasing chunk size reduces the number of reads
required, but increases the memory usage. Defaults to 1024.
Returns:
The MD5 checksum of the file, which is a string.
""" |
def iter_chunks(f):
while True:
chunk = f.read(chunk_size)
if not chunk:
break
yield chunk
sig = hashlib.md5()
with open(fname, 'rb') as f:
for chunk in iter_chunks(f):
sig.update(chunk)
return sig.hexdigest() |
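A quick self-contained check against ``hashlib`` directly:

import hashlib
import os
import tempfile

with tempfile.NamedTemporaryFile(delete=False) as f:
    f.write(b'hello world')
assert get_md5sum(f.name) == hashlib.md5(b'hello world').hexdigest()
os.remove(f.name)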
<SYSTEM_TASK:>
Download a file and verify the MD5 sum.
<END_TASK>
<USER_TASK:>
Description:
def download_and_verify(url, md5sum, fname=None,
chunk_size=1024, clobber=False,
verbose=True):
"""
Download a file and verify the MD5 sum.
Args:
url (str): The URL to download.
md5sum (str): The expected MD5 sum.
fname (Optional[str]): The filename to store the downloaded file in.
If `None`, infer the filename from the URL. Defaults to `None`.
chunk_size (Optional[int]): Process in chunks of this size (in Bytes).
Defaults to 1024.
clobber (Optional[bool]): If `True`, any existing file will be
overwritten without checking. If `False`, the MD5 sum of any
existing file with the destination filename will be checked
first; if it matches, the download is skipped, and otherwise
the file is overwritten. Defaults to `False`.
verbose (Optional[bool]): If `True` (the default), then a progress bar
will be shown during downloads.
Returns:
The filename the URL was downloaded to.
Raises:
DownloadError: The MD5 sum of the downloaded file does not match
`md5sum`.
requests.exceptions.HTTPError: There was a problem connecting to the
URL.
""" |
# Determine the filename
if fname is None:
fname = url.split('/')[-1]
# Check if the file already exists on disk
if (not clobber) and os.path.isfile(fname):
print('Checking existing file to see if MD5 sum matches ...')
md5_existing = get_md5sum(fname, chunk_size=chunk_size)
if md5_existing == md5sum:
print('File exists. Not overwriting.')
return fname
# Make sure the directory it's going into exists
dir_name = os.path.dirname(fname)
if not os.path.exists(dir_name):
os.makedirs(dir_name)
sig = hashlib.md5()
if verbose:
print('Downloading {} ...'.format(url))
if url.startswith('http://') or url.startswith('https://'):
# Stream the URL as a file, copying to local disk
with contextlib.closing(requests.get(url, stream=True)) as r:
try:
r.raise_for_status()
except requests.exceptions.HTTPError as error:
print('Error connecting to URL: "{}"'.format(url))
print(r.text)
raise error
with open(fname, 'wb') as f:
content_length = r.headers.get('content-length')
if content_length is not None:
content_length = int(content_length)
bar = FileTransferProgressBar(content_length)
for k,chunk in enumerate(r.iter_content(chunk_size=chunk_size)):
f.write(chunk)
sig.update(chunk)
if verbose:
bar_val = chunk_size*(k+1)
if content_length is not None:
bar_val = min(bar_val, content_length)
bar.update(bar_val)
else: # e.g., ftp://
with contextlib.closing(urlopen(url)) as r:
content_length = r.headers.get('content-length')
if content_length is not None:
content_length = int(content_length)
bar = FileTransferProgressBar(content_length)
with open(fname, 'wb') as f:
k = 0
while True:
chunk = r.read(chunk_size)
if not chunk:
break
f.write(chunk)
sig.update(chunk)
if verbose:
k += 1
bar_val = chunk_size*k
if content_length is not None:
bar_val = min(bar_val, content_length)
bar.update(bar_val)
if sig.hexdigest() != md5sum:
raise DownloadError('The MD5 sum of the downloaded file is incorrect.\n'
+ ' download: {}\n'.format(sig.hexdigest())
+ ' expected: {}\n'.format(md5sum))
return fname |
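A usage sketch with a hypothetical URL and checksum:

fname = download_and_verify(
    'https://example.com/data/map.fits',        # hypothetical URL
    md5sum='d41d8cd98f00b204e9800998ecf8427e',  # hypothetical checksum
    fname='data/map.fits',
    clobber=False)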
<SYSTEM_TASK:>
Downloads a file.
<END_TASK>
<USER_TASK:>
Description:
def download(url, fname=None):
"""
Downloads a file.
Args:
url (str): The URL to download.
fname (Optional[str]): The filename to store the downloaded file in. If
`None`, take the filename from the URL. Defaults to `None`.
Returns:
The filename the URL was downloaded to.
Raises:
requests.exceptions.HTTPError: There was a problem connecting to the
URL.
""" |
# Determine the filename
if fname is None:
fname = url.split('/')[-1]
# Stream the URL as a file, copying to local disk
with contextlib.closing(requests.get(url, stream=True)) as r:
try:
r.raise_for_status()
except requests.exceptions.HTTPError as error:
print('Error connecting to URL: "{}"'.format(url))
print(r.text)
raise error
with open(fname, 'wb') as f:
shutil.copyfileobj(r.raw, f)
return fname |
<SYSTEM_TASK:>
Downloads a file from the Dataverse, using a DOI and set of metadata
<END_TASK>
<USER_TASK:>
Description:
def dataverse_download_doi(doi,
local_fname=None,
file_requirements={},
clobber=False):
"""
Downloads a file from the Dataverse, using a DOI and set of metadata
parameters to locate the file.
Args:
doi (str): Digital Object Identifier (DOI) containing the file.
local_fname (Optional[str]): Local filename to download the file to. If
`None`, then use the filename provided by the Dataverse. Defaults to
`None`.
file_requirements (Optional[dict]): Select the file containing the
given metadata entries. If multiple files meet these requirements,
only the first is downloaded. Defaults to `{}`, corresponding to no
requirements.
clobber (Optional[bool]): If `True`, overwrite any existing file
without checking its MD5 sum first. Defaults to `False`.
Raises:
DownloadError: Either no matching file was found under the given DOI, or
the MD5 sum of the file was not as expected.
requests.exceptions.HTTPError: The given DOI does not exist, or there
was a problem connecting to the Dataverse.
""" |
metadata = dataverse_search_doi(doi)
def requirements_match(metadata):
for key in file_requirements.keys():
if metadata['dataFile'].get(key, None) != file_requirements[key]:
return False
return True
for file_metadata in metadata['data']['latestVersion']['files']:
if requirements_match(file_metadata):
file_id = file_metadata['dataFile']['id']
md5sum = file_metadata['dataFile']['md5']
if local_fname is None:
local_fname = file_metadata['dataFile']['filename']
# Check if the file already exists on disk
if (not clobber) and os.path.isfile(local_fname):
print('Checking existing file to see if MD5 sum matches ...')
md5_existing = get_md5sum(local_fname)
if md5_existing == md5sum:
print('File exists. Not overwriting.')
return
print("Downloading data to '{}' ...".format(local_fname))
dataverse_download_id(file_id, md5sum,
fname=local_fname, clobber=False)
return
raise DownloadError(
'No file found under the given DOI matches the requirements.\n'
'The metadata found for this DOI was:\n'
+ json.dumps(file_metadata, indent=2, sort_keys=True)) |
<SYSTEM_TASK:>
Reencode an address
<END_TASK>
<USER_TASK:>
Description:
def address_reencode(address, blockchain='bitcoin', **blockchain_opts):
"""
Reencode an address
""" |
if blockchain == 'bitcoin':
return btc_address_reencode(address, **blockchain_opts)
else:
raise ValueError("Unknown blockchain '{}'".format(blockchain)) |
<SYSTEM_TASK:>
Is the given private key bundle a multisig bundle?
<END_TASK>
<USER_TASK:>
Description:
def is_multisig(privkey_info, blockchain='bitcoin', **blockchain_opts):
"""
Is the given private key bundle a multisig bundle?
""" |
if blockchain == 'bitcoin':
return btc_is_multisig(privkey_info, **blockchain_opts)
else:
raise ValueError('Unknown blockchain "{}"'.format(blockchain)) |
<SYSTEM_TASK:>
Is the given address a multisig address?
<END_TASK>
<USER_TASK:>
Description:
def is_multisig_address(addr, blockchain='bitcoin', **blockchain_opts):
"""
Is the given address a multisig address?
""" |
if blockchain == 'bitcoin':
return btc_is_multisig_address(addr, **blockchain_opts)
else:
raise ValueError('Unknown blockchain "{}"'.format(blockchain)) |
<SYSTEM_TASK:>
Is the given script a multisig script?
<END_TASK>
<USER_TASK:>
Description:
def is_multisig_script(script, blockchain='bitcoin', **blockchain_opts):
"""
Is the given script a multisig script?
""" |
if blockchain == 'bitcoin':
return btc_is_multisig_script(script, **blockchain_opts)
else:
raise ValueError('Unknown blockchain "{}"'.format(blockchain)) |
<SYSTEM_TASK:>
Is the given private key bundle a single-sig key bundle?
<END_TASK>
<USER_TASK:>
Description:
def is_singlesig(privkey_info, blockchain='bitcoin', **blockchain_opts):
"""
Is the given private key bundle a single-sig key bundle?
""" |
if blockchain == 'bitcoin':
return btc_is_singlesig(privkey_info, **blockchain_opts)
else:
raise ValueError('Unknown blockchain "{}"'.format(blockchain)) |
<SYSTEM_TASK:>
Is the given address a single-sig address?
<END_TASK>
<USER_TASK:>
Description:
def is_singlesig_address(addr, blockchain='bitcoin', **blockchain_opts):
"""
Is the given address a single-sig address?
""" |
if blockchain == 'bitcoin':
return btc_is_singlesig_address(addr, **blockchain_opts)
else:
raise ValueError('Unknown blockchain "{}"'.format(blockchain)) |
<SYSTEM_TASK:>
Get the address from a private key bundle
<END_TASK>
<USER_TASK:>
Description:
def get_privkey_address(privkey_info, blockchain='bitcoin', **blockchain_opts):
"""
Get the address from a private key bundle
""" |
if blockchain == 'bitcoin':
return btc_get_privkey_address(privkey_info, **blockchain_opts)
else:
raise ValueError('Unknown blockchain "{}"'.format(blockchain)) |
<SYSTEM_TASK:>
Apply the gradient for transformation to cartesian space onto zmat_dist.
<END_TASK>
<USER_TASK:>
Description:
def apply_grad_cartesian_tensor(grad_X, zmat_dist):
"""Apply the gradient for transformation to cartesian space onto zmat_dist.
Args:
grad_X (:class:`numpy.ndarray`): A ``(3, n, n, 3)`` array.
The mathematical details of the index layout is explained in
:meth:`~chemcoord.Cartesian.get_grad_zmat()`.
zmat_dist (:class:`~chemcoord.Zmat`):
Distortions in Zmatrix space.
Returns:
:class:`~chemcoord.Cartesian`: Distortions in cartesian space.
""" |
columns = ['bond', 'angle', 'dihedral']
C_dist = zmat_dist.loc[:, columns].values.T
try:
C_dist = C_dist.astype('f8')
C_dist[[1, 2], :] = np.radians(C_dist[[1, 2], :])
except (TypeError, AttributeError):
C_dist[[1, 2], :] = sympy.rad(C_dist[[1, 2], :])
cart_dist = np.tensordot(grad_X, C_dist, axes=([3, 2], [0, 1])).T
from chemcoord.cartesian_coordinates.cartesian_class_main import Cartesian
return Cartesian(atoms=zmat_dist['atom'],
coords=cart_dist, index=zmat_dist.index) |
<SYSTEM_TASK:>
Add url converter for model
<END_TASK>
<USER_TASK:>
Description:
def register_model_converter(model, app):
"""Add url converter for model
Example:
class Student(db.model):
id = Column(Integer, primary_key=True)
name = Column(String(50))
register_model_converter(Student)
@route('/classmates/<Student:classmate>')
def get_classmate_info(classmate):
pass
This only supports models that have a single primary key.
You need to call this function before creating the view function.
""" |
if hasattr(model, 'id'):
class Converter(_ModelConverter):
_model = model
app.url_map.converters[model.__name__] = Converter |
<SYSTEM_TASK:>
Give the IUPAC conform representation.
<END_TASK>
<USER_TASK:>
Description:
def iupacify(self):
"""Give the IUPAC conform representation.
Mathematically speaking the angles in a zmatrix are
representations of an equivalence class.
We will denote an equivalence relation with :math:`\\sim`
and use :math:`\\alpha` for an angle and :math:`\\delta` for a dihedral
angle. Then the following equations hold true.
.. math::
(\\alpha, \\delta) &\sim (-\\alpha, \\delta + \\pi) \\\\
\\alpha &\sim \\alpha \\mod 2\\pi \\\\
\\delta &\sim \\delta \\mod 2\\pi
`IUPAC <https://goldbook.iupac.org/html/T/T06406.html>`_ defines
a designated representation of these equivalence classes, by asserting:
.. math::
0 \\leq &\\alpha \\leq \\pi \\\\
-\\pi \\leq &\\delta \\leq \\pi
Args:
None
Returns:
Zmat: Zmatrix with accordingly changed angles and dihedrals.
""" |
def convert_d(d):
r = d % 360
return r - (r // 180) * 360
new = self.copy()
new.unsafe_loc[:, 'angle'] = new['angle'] % 360
select = new['angle'] > 180
# (alpha, delta) ~ (-alpha, delta + pi), i.e. alpha -> 360 - alpha
new.unsafe_loc[select, 'angle'] = 360 - new.loc[select, 'angle']
new.unsafe_loc[select, 'dihedral'] = new.loc[select, 'dihedral'] + 180
new.unsafe_loc[:, 'dihedral'] = convert_d(new.loc[:, 'dihedral'])
return new |
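The ``convert_d`` helper wraps any dihedral into the -180..180 degree range; a quick standalone check:

def convert_d(d):
    r = d % 360
    return r - (r // 180) * 360

print(convert_d(190))   # -170
print(convert_d(-190))  # 170
print(convert_d(355))   # -5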
<SYSTEM_TASK:>
r"""Give a representation of the dihedral with minimized absolute value.
<END_TASK>
<USER_TASK:>
Description:
def minimize_dihedrals(self):
r"""Give a representation of the dihedral with minimized absolute value.
Mathematically speaking the angles in a zmatrix are
representations of an equivalence class.
We will denote an equivalence relation with :math:`\sim`
and use :math:`\alpha` for an angle and :math:`\delta` for a dihedral
angle. Then the following equations hold true.
.. math::
(\alpha, \delta) &\sim (-\alpha, \delta + \pi) \\
\alpha &\sim \alpha \mod 2\pi \\
\delta &\sim \delta \mod 2\pi
This function asserts:
.. math::
-\pi \leq \delta \leq \pi
The main application of this function is the construction of
a transforming movement from ``zmat1`` to ``zmat2``.
This is under the assumption that ``zmat1`` and ``zmat2`` are the same
molecules (regarding their topology) and have the same
construction table (:meth:`~Cartesian.get_construction_table`)::
with cc.TestOperators(False):
D = zm2 - zm1
zmats1 = [zm1 + D * i / n for i in range(n)]
zmats2 = [zm1 + D.minimize_dihedrals() * i / n for i in range(n)]
The movement described by ``zmats1`` might be too large,
because going from :math:`5^\circ` to :math:`355^\circ` is
:math:`350^\circ` in this case and not :math:`-10^\circ` as
in ``zmats2`` which is the desired :math:`\Delta` in most cases.
Args:
None
Returns:
Zmat: Zmatrix with accordingly changed angles and dihedrals.
""" |
new = self.copy()
def convert_d(d):
r = d % 360
return r - (r // 180) * 360
new.unsafe_loc[:, 'dihedral'] = convert_d(new.loc[:, 'dihedral'])
return new |
<SYSTEM_TASK:>
Change numbering to a new index.
<END_TASK>
<USER_TASK:>
Description:
def change_numbering(self, new_index=None):
"""Change numbering to a new index.
Changes the numbering of index and all dependent numbering
(bond_with...) to a new_index.
The user has to make sure that the new_index consists of distinct
elements.
Args:
new_index (list): If None the new_index is taken to be
``range(len(self))``.
Returns:
Zmat: Reindexed version of the zmatrix.
""" |
if new_index is None:
new_index = range(len(self))
elif len(new_index) != len(self):
raise ValueError('len(new_index) has to be the same as len(self)')
c_table = self.loc[:, ['b', 'a', 'd']]
# Strange bug in pandas where .replace is transitive for object columns
# and non-transitive for all other types.
# (Remember that string columns are just object columns)
# Example:
# A = {1: 2, 2: 3}
# Transitive: [1].replace(A) gives [3]
# Non-transitive: [1].replace(A) gives [2]
# https://github.com/pandas-dev/pandas/issues/5338
# https://github.com/pandas-dev/pandas/issues/16051
# https://github.com/pandas-dev/pandas/issues/5541
# For this reason convert to int and replace then.
c_table = c_table.replace(constants.int_label)
try:
c_table = c_table.astype('i8')
except ValueError:
raise ValueError('Due to a bug in pandas it is necessary to have '
'integer columns')
c_table = c_table.replace(self.index, new_index)
c_table = c_table.replace(
{v: k for k, v in constants.int_label.items()})
out = self.copy()
out.unsafe_loc[:, ['b', 'a', 'd']] = c_table
out._frame.index = new_index
return out |
<SYSTEM_TASK:>
Insert dummy atom into the already built cartesian of exception
<END_TASK>
<USER_TASK:>
Description:
def _insert_dummy_cart(self, exception, last_valid_cartesian=None):
"""Insert dummy atom into the already built cartesian of exception
""" |
def get_normal_vec(cartesian, reference_labels):
b_pos, a_pos, d_pos = cartesian._get_positions(reference_labels)
BA = a_pos - b_pos
AD = d_pos - a_pos
N1 = np.cross(BA, AD)
n1 = N1 / np.linalg.norm(N1)
return n1
def insert_dummy(cartesian, reference_labels, n1):
cartesian = cartesian.copy()
b_pos, a_pos, d_pos = cartesian._get_positions(reference_labels)
BA = a_pos - b_pos
N2 = np.cross(n1, BA)
n2 = N2 / np.linalg.norm(N2)
i_dummy = max(self.index) + 1
cartesian.loc[i_dummy, 'atom'] = 'X'
cartesian.loc[i_dummy, ['x', 'y', 'z']] = a_pos + n2
return cartesian, i_dummy
if last_valid_cartesian is None:
last_valid_cartesian = self._metadata['last_valid_cartesian']
ref_labels = self.loc[exception.index, ['b', 'a', 'd']]
n1 = get_normal_vec(last_valid_cartesian, ref_labels)
return insert_dummy(exception.already_built_cartesian, ref_labels, n1) |
<SYSTEM_TASK:>
Return the molecule in cartesian coordinates.
<END_TASK>
<USER_TASK:>
Description:
def get_cartesian(self):
"""Return the molecule in cartesian coordinates.
Raises an :class:`~exceptions.InvalidReference` exception,
if the reference of the i-th atom is undefined.
Args:
None
Returns:
Cartesian: Reindexed version of the zmatrix.
""" |
def create_cartesian(positions, row):
xyz_frame = pd.DataFrame(columns=['atom', 'x', 'y', 'z'],
index=self.index[:row], dtype='f8')
xyz_frame['atom'] = self.loc[xyz_frame.index, 'atom']
xyz_frame.loc[:, ['x', 'y', 'z']] = positions[:row]
from chemcoord.cartesian_coordinates.cartesian_class_main \
import Cartesian
cartesian = Cartesian(xyz_frame, metadata=self.metadata)
return cartesian
c_table = self.loc[:, ['b', 'a', 'd']]
c_table = c_table.replace(constants.int_label)
c_table = c_table.replace({k: v for v, k in enumerate(c_table.index)})
c_table = c_table.values.astype('i8').T
C = self.loc[:, ['bond', 'angle', 'dihedral']].values.T
C[[1, 2], :] = np.radians(C[[1, 2], :])
err, row, positions = transformation.get_X(C, c_table)
positions = positions.T
if err == ERR_CODE_InvalidReference:
rename = dict(enumerate(self.index))
i = rename[row]
b, a, d = self.loc[i, ['b', 'a', 'd']]
cartesian = create_cartesian(positions, row)
raise InvalidReference(i=i, b=b, a=a, d=d,
already_built_cartesian=cartesian)
elif err == ERR_CODE_OK:
return create_cartesian(positions, row + 1) |
<SYSTEM_TASK:>
r"""Return the gradient for the transformation to a Cartesian.
<END_TASK>
<USER_TASK:>
Description:
def get_grad_cartesian(self, as_function=True, chain=True,
drop_auto_dummies=True):
r"""Return the gradient for the transformation to a Cartesian.
If ``as_function`` is True, a function is returned that can be directly
applied onto instances of :class:`~Zmat`, which contain the
applied distortions in Zmatrix space.
In this case the user does not have to worry about indexing and
correct application of the tensor product.
Basically this is the function
:func:`zmat_functions.apply_grad_cartesian_tensor`
with partially replaced arguments.
If ``as_function`` is False, a ``(3, n, n, 3)`` tensor is returned,
which contains the values of the derivatives.
Since an ``n * 3`` matrix is derived with respect to an ``n * 3``
matrix, it is important to specify the rules used for indexing the
resulting tensor.
The rule is very simple: The indices of the numerator are used first
then the indices of the denominator get swapped and appended:
.. math::
\left(
\frac{\partial \mathbf{Y}}{\partial \mathbf{X}}
\right)_{i, j, k, l}
=
\frac{\partial \mathbf{Y}_{i, j}}{\partial \mathbf{X}_{l, k}}
Applying this rule to an example function:
.. math::
f \colon \mathbb{R}^3 \rightarrow \mathbb{R}
Gives as derivative the known row-vector gradient:
.. math::
(\nabla f)_{1, i}
=
\frac{\partial f}{\partial x_i} \qquad i \in \{1, 2, 3\}
.. note::
The row wise alignment of the zmat files makes sense for these
CSV like files.
But it is mathematically advantageous and
sometimes (depending on the memory layout) numerically better
to use a column wise alignment of the coordinates.
In this function the resulting tensor assumes a ``3 * n`` array
for the coordinates.
If
.. math::
\mathbf{C}_{i, j} &\qquad 1 \leq i \leq 3, \quad 1 \leq j \leq n \\
\mathbf{X}_{i, j} &\qquad 1 \leq i \leq 3, \quad 1 \leq j \leq n
denote the positions in Zmatrix and cartesian space,
The complete tensor may be written as:
.. math::
\left(
\frac{\partial \mathbf{X}}{\partial \mathbf{C}}
\right)_{i, j, k, l}
=
\frac{\partial \mathbf{X}_{i, j}}{\partial \mathbf{C}_{l, k}}
Args:
as_function (bool): Return a tensor or
:func:`xyz_functions.apply_grad_zmat_tensor`
with partially replaced arguments.
chain (bool):
drop_auto_dummies (bool): Drop automatically created
dummies from the gradient.
This means, that only changes in regularly placed atoms are
considered for the gradient.
Returns:
(func, :class:`numpy.ndarray`): Depending on ``as_function``
return a tensor or
:func:`~chemcoord.zmat_functions.apply_grad_cartesian_tensor`
with partially replaced arguments.
""" |
zmat = self.change_numbering()
c_table = zmat.loc[:, ['b', 'a', 'd']]
c_table = c_table.replace(constants.int_label).values.T
C = zmat.loc[:, ['bond', 'angle', 'dihedral']].values.T
if C.dtype == np.dtype('i8'):
C = C.astype('f8')
C[[1, 2], :] = np.radians(C[[1, 2], :])
grad_X = transformation.get_grad_X(C, c_table, chain=chain)
if drop_auto_dummies:
def drop_dummies(grad_X, zmolecule):
rename = dict(zip(zmolecule.index, range(len(zmolecule))))
dummies = [rename[v['dummy_d']] for v in
self._metadata['has_dummies'].values()]
excluded = np.full(grad_X.shape[1], True)
excluded[dummies] = False
coord_rows = np.full(3, True)
selection = np.ix_(coord_rows, excluded, excluded, coord_rows)
return grad_X[selection]
grad_X = drop_dummies(grad_X, self)
if as_function:
from chemcoord.internal_coordinates.zmat_functions import (
apply_grad_cartesian_tensor)
return partial(apply_grad_cartesian_tensor, grad_X)
else:
return grad_X |
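A minimal usage sketch (the input file and the distorted row are illustrative assumptions): with ``as_function=True`` the returned callable maps a Zmat of distortions onto the corresponding first-order Cartesian displacement.

import chemcoord as cc

molecule = cc.Cartesian.read_xyz('water.xyz')  # hypothetical input file
zmat = molecule.get_zmat()
to_cart_dist = zmat.get_grad_cartesian()  # as_function=True is the default

# Build a Zmat that holds *distortions*, not absolute coordinates:
dist = zmat.copy()
dist.unsafe_loc[:, ['bond', 'angle', 'dihedral']] = 0.
dist.unsafe_loc[dist.index[2], 'bond'] = 0.1  # stretch one bond by 0.1 Angstrom

cart_dist = to_cart_dist(dist)  # first-order Cartesian response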
<SYSTEM_TASK:>
Add a set of inputs and outputs to a tx.
<END_TASK>
<USER_TASK:>
Description:
def tx_extend(partial_tx_hex, new_inputs, new_outputs, blockchain='bitcoin', **blockchain_opts):
"""
Add a set of inputs and outputs to a tx.
Return the new tx on success
Raise on error
""" |
if blockchain == 'bitcoin':
return btc_tx_extend(partial_tx_hex, new_inputs, new_outputs, **blockchain_opts)
else:
raise ValueError('Unknown blockchain "{}"'.format(blockchain)) |
<SYSTEM_TASK:>
Set the current context to that given.
<END_TASK>
<USER_TASK:>
Description:
def setcontext(context, _local=local):
"""
Set the current context to that given.
Attributes provided by ``context`` override those in the current
context. If ``context`` doesn't specify a particular attribute,
the attribute from the current context shows through.
""" |
oldcontext = getcontext()
_local.__bigfloat_context__ = oldcontext + context |
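A short usage sketch (assuming the surrounding ``bigfloat`` package): contexts compose attribute-wise, so passing only a precision leaves the current rounding mode untouched.

from bigfloat import BigFloat, getcontext, precision, setcontext

setcontext(precision(100))     # overrides the precision attribute only
print(getcontext().precision)  # 100
print(BigFloat(1) / 3)         # evaluated with 100 bits of precision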
<SYSTEM_TASK:>
Apply an MPFR function 'f' to the given arguments 'args', rounding to
<END_TASK>
<USER_TASK:>
Description:
def _apply_function_in_context(cls, f, args, context):
""" Apply an MPFR function 'f' to the given arguments 'args', rounding to
the given context. Returns a new Mpfr object with precision taken from
the current context.
""" |
rounding = context.rounding
bf = mpfr.Mpfr_t.__new__(cls)
mpfr.mpfr_init2(bf, context.precision)
args = (bf,) + args + (rounding,)
ternary = f(*args)
with _temporary_exponent_bounds(context.emin, context.emax):
ternary = mpfr.mpfr_check_range(bf, ternary, rounding)
if context.subnormalize:
# mpfr_subnormalize doesn't set underflow and
# subnormal flags, so we do that ourselves. We choose
# to set the underflow flag for *all* cases where the
# 'after rounding' result is smaller than the smallest
# normal number, even if that result is exact.
# if bf is zero but ternary is nonzero, the underflow
# flag will already have been set by mpfr_check_range;
underflow = (
mpfr.mpfr_number_p(bf) and
not mpfr.mpfr_zero_p(bf) and
mpfr.mpfr_get_exp(bf) < context.precision - 1 + context.emin)
if underflow:
mpfr.mpfr_set_underflow()
ternary = mpfr.mpfr_subnormalize(bf, ternary, rounding)
if ternary:
mpfr.mpfr_set_inexflag()
return bf |
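The underflow policy above can be observed through the public flag API (a sketch; it assumes the predefined ``double_precision`` context, which has ``subnormalize=True``):

from bigfloat import (BigFloat, Underflow, clear_flag, double_precision,
                      setcontext, test_flag)

setcontext(double_precision)  # emulates IEEE 754 binary64
tiny = BigFloat(2) ** -1074   # smallest positive subnormal double
clear_flag(Underflow)
result = tiny / 2             # rounds below the smallest normal number
print(test_flag(Underflow))   # True, per the policy described above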
<SYSTEM_TASK:>
Get the absolute path to the config file.
<END_TASK>
<USER_TASK:>
Description:
def get_config_filename(impl, working_dir):
"""
Get the absolute path to the config file.
""" |
config_filename = impl.get_virtual_chain_name() + ".ini"
return os.path.join(working_dir, config_filename) |
<SYSTEM_TASK:>
Get the absolute path to the last-block file.
<END_TASK>
<USER_TASK:>
Description:
def get_db_filename(impl, working_dir):
"""
Get the absolute path to the last-block file.
""" |
db_filename = impl.get_virtual_chain_name() + ".db"
return os.path.join(working_dir, db_filename) |
<SYSTEM_TASK:>
Get the absolute path to the chain's consensus snapshots file.
<END_TASK>
<USER_TASK:>
Description:
def get_snapshots_filename(impl, working_dir):
"""
Get the absolute path to the chain's consensus snapshots file.
""" |
snapshots_filename = impl.get_virtual_chain_name() + ".snapshots"
return os.path.join(working_dir, snapshots_filename) |
<SYSTEM_TASK:>
Get the absolute path to the chain's indexing lockfile
<END_TASK>
<USER_TASK:>
Description:
def get_lockfile_filename(impl, working_dir):
"""
Get the absolute path to the chain's indexing lockfile
""" |
lockfile_name = impl.get_virtual_chain_name() + ".lock"
return os.path.join(working_dir, lockfile_name) |
<SYSTEM_TASK:>
Set bitcoind options globally.
<END_TASK>
<USER_TASK:>
Description:
def get_bitcoind_config(config_file=None, impl=None):
"""
Set bitcoind options globally.
Call this before trying to talk to bitcoind.
""" |
loaded = False
bitcoind_server = None
bitcoind_port = None
bitcoind_user = None
bitcoind_passwd = None
bitcoind_timeout = None
bitcoind_regtest = None
bitcoind_p2p_port = None
bitcoind_spv_path = None
regtest = None
if config_file is not None:
parser = SafeConfigParser()
parser.read(config_file)
if parser.has_section('bitcoind'):
if parser.has_option('bitcoind', 'server'):
bitcoind_server = parser.get('bitcoind', 'server')
if parser.has_option('bitcoind', 'port'):
bitcoind_port = int(parser.get('bitcoind', 'port'))
if parser.has_option('bitcoind', 'p2p_port'):
bitcoind_p2p_port = int(parser.get('bitcoind', 'p2p_port'))
if parser.has_option('bitcoind', 'user'):
bitcoind_user = parser.get('bitcoind', 'user')
if parser.has_option('bitcoind', 'passwd'):
bitcoind_passwd = parser.get('bitcoind', 'passwd')
if parser.has_option('bitcoind', 'spv_path'):
bitcoind_spv_path = parser.get('bitcoind', 'spv_path')
if parser.has_option('bitcoind', 'regtest'):
regtest = parser.get('bitcoind', 'regtest')
else:
regtest = 'no'
if parser.has_option('bitcoind', 'timeout'):
bitcoind_timeout = float(parser.get('bitcoind', 'timeout'))
if regtest.lower() in ["yes", "y", "true", "1", "on"]:
bitcoind_regtest = True
else:
bitcoind_regtest = False
loaded = True
if not loaded:
bitcoind_server = 'bitcoin.blockstack.com'
bitcoind_port = 8332
bitcoind_user = 'blockstack'
bitcoind_passwd = 'blockstacksystem'
bitcoind_regtest = False
bitcoind_timeout = 300
bitcoind_p2p_port = 8333
bitcoind_spv_path = os.path.expanduser("~/.virtualchain-spv-headers.dat")
default_bitcoin_opts = {
"bitcoind_user": bitcoind_user,
"bitcoind_passwd": bitcoind_passwd,
"bitcoind_server": bitcoind_server,
"bitcoind_port": bitcoind_port,
"bitcoind_timeout": bitcoind_timeout,
"bitcoind_regtest": bitcoind_regtest,
"bitcoind_p2p_port": bitcoind_p2p_port,
"bitcoind_spv_path": bitcoind_spv_path
}
return default_bitcoin_opts |
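For reference, a hypothetical config file exercising every branch above would contain a section like this (all values are placeholders):

[bitcoind]
server = 127.0.0.1
port = 8332
p2p_port = 8333
user = blockstack
passwd = blockstacksystem
regtest = no
timeout = 300
spv_path = /var/blockstack/spv-headers.dat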
<SYSTEM_TASK:>
Returns True if all child date elements present are empty
<END_TASK>
<USER_TASK:>
Description:
def is_empty(self):
"""Returns True if all child date elements present are empty
and other nodes are not set. Returns False if any child date
elements are not empty or other nodes are set.""" |
return all(date.is_empty() for date in [self.created, self.issued]) \
and not self.publisher |
<SYSTEM_TASK:>
Backwards-compatibility for 0.14 and later
<END_TASK>
<USER_TASK:>
Description:
def getinfo(self):
"""
Backwards-compatibility for 0.14 and later
""" |
try:
old_getinfo = AuthServiceProxy(self.__service_url, 'getinfo', self.__timeout, self.__conn, True)
res = old_getinfo()
if 'error' not in res:
# 0.13 and earlier
return res
except JSONRPCException:
pass
network_info = self.getnetworkinfo()
blockchain_info = self.getblockchaininfo()
try:
wallet_info = self.getwalletinfo()
except Exception:
wallet_info = {
'walletversion': None,
'balance': None,
'keypoololdest': None,
'keypoolsize': None,
'paytxfee': None,
}
res = {
'version': network_info['version'],
'protocolversion': network_info['protocolversion'],
'walletversion': wallet_info['walletversion'],
'balance': wallet_info['balance'],
'blocks': blockchain_info['blocks'],
'timeoffset': network_info['timeoffset'],
'connections': network_info['connections'],
'proxy': network_info['networks'],
'difficulty': blockchain_info['difficulty'],
'testnet': blockchain_info['chain'] == 'testnet',
'keypoololdest': wallet_info['keypoololdest'],
'keypoolsize': wallet_info['keypoolsize'],
'paytxfee': wallet_info['paytxfee'],
'errors': network_info['warnings'],
}
for k in ['unlocked_until', 'relayfee', 'paytxfee']:
if k in wallet_info:
res[k] = wallet_info[k]
return res |
<SYSTEM_TASK:>
Make a P2SH address from a hex script
<END_TASK>
<USER_TASK:>
Description:
def btc_make_p2sh_address( script_hex ):
"""
Make a P2SH address from a hex script
""" |
h = hashing.bin_hash160(binascii.unhexlify(script_hex))
addr = bin_hash160_to_address(h, version_byte=multisig_version_byte)
return addr |
<SYSTEM_TASK:>
Make a p2wpkh address from a hex pubkey
<END_TASK>
<USER_TASK:>
Description:
def btc_make_p2wpkh_address( pubkey_hex ):
"""
Make a p2wpkh address from a hex pubkey
""" |
pubkey_hex = keylib.key_formatting.compress(pubkey_hex)
hash160_bin = hashing.bin_hash160(pubkey_hex.decode('hex'))
return segwit_addr_encode(hash160_bin) |
<SYSTEM_TASK:>
Make the redeem script for a p2sh-p2wpkh witness script
<END_TASK>
<USER_TASK:>
Description:
def btc_make_p2sh_p2wpkh_redeem_script( pubkey_hex ):
"""
Make the redeem script for a p2sh-p2wpkh witness script
""" |
pubkey_hash = hashing.bin_hash160(pubkey_hex.decode('hex')).encode('hex')
redeem_script = btc_script_serialize(['0014' + pubkey_hash])
return redeem_script |
<SYSTEM_TASK:>
Make the redeem script for a p2sh-p2wsh witness script
<END_TASK>
<USER_TASK:>
Description:
def btc_make_p2sh_p2wsh_redeem_script( witness_script_hex ):
"""
Make the redeem script for a p2sh-p2wsh witness script
""" |
witness_script_hash = hashing.bin_sha256(witness_script_hex.decode('hex')).encode('hex')
redeem_script = btc_script_serialize(['0020' + witness_script_hash])
return redeem_script |
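Composing the helpers above yields the nested p2sh-p2wpkh address for a key (a sketch; the pubkey is a placeholder, and hashing does not require a valid curve point):

pubkey_hex = '02' + '11' * 32  # hypothetical compressed pubkey (33 bytes)
redeem_script = btc_make_p2sh_p2wpkh_redeem_script(pubkey_hex)
address = btc_make_p2sh_address(redeem_script)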
<SYSTEM_TASK:>
Is the given address a p2sh address?
<END_TASK>
<USER_TASK:>
Description:
def btc_is_p2sh_address( address ):
"""
Is the given address a p2sh address?
""" |
vb = keylib.b58check.b58check_version_byte( address )
if vb == multisig_version_byte:
return True
else:
return False |
<SYSTEM_TASK:>
Is the given address a p2pkh address?
<END_TASK>
<USER_TASK:>
Description:
def btc_is_p2pkh_address( address ):
"""
Is the given address a p2pkh address?
""" |
vb = keylib.b58check.b58check_version_byte( address )
if vb == version_byte:
return True
else:
return False |
<SYSTEM_TASK:>
Is the given address a p2wpkh address?
<END_TASK>
<USER_TASK:>
Description:
def btc_is_p2wpkh_address( address ):
"""
Is the given address a p2wpkh address?
""" |
wver, whash = segwit_addr_decode(address)
if whash is None:
return False
if len(whash) != 20:
return False
return True |
<SYSTEM_TASK:>
Is the given address a p2wsh address?
<END_TASK>
<USER_TASK:>
Description:
def btc_is_p2wsh_address( address ):
"""
Is the given address a p2wsh address?
""" |
wver, whash = segwit_addr_decode(address)
if whash is None:
return False
if len(whash) != 32:
return False
return True |
<SYSTEM_TASK:>
Is the given scriptpubkey a p2sh script?
<END_TASK>
<USER_TASK:>
Description:
def btc_is_p2sh_script( script_hex ):
"""
Is the given scriptpubkey a p2sh script?
""" |
if script_hex.startswith("a914") and script_hex.endswith("87") and len(script_hex) == 46:
return True
else:
return False |
<SYSTEM_TASK:>
Depending on whether or not we're in testnet
<END_TASK>
<USER_TASK:>
Description:
def btc_address_reencode( address, **blockchain_opts ):
"""
Depending on whether or not we're in testnet
or mainnet, re-encode an address accordingly.
""" |
# re-encode bitcoin address
network = blockchain_opts.get('network', None)
opt_version_byte = blockchain_opts.get('version_byte', None)
if btc_is_segwit_address(address):
# bech32 address
hrp = None
if network == 'mainnet':
hrp = 'bc'
elif network == 'testnet':
hrp = 'tb'
else:
if os.environ.get('BLOCKSTACK_TESTNET') == '1' or os.environ.get('BLOCKSTACK_TESTNET3') == '1':
hrp = 'tb'
else:
hrp = 'bc'
wver, whash = segwit_addr_decode(address)
return segwit_addr_encode(whash, hrp=hrp, witver=wver)
else:
# base58 address
vb = keylib.b58check.b58check_version_byte( address )
if network == 'mainnet':
if vb == 0 or vb == 111:
vb = 0
elif vb == 5 or vb == 196:
vb = 5
else:
raise ValueError("Unrecognized address %s" % address)
elif network == 'testnet':
if vb == 0 or vb == 111:
vb = 111
elif vb == 5 or vb == 196:
vb = 196
else:
raise ValueError("Unrecognized address %s" % address)
else:
if opt_version_byte is not None:
vb = opt_version_byte
elif os.environ.get("BLOCKSTACK_TESTNET") == "1" or os.environ.get("BLOCKSTACK_TESTNET3") == "1":
if vb == 0 or vb == 111:
# convert to testnet p2pkh
vb = 111
elif vb == 5 or vb == 196:
# convert to testnet p2sh
vb = 196
else:
raise ValueError("unrecognized address %s" % address)
else:
if vb == 0 or vb == 111:
# convert to mainnet p2pkh
vb = 0
elif vb == 5 or vb == 196:
# convert to mainnet p2sh
vb = 5
else:
raise ValueError("unrecognized address %s" % address)
return keylib.b58check.b58check_encode( keylib.b58check.b58check_decode(address), vb ) |
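For example, converting a well-known mainnet address (the genesis-block coinbase address) to its testnet encoding:

addr = '1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa'
testnet_addr = btc_address_reencode(addr, network='testnet')
# version byte 0 maps to 111, so the result starts with 'm' or 'n'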
<SYSTEM_TASK:>
Is the given key bundle a p2sh-p2wpkh key bundle?
<END_TASK>
<USER_TASK:>
Description:
def btc_is_singlesig_segwit(privkey_info):
"""
Is the given key bundle a p2sh-p2wpkh key bundle?
""" |
try:
jsonschema.validate(privkey_info, PRIVKEY_MULTISIG_SCHEMA)
if len(privkey_info['private_keys']) > 1:
return False
return privkey_info.get('segwit', False)
except ValidationError:
return False |
<SYSTEM_TASK:>
Encode a segwit script hash to a bech32 address.
<END_TASK>
<USER_TASK:>
Description:
def segwit_addr_encode(witprog_bin, hrp=bech32_prefix, witver=bech32_witver):
"""
Encode a segwit script hash to a bech32 address.
Returns the bech32-encoded string on success
""" |
witprog_bytes = [ord(c) for c in witprog_bin]
ret = bech32_encode(hrp, [int(witver)] + convertbits(witprog_bytes, 8, 5))
assert segwit_addr_decode(ret, hrp) != (None, None)
return ret |
<SYSTEM_TASK:>
format the result of calling 'git describe' as a python version
<END_TASK>
<USER_TASK:>
Description:
def format_git_describe(git_str, pep440=False):
"""format the result of calling 'git describe' as a python version""" |
if git_str is None:
return None
if "-" not in git_str: # currently at a tag
return git_str
else:
# formatted as version-N-githash
# want to convert to version.postN-githash
git_str = git_str.replace("-", ".post", 1)
if pep440: # does not allow git hash afterwards
return git_str.split("-")[0]
else:
return git_str.replace("-g", "+git") |
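Worked examples, derived directly from the branches above:

print(format_git_describe('v1.2.3'))                          # 'v1.2.3' (at a tag)
print(format_git_describe('v1.2.3-4-g5abc123'))               # 'v1.2.3.post4+git5abc123'
print(format_git_describe('v1.2.3-4-g5abc123', pep440=True))  # 'v1.2.3.post4'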
<SYSTEM_TASK:>
Tracks the version number.
<END_TASK>
<USER_TASK:>
Description:
def get_version(pep440=False):
"""Tracks the version number.
pep440: bool
When True, this function returns a version string suitable for
a release as defined by PEP 440. When False, the githash (if
available) will be appended to the version string.
The file VERSION holds the version information. If this is not a git
repository, then it is reasonable to assume that the version is not
being incremented and the version returned will be the release version as
read from the file.
However, if the script is located within an active git repository,
git-describe is used to get the version information.
The file VERSION will need to be changed manually. This should be done
before running git tag (set to the same as the version in the tag).
""" |
git_version = format_git_describe(call_git_describe(), pep440=pep440)
if git_version is None: # not a git repository
return read_release_version()
return git_version |
<SYSTEM_TASK:>
Initialize an XmlObject from a string.
<END_TASK>
<USER_TASK:>
Description:
def load_xmlobject_from_string(string, xmlclass=XmlObject, validate=False,
resolver=None):
"""Initialize an XmlObject from a string.
If an xmlclass is specified, construct an instance of that class instead
of :class:`~eulxml.xmlmap.XmlObject`. It should be a subclass of XmlObject.
The constructor will be passed a single node.
If validation is requested and the specified subclass of :class:`XmlObject`
has an XSD_SCHEMA defined, the parser will be configured to validate against
the specified schema. Otherwise, the parser will be configured to use DTD
validation, and expect a Doctype declaration in the xml content.
:param string: xml content to be loaded, as a string
:param xmlclass: subclass of :class:`~eulxml.xmlmap.XmlObject` to initialize
:param validate: boolean, enable validation; defaults to false
:rtype: instance of :class:`~eulxml.xmlmap.XmlObject` requested
""" |
parser = _get_xmlparser(xmlclass=xmlclass, validate=validate, resolver=resolver)
element = etree.fromstring(string, parser)
return xmlclass(element) |
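A minimal sketch with a hypothetical mapping class:

from eulxml import xmlmap

class Item(xmlmap.XmlObject):
    name = xmlmap.StringField('name')

item = xmlmap.load_xmlobject_from_string('<item><name>foo</name></item>', Item)
print(item.name)  # 'foo'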
<SYSTEM_TASK:>
Initialize an XmlObject from a file.
<END_TASK>
<USER_TASK:>
Description:
def load_xmlobject_from_file(filename, xmlclass=XmlObject, validate=False,
resolver=None):
"""Initialize an XmlObject from a file.
See :meth:`load_xmlobject_from_string` for more details; behaves exactly the
same, and accepts the same parameters, except that it takes a filename
instead of a string.
:param filename: name of the file that should be loaded as an xmlobject.
:meth:`lxml.etree.parse` will accept a file name/path, a file object, a
file-like object, or an HTTP or FTP url, however file path and URL are
recommended, as they are generally faster for lxml to handle.
""" |
parser = _get_xmlparser(xmlclass=xmlclass, validate=validate, resolver=resolver)
tree = etree.parse(filename, parser)
return xmlclass(tree.getroot()) |
<SYSTEM_TASK:>
Reads a zmat file.
<END_TASK>
<USER_TASK:>
Description:
def read_zmat(cls, inputfile, implicit_index=True):
"""Reads a zmat file.
Lines beginning with ``#`` are ignored.
Args:
inputfile (str):
implicit_index (bool): If this option is true, the first column
has to contain the element symbols for the atoms.
The row number is then used to determine the index.
Returns:
Zmat:
""" |
cols = ['atom', 'b', 'bond', 'a', 'angle', 'd', 'dihedral']
if implicit_index:
zmat_frame = pd.read_table(inputfile, comment='#',
delim_whitespace=True,
names=cols)
zmat_frame.index = range(1, len(zmat_frame) + 1)
else:
zmat_frame = pd.read_table(inputfile, comment='#',
delim_whitespace=True,
names=['temp_index'] + cols)
zmat_frame.set_index('temp_index', drop=True, inplace=True)
zmat_frame.index.name = None
if pd.isnull(zmat_frame.iloc[0, 1]):
zmat_values = [1.27, 127., 127.]
zmat_refs = [constants.int_label[x] for x in
['origin', 'e_z', 'e_x']]
for row, i in enumerate(zmat_frame.index[:3]):
cols = ['b', 'a', 'd']
zmat_frame.loc[:, cols] = zmat_frame.loc[:, cols].astype('O')
if row < 2:
zmat_frame.loc[i, cols[row:]] = zmat_refs[row:]
zmat_frame.loc[i, ['bond', 'angle', 'dihedral'][row:]
] = zmat_values[row:]
else:
zmat_frame.loc[i, 'd'] = zmat_refs[2]
zmat_frame.loc[i, 'dihedral'] = zmat_values[2]
elif zmat_frame.iloc[0, 1] in constants.int_label.keys():
zmat_frame = zmat_frame.replace(
{col: constants.int_label for col in ['b', 'a', 'd']})
zmat_frame = cls._cast_correct_types(zmat_frame)
try:
Zmat = cls(zmat_frame)
except InvalidReference:
raise UndefinedCoordinateSystem(
'Your zmatrix cannot be transformed to cartesian coordinates')
return Zmat |
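Since :func:`pandas.read_table` accepts file-like objects, a zmat can also be read from an in-memory buffer (a sketch with an ammonia-like z-matrix; the values are illustrative):

import io
import chemcoord as cc

zmat_text = '''\
N
H 1 1.01
H 1 1.01 2 107.0
H 1 1.01 2 107.0 3 120.0
'''
molecule = cc.Zmat.read_zmat(io.StringIO(zmat_text))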
<SYSTEM_TASK:>
Write zmat-file
<END_TASK>
<USER_TASK:>
Description:
def to_zmat(self, buf=None, upper_triangle=True, implicit_index=True,
float_format='{:.6f}'.format, overwrite=True,
header=False):
"""Write zmat-file
Args:
buf (str): Filepath to write to. If None, the formatted string
is returned.
upper_triangle (bool): If False, redundant references in the first
rows (the upper triangle) are removed before writing.
implicit_index (bool): If implicit_index is set, the zmat indexing
is changed to ``range(1, len(self) + 1)`` using
:meth:`~chemcoord.Zmat.change_numbering`.
The index is also omitted while writing, which means
that it is given implicitly by the row number.
float_format (one-parameter function): Formatter function
to apply to column’s elements if they are floats.
The result of this function must be a unicode string.
overwrite (bool): May overwrite existing files.
Returns:
formatted : string (or unicode, depending on data and options)
""" |
out = self.copy()
if implicit_index:
out = out.change_numbering(new_index=range(1, len(self) + 1))
if not upper_triangle:
out = out._remove_upper_triangle()
output = out.to_string(index=(not implicit_index),
float_format=float_format, header=header)
if buf is not None:
if overwrite:
with open(buf, mode='w') as f:
f.write(output)
else:
with open(buf, mode='x') as f:
f.write(output)
else:
return output |
<SYSTEM_TASK:>
Write xyz-file
<END_TASK>
<USER_TASK:>
Description:
def to_xyz(self, buf=None, sort_index=True,
index=False, header=False, float_format='{:.6f}'.format,
overwrite=True):
"""Write xyz-file
Args:
buf (str): Filepath to write to. If None, the formatted string
is returned.
sort_index (bool): If sort_index is true, the
:class:`~chemcoord.Cartesian`
is sorted by the index before writing.
float_format (one-parameter function): Formatter function
to apply to column’s elements if they are floats.
The result of this function must be a unicode string.
overwrite (bool): May overwrite existing files.
Returns:
formatted : string (or unicode, depending on data and options)
""" |
if sort_index:
molecule_string = self.sort_index().to_string(
header=header, index=index, float_format=float_format)
else:
molecule_string = self.to_string(header=header, index=index,
float_format=float_format)
# NOTE the following might be removed in the future
# introduced because of formatting bug in pandas
# See https://github.com/pandas-dev/pandas/issues/13032
space = ' ' * (self.loc[:, 'atom'].str.len().max()
- len(self.iloc[0, 0]))
output = '{n}\n{message}\n{alignment}{frame_string}'.format(
n=len(self), alignment=space, frame_string=molecule_string,
message='Created by chemcoord http://chemcoord.readthedocs.io/')
if buf is not None:
if overwrite:
with open(buf, mode='w') as f:
f.write(output)
else:
with open(buf, mode='x') as f:
f.write(output)
else:
return output |
<SYSTEM_TASK:>
Read a file of coordinate information.
<END_TASK>
<USER_TASK:>
Description:
def read_xyz(cls, buf, start_index=0, get_bonds=True,
nrows=None, engine=None):
"""Read a file of coordinate information.
Reads xyz-files.
Args:
buf (str):
start_index (int):
get_bonds (bool):
nrows (int): Number of rows of file to read.
Note that the first two rows are implicitly excluded.
engine (str): Wrapper for argument of :func:`pandas.read_csv`.
Returns:
Cartesian:
""" |
frame = pd.read_table(buf, skiprows=2, comment='#',
nrows=nrows,
delim_whitespace=True,
names=['atom', 'x', 'y', 'z'], engine=engine)
remove_digits = partial(re.sub, r'[0-9]+', '')
frame['atom'] = frame['atom'].apply(remove_digits)
molecule = cls(frame)
molecule.index = range(start_index, start_index + len(molecule))
if get_bonds:
molecule.get_bonds(use_lookup=False, set_lookup=True)
return molecule |
<SYSTEM_TASK:>
Write a cjson file or return dictionary.
<END_TASK>
<USER_TASK:>
Description:
def to_cjson(self, buf=None, **kwargs):
"""Write a cjson file or return dictionary.
The cjson format is specified
`here <https://github.com/OpenChemistry/chemicaljson>`_.
Args:
buf (str): If it is a filepath, the data is written to
filepath. If it is None, a dictionary with the cjson
information is returned.
kwargs: The keyword arguments are passed into the
``dump`` function of the
`json library <https://docs.python.org/3/library/json.html>`_.
Returns:
dict:
""" |
cjson_dict = {'chemical json': 0}
atomic_number = constants.elements['atomic_number'].to_dict()
cjson_dict['atoms'] = {'elements': {}}
cjson_dict['atoms']['elements']['number'] = [
int(atomic_number[x]) for x in self['atom']]
cjson_dict['atoms']['coords'] = {}
coords = self.loc[:, ['x', 'y', 'z']].values.reshape(len(self) * 3)
cjson_dict['atoms']['coords']['3d'] = [float(x) for x in coords]
bonds = []
bond_dict = self.get_bonds()
for i in bond_dict:
for b in bond_dict[i]:
bonds += [int(i), int(b)]
bond_dict[b].remove(i)
cjson_dict['bonds'] = {'connections': {}}
cjson_dict['bonds']['connections']['index'] = bonds
if buf is not None:
with open(buf, mode='w') as f:
f.write(json.dumps(cjson_dict, **kwargs))
else:
return cjson_dict |
<SYSTEM_TASK:>
Read a cjson file or a dictionary.
<END_TASK>
<USER_TASK:>
Description:
def read_cjson(cls, buf):
"""Read a cjson file or a dictionary.
The cjson format is specified
`here <https://github.com/OpenChemistry/chemicaljson>`_.
Args:
buf (str, dict): If it is a filepath, the data is read from
filepath. If it is a dictionary, the dictionary is interpreted
as cjson.
Returns:
Cartesian:
""" |
if isinstance(buf, dict):
data = buf.copy()
else:
with open(buf, 'r') as f:
data = json.load(f)
assert data['chemical json'] == 0
n_coords = len(data['atoms']['coords']['3d'])
metadata = {}
_metadata = {}
coords = np.array(
    data['atoms']['coords']['3d']).reshape((n_coords // 3, 3))
atomic_number = constants.elements['atomic_number']
elements = [dict(zip(atomic_number, atomic_number.index))[x]
for x in data['atoms']['elements']['number']]
try:
connections = data['bonds']['connections']['index']
except KeyError:
pass
else:
bond_dict = defaultdict(set)
for i, b in zip(connections[::2], connections[1::2]):
bond_dict[i].add(b)
bond_dict[b].add(i)
_metadata['bond_dict'] = dict(bond_dict)
try:
metadata.update(data['properties'])
except KeyError:
pass
out = cls(atoms=elements, coords=coords, _metadata=_metadata,
metadata=metadata)
return out |
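Because ``to_cjson`` returns a plain dictionary when ``buf`` is None, the two methods round-trip directly (``molecule`` being any existing ``Cartesian``):

cjson = molecule.to_cjson()  # dict, since buf is None
same_molecule = cc.Cartesian.read_cjson(cjson)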
<SYSTEM_TASK:>
View your molecule.
<END_TASK>
<USER_TASK:>
Description:
def view(self, viewer=None, use_curr_dir=False):
"""View your molecule.
.. note:: This function writes a temporary file and opens it with
an external viewer.
If you modify your molecule afterwards you have to recall view
in order to see the changes.
Args:
viewer (str): The external viewer to use. If it is None,
the default as specified in cc.settings['defaults']['viewer']
is used.
use_curr_dir (bool): If True, the temporary file is written to
the current directory. Otherwise it gets written to the
OS-dependent temporary directory.
Returns:
None:
""" |
if viewer is None:
viewer = settings['defaults']['viewer']
if use_curr_dir:
TEMP_DIR = os.path.curdir
else:
TEMP_DIR = tempfile.gettempdir()
def give_filename(i):
filename = 'ChemCoord_' + str(i) + '.xyz'
return os.path.join(TEMP_DIR, filename)
i = 1
while os.path.exists(give_filename(i)):
i = i + 1
self.to_xyz(give_filename(i))
def open_file(i):
    """Open the file and clean up after the viewer exits."""
    try:
        subprocess.check_call([viewer, give_filename(i)])
    finally:
        if not use_curr_dir:
            os.remove(give_filename(i))
Thread(target=open_file, args=(i,)).start() |
<SYSTEM_TASK:>
Create a Molecule instance of the pymatgen library
<END_TASK>
<USER_TASK:>
Description:
def get_pymatgen_molecule(self):
"""Create a Molecule instance of the pymatgen library
.. warning:: The `pymatgen library <http://pymatgen.org>`_ is imported
locally in this function and will raise
an ``ImportError`` exception, if it is not installed.
Args:
None
Returns:
:class:`pymatgen.core.structure.Molecule`:
""" |
from pymatgen import Molecule
return Molecule(self['atom'].values,
self.loc[:, ['x', 'y', 'z']].values) |
<SYSTEM_TASK:>
Create an instance of the own class from a pymatgen molecule
<END_TASK>
<USER_TASK:>
Description:
def from_pymatgen_molecule(cls, molecule):
"""Create an instance of the own class from a pymatgen molecule
Args:
molecule (:class:`pymatgen.core.structure.Molecule`):
Returns:
Cartesian:
""" |
new = cls(atoms=[el.value for el in molecule.species],
coords=molecule.cart_coords)
return new._to_numeric() |
<SYSTEM_TASK:>
Create an instance of the own class from an ase molecule
<END_TASK>
<USER_TASK:>
Description:
def from_ase_atoms(cls, atoms):
"""Create an instance of the own class from an ase molecule
Args:
molecule (:class:`ase.atoms.Atoms`):
Returns:
Cartesian:
""" |
return cls(atoms=atoms.get_chemical_symbols(), coords=atoms.positions) |
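A sketch using ASE's molecule builder (assuming ``ase`` is installed):

from ase.build import molecule
import chemcoord as cc

water = cc.Cartesian.from_ase_atoms(molecule('H2O'))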
<SYSTEM_TASK:>
Returns a PointGroup object for the molecule.
<END_TASK>
<USER_TASK:>
Description:
def get_pointgroup(self, tolerance=0.3):
"""Returns a PointGroup object for the molecule.
Args:
tolerance (float): Tolerance to generate the full set of symmetry
operations.
Returns:
:class:`~PointGroupOperations`
""" |
PA = self._get_point_group_analyzer(tolerance=tolerance)
return PointGroupOperations(PA.sch_symbol, PA.symmops) |
<SYSTEM_TASK:>
Returns sets of equivalent atoms with symmetry operations
<END_TASK>
<USER_TASK:>
Description:
def get_equivalent_atoms(self, tolerance=0.3):
"""Returns sets of equivalent atoms with symmetry operations
Args:
tolerance (float): Tolerance to generate the full set of symmetry
operations.
Returns:
dict: The returned dictionary has two possible keys:
``eq_sets``:
A dictionary of indices mapping to sets of indices,
each key maps to indices of all equivalent atoms.
The keys are guaranteed to be not equivalent.
``sym_ops``:
Twofold nested dictionary.
``operations[i][j]`` gives the symmetry operation
that maps atom ``i`` unto ``j``.
""" |
PA = self._get_point_group_analyzer(tolerance=tolerance)
eq = PA.get_equivalent_atoms()
self._convert_eq(eq)
return eq |
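A usage sketch (``molecule`` is any ``Cartesian``; for ammonia the three hydrogens end up in one equivalence set):

eq = molecule.get_equivalent_atoms(tolerance=0.3)
for representative, members in eq['eq_sets'].items():
    print(representative, sorted(members))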