Dataset schema (one row per function): repo (string, 7-55 chars) | path (string, 4-127) | func_name (string, 1-88) | original_string (string, 75-19.8k) | language (1 class) | code (string, 75-19.8k) | code_tokens (sequence) | docstring (string, 3-17.3k) | docstring_tokens (sequence) | sha (string, 40) | url (string, 87-242) | partition (1 class)
pyviz/imagen | imagen/patternfn.py | smooth_rectangle | python | train

def smooth_rectangle(x, y, rec_w, rec_h, gaussian_width_x, gaussian_width_y):
"""
Rectangle with a solid central region, then Gaussian fall-off at the edges.
"""
    # (np and float_error_ignore come from the enclosing imagen.patternfn module)
    # Signed distance of each point from the rectangle edge along each axis
    gaussian_x_coord = abs(x) - rec_w/2.0
    gaussian_y_coord = abs(y) - rec_h/2.0
    # Boolean masks: True inside the solid central region
    box_x = np.less(gaussian_x_coord, 0.0)
    box_y = np.less(gaussian_y_coord, 0.0)
    sigmasq_x = gaussian_width_x*gaussian_width_x
    sigmasq_y = gaussian_width_y*gaussian_width_y
    # Gaussian fall-off outside the box; identically zero when sigma is zero
    with float_error_ignore():
        falloff_x = x*0.0 if sigmasq_x == 0.0 else \
            np.exp(np.divide(-gaussian_x_coord*gaussian_x_coord, 2*sigmasq_x))
        falloff_y = y*0.0 if sigmasq_y == 0.0 else \
            np.exp(np.divide(-gaussian_y_coord*gaussian_y_coord, 2*sigmasq_y))
    return np.minimum(np.maximum(box_x, falloff_x), np.maximum(box_y, falloff_y))
"""
Rectangle with a solid central region, then Gaussian fall-off at the edges.
"""
gaussian_x_coord = abs(x)-rec_w/2.0
gaussian_y_coord = abs(y)-rec_h/2.0
box_x=np.less(gaussian_x_coord,0.0)
box_y=np.less(gaussian_y_coord,0.0)
sigmasq_x=gaussian_width_x*gaussian_width_x
sigmasq_y=gaussian_width_y*gaussian_width_y
with float_error_ignore():
falloff_x=x*0.0 if sigmasq_x==0.0 else \
np.exp(np.divide(-gaussian_x_coord*gaussian_x_coord,2*sigmasq_x))
falloff_y=y*0.0 if sigmasq_y==0.0 else \
np.exp(np.divide(-gaussian_y_coord*gaussian_y_coord,2*sigmasq_y))
return np.minimum(np.maximum(box_x,falloff_x), np.maximum(box_y,falloff_y)) | [
"def",
"smooth_rectangle",
"(",
"x",
",",
"y",
",",
"rec_w",
",",
"rec_h",
",",
"gaussian_width_x",
",",
"gaussian_width_y",
")",
":",
"gaussian_x_coord",
"=",
"abs",
"(",
"x",
")",
"-",
"rec_w",
"/",
"2.0",
"gaussian_y_coord",
"=",
"abs",
"(",
"y",
")",
"-",
"rec_h",
"/",
"2.0",
"box_x",
"=",
"np",
".",
"less",
"(",
"gaussian_x_coord",
",",
"0.0",
")",
"box_y",
"=",
"np",
".",
"less",
"(",
"gaussian_y_coord",
",",
"0.0",
")",
"sigmasq_x",
"=",
"gaussian_width_x",
"*",
"gaussian_width_x",
"sigmasq_y",
"=",
"gaussian_width_y",
"*",
"gaussian_width_y",
"with",
"float_error_ignore",
"(",
")",
":",
"falloff_x",
"=",
"x",
"*",
"0.0",
"if",
"sigmasq_x",
"==",
"0.0",
"else",
"np",
".",
"exp",
"(",
"np",
".",
"divide",
"(",
"-",
"gaussian_x_coord",
"*",
"gaussian_x_coord",
",",
"2",
"*",
"sigmasq_x",
")",
")",
"falloff_y",
"=",
"y",
"*",
"0.0",
"if",
"sigmasq_y",
"==",
"0.0",
"else",
"np",
".",
"exp",
"(",
"np",
".",
"divide",
"(",
"-",
"gaussian_y_coord",
"*",
"gaussian_y_coord",
",",
"2",
"*",
"sigmasq_y",
")",
")",
"return",
"np",
".",
"minimum",
"(",
"np",
".",
"maximum",
"(",
"box_x",
",",
"falloff_x",
")",
",",
"np",
".",
"maximum",
"(",
"box_y",
",",
"falloff_y",
")",
")"
] | Rectangle with a solid central region, then Gaussian fall-off at the edges. | [
"Rectangle",
"with",
"a",
"solid",
"central",
"region",
"then",
"Gaussian",
"fall",
"-",
"off",
"at",
"the",
"edges",
"."
53c5685c880f54b42795964d8db50b02e8590e88 | https://github.com/pyviz/imagen/blob/53c5685c880f54b42795964d8db50b02e8590e88/imagen/patternfn.py#L178-L197
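A minimal usage sketch for the row above (not part of the dataset row); it assumes the imagen package is importable, and the grid and sigma values are illustrative:

```python
import numpy as np
from imagen.patternfn import smooth_rectangle

# Evaluate on a 2-D grid: value is 1.0 inside the 1.0 x 0.5 box,
# with Gaussian fall-off (sigma = 0.2) outside it.
y, x = np.mgrid[-1:1:101j, -1:1:101j]
pattern = smooth_rectangle(x, y, 1.0, 0.5, 0.2, 0.2)
print(pattern.shape, float(pattern.max()))  # (101, 101) 1.0
```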

bskinn/opan | opan/utils/base.py | pack_tups | python | train

def pack_tups(*args):
"""Pack an arbitrary set of iterables and non-iterables into tuples.
Function packs a set of inputs with arbitrary iterability into tuples.
Iterability is tested with :func:`iterable`. Non-iterable inputs
are repeated in each output tuple. Iterable inputs are expanded
uniformly across the output tuples. For consistency, all iterables must
be the same length.
The input arguments are parsed such that bare strings are treated as
**NON-ITERABLE**, through the use of a local subclass of |str| that
cripples the ``__iter__()`` method. Any strings passed are returned
in the packed tuples as standard, **ITERABLE** instances of |str|, however.
The order of the input arguments is retained within each output tuple.
No structural conversion is attempted on the arguments.
If all inputs are non-iterable, a list containing a single |tuple| will be
returned.
Parameters
----------
\*args
Arbitrary number of arbitrary mix of iterable and non-iterable
objects to be packed into tuples.
Returns
-------
tups
|list| of |tuple| --
Number of tuples returned is equal to the length of the iterables
passed in `*args`
Raises
------
~exceptions.ValueError
If any iterable objects are of different lengths
"""
# Imports
import numpy as np
# Debug flag
_DEBUG = False
# Marker value for non-iterable items
NOT_ITER = -1
    # Uninitialized test value (defined but not used below)
    UNINIT_VAL = -1
# Print the input if in debug mode
if _DEBUG: # pragma: no cover
print("args = {0}".format(args))
# Non-iterable subclass of str
class StrNoIter(str):
""" Non-iterable subclass of |str|. """
def __iter__(self):
raise NotImplementedError("Non-iterable string")
## end def __iter__
## end class StrNoIter
# Re-wrap input arguments with non-iterable strings if required
mod_args = [(StrNoIter(a) if isinstance(a, str) else a) for a in args]
# Determine the length or non-iterable status of each item and store
# the maximum value (depends on NOT_ITER < 0)
iterlens = [(len(a) if iterable(a) else NOT_ITER) for a in mod_args]
maxiter = max(iterlens)
# Check to ensure all iterables are the same length
if not all(map(lambda v: v in (NOT_ITER, maxiter), iterlens)):
raise ValueError("All iterable items must be of equal length")
## end if
# If everything is non-iterable, just return the args tuple wrapped in
# a list (as above, depends on NOT_ITER < 0)
if maxiter == NOT_ITER:
return [args]
## end if
# Swap any non-iterables for a suitable length repeat, and zip to
# tuples for return
tups = list(zip(*[(np.repeat(a, maxiter) if l == NOT_ITER else a)
for (a,l) in zip(mod_args, iterlens)]))
# Dump the resulting tuples, if in debug mode
if _DEBUG: # pragma: no cover
print("tups = {0}".format(tups))
## end if
# Return the tuples
    return tups
"""Pack an arbitrary set of iterables and non-iterables into tuples.
Function packs a set of inputs with arbitrary iterability into tuples.
Iterability is tested with :func:`iterable`. Non-iterable inputs
are repeated in each output tuple. Iterable inputs are expanded
uniformly across the output tuples. For consistency, all iterables must
be the same length.
The input arguments are parsed such that bare strings are treated as
**NON-ITERABLE**, through the use of a local subclass of |str| that
cripples the ``__iter__()`` method. Any strings passed are returned
in the packed tuples as standard, **ITERABLE** instances of |str|, however.
The order of the input arguments is retained within each output tuple.
No structural conversion is attempted on the arguments.
If all inputs are non-iterable, a list containing a single |tuple| will be
returned.
Parameters
----------
\*args
Arbitrary number of arbitrary mix of iterable and non-iterable
objects to be packed into tuples.
Returns
-------
tups
|list| of |tuple| --
Number of tuples returned is equal to the length of the iterables
passed in `*args`
Raises
------
~exceptions.ValueError
If any iterable objects are of different lengths
"""
# Imports
import numpy as np
# Debug flag
_DEBUG = False
# Marker value for non-iterable items
NOT_ITER = -1
# Uninitialized test value
UNINIT_VAL = -1
# Print the input if in debug mode
if _DEBUG: # pragma: no cover
print("args = {0}".format(args))
# Non-iterable subclass of str
class StrNoIter(str):
""" Non-iterable subclass of |str|. """
def __iter__(self):
raise NotImplementedError("Non-iterable string")
## end def __iter__
## end class StrNoIter
# Re-wrap input arguments with non-iterable strings if required
mod_args = [(StrNoIter(a) if isinstance(a, str) else a) for a in args]
# Determine the length or non-iterable status of each item and store
# the maximum value (depends on NOT_ITER < 0)
iterlens = [(len(a) if iterable(a) else NOT_ITER) for a in mod_args]
maxiter = max(iterlens)
# Check to ensure all iterables are the same length
if not all(map(lambda v: v in (NOT_ITER, maxiter), iterlens)):
raise ValueError("All iterable items must be of equal length")
## end if
# If everything is non-iterable, just return the args tuple wrapped in
# a list (as above, depends on NOT_ITER < 0)
if maxiter == NOT_ITER:
return [args]
## end if
# Swap any non-iterables for a suitable length repeat, and zip to
# tuples for return
tups = list(zip(*[(np.repeat(a, maxiter) if l == NOT_ITER else a)
for (a,l) in zip(mod_args, iterlens)]))
# Dump the resulting tuples, if in debug mode
if _DEBUG: # pragma: no cover
print("tups = {0}".format(tups))
## end if
# Return the tuples
return tups | [
"def",
"pack_tups",
"(",
"*",
"args",
")",
":",
"import",
"numpy",
"as",
"np",
"_DEBUG",
"=",
"False",
"NOT_ITER",
"=",
"-",
"1",
"UNINIT_VAL",
"=",
"-",
"1",
"if",
"_DEBUG",
":",
"print",
"(",
"\"args = {0}\"",
".",
"format",
"(",
"args",
")",
")",
"class",
"StrNoIter",
"(",
"str",
")",
":",
"def",
"__iter__",
"(",
"self",
")",
":",
"raise",
"NotImplementedError",
"(",
"\"Non-iterable string\"",
")",
"mod_args",
"=",
"[",
"(",
"StrNoIter",
"(",
"a",
")",
"if",
"isinstance",
"(",
"a",
",",
"str",
")",
"else",
"a",
")",
"for",
"a",
"in",
"args",
"]",
"iterlens",
"=",
"[",
"(",
"len",
"(",
"a",
")",
"if",
"iterable",
"(",
"a",
")",
"else",
"NOT_ITER",
")",
"for",
"a",
"in",
"mod_args",
"]",
"maxiter",
"=",
"max",
"(",
"iterlens",
")",
"if",
"not",
"all",
"(",
"map",
"(",
"lambda",
"v",
":",
"v",
"in",
"(",
"NOT_ITER",
",",
"maxiter",
")",
",",
"iterlens",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"All iterable items must be of equal length\"",
")",
"if",
"maxiter",
"==",
"NOT_ITER",
":",
"return",
"[",
"args",
"]",
"tups",
"=",
"list",
"(",
"zip",
"(",
"*",
"[",
"(",
"np",
".",
"repeat",
"(",
"a",
",",
"maxiter",
")",
"if",
"l",
"==",
"NOT_ITER",
"else",
"a",
")",
"for",
"(",
"a",
",",
"l",
")",
"in",
"zip",
"(",
"mod_args",
",",
"iterlens",
")",
"]",
")",
")",
"if",
"_DEBUG",
":",
"print",
"(",
"\"tups = {0}\"",
".",
"format",
"(",
"tups",
")",
")",
"return",
"tups"
] | Pack an arbitrary set of iterables and non-iterables into tuples.
Function packs a set of inputs with arbitrary iterability into tuples.
Iterability is tested with :func:`iterable`. Non-iterable inputs
are repeated in each output tuple. Iterable inputs are expanded
uniformly across the output tuples. For consistency, all iterables must
be the same length.
The input arguments are parsed such that bare strings are treated as
**NON-ITERABLE**, through the use of a local subclass of |str| that
cripples the ``__iter__()`` method. Any strings passed are returned
in the packed tuples as standard, **ITERABLE** instances of |str|, however.
The order of the input arguments is retained within each output tuple.
No structural conversion is attempted on the arguments.
If all inputs are non-iterable, a list containing a single |tuple| will be
returned.
Parameters
----------
\*args
Arbitrary number of arbitrary mix of iterable and non-iterable
objects to be packed into tuples.
Returns
-------
tups
|list| of |tuple| --
Number of tuples returned is equal to the length of the iterables
passed in `*args`
Raises
------
~exceptions.ValueError
If any iterable objects are of different lengths | [
"Pack",
"an",
"arbitrary",
"set",
"of",
"iterables",
"and",
"non",
"-",
"iterables",
"into",
"tuples",
"."
0b1b21662df6abc971407a9386db21a8796fbfe5 | https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/utils/base.py#L44-L139
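A short usage sketch for `pack_tups` (expected results are paraphrased in comments; values filled in by `np.repeat` come back as NumPy scalar types):

```python
from opan.utils.base import pack_tups

# Iterables expand across the tuples; scalars and bare strings repeat.
tups = pack_tups([1, 2, 3], 'ab', 10)
# -> [(1, 'ab', 10), (2, 'ab', 10), (3, 'ab', 10)]

# All-non-iterable input returns the args tuple wrapped in a list.
print(pack_tups(4, 'xy'))  # [(4, 'xy')]

# Mismatched iterable lengths raise ValueError:
# pack_tups([1, 2], [1, 2, 3])
```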

bskinn/opan | opan/utils/base.py | safe_cast | python | train

def safe_cast(invar, totype):
"""Performs a "safe" typecast.
Ensures that `invar` properly casts to `totype`. Checks after
casting that the result is actually of type `totype`. Any exceptions raised
by the typecast itself are unhandled.
Parameters
----------
invar
(arbitrary) -- Value to be typecast.
totype
|type| -- Type to which `invar` is to be cast.
Returns
-------
outvar
`type 'totype'` -- Typecast version of `invar`
Raises
------
~exceptions.TypeError
If result of typecast is not of type `totype`
"""
# Make the typecast. Just use Python built-in exceptioning
outvar = totype(invar)
# Check that the cast type matches
if not isinstance(outvar, totype):
raise TypeError("Result of cast to '{0}' is '{1}'"
.format(totype, type(outvar)))
## end if
# Success; return the cast value
    return outvar
"""Performs a "safe" typecast.
Ensures that `invar` properly casts to `totype`. Checks after
casting that the result is actually of type `totype`. Any exceptions raised
by the typecast itself are unhandled.
Parameters
----------
invar
(arbitrary) -- Value to be typecast.
totype
|type| -- Type to which `invar` is to be cast.
Returns
-------
outvar
`type 'totype'` -- Typecast version of `invar`
Raises
------
~exceptions.TypeError
If result of typecast is not of type `totype`
"""
# Make the typecast. Just use Python built-in exceptioning
outvar = totype(invar)
# Check that the cast type matches
if not isinstance(outvar, totype):
raise TypeError("Result of cast to '{0}' is '{1}'"
.format(totype, type(outvar)))
## end if
# Success; return the cast value
return outvar | [
"def",
"safe_cast",
"(",
"invar",
",",
"totype",
")",
":",
"outvar",
"=",
"totype",
"(",
"invar",
")",
"if",
"not",
"isinstance",
"(",
"outvar",
",",
"totype",
")",
":",
"raise",
"TypeError",
"(",
"\"Result of cast to '{0}' is '{1}'\"",
".",
"format",
"(",
"totype",
",",
"type",
"(",
"outvar",
")",
")",
")",
"return",
"outvar"
] | Performs a "safe" typecast.
Ensures that `invar` properly casts to `totype`. Checks after
casting that the result is actually of type `totype`. Any exceptions raised
by the typecast itself are unhandled.
Parameters
----------
invar
(arbitrary) -- Value to be typecast.
totype
|type| -- Type to which `invar` is to be cast.
Returns
-------
outvar
`type 'totype'` -- Typecast version of `invar`
Raises
------
~exceptions.TypeError
If result of typecast is not of type `totype` | [
"Performs",
"a",
"safe",
"typecast",
"."
0b1b21662df6abc971407a9386db21a8796fbfe5 | https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/utils/base.py#L168-L205
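A usage sketch for `safe_cast`; the `Weird` class is a contrived illustration of a cast whose result fails the post-check:

```python
from opan.utils.base import safe_cast

print(safe_cast("42", int))   # 42
print(safe_cast(3, float))    # 3.0

# The TypeError branch fires only when totype(invar) returns an object
# that is not actually an instance of totype:
class Weird:
    def __new__(cls, val):
        return str(val)   # returns a str, so __init__ is skipped

safe_cast(1, Weird)   # raises TypeError
```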

bskinn/opan | opan/utils/base.py | make_timestamp | python | train

def make_timestamp(el_time):
""" Generate an hour-minutes-seconds timestamp from an interval in seconds.
Assumes numeric input of a time interval in seconds. Converts this
interval to a string of the format "#h #m #s", indicating the number of
hours, minutes, and seconds in the interval. Intervals greater than 24h
are unproblematic.
Parameters
----------
el_time
|int| or |float| --
Time interval in seconds to be converted to h/m/s format
Returns
-------
stamp
|str| -- String timestamp in #h #m #s format
"""
# Calc hours
hrs = el_time // 3600.0
# Calc minutes
mins = (el_time % 3600.0) // 60.0
# Calc seconds
secs = el_time % 60.0
# Construct timestamp string
stamp = "{0}h {1}m {2}s".format(int(hrs), int(mins), int(secs))
# Return
    return stamp
""" Generate an hour-minutes-seconds timestamp from an interval in seconds.
Assumes numeric input of a time interval in seconds. Converts this
interval to a string of the format "#h #m #s", indicating the number of
hours, minutes, and seconds in the interval. Intervals greater than 24h
are unproblematic.
Parameters
----------
el_time
|int| or |float| --
Time interval in seconds to be converted to h/m/s format
Returns
-------
stamp
|str| -- String timestamp in #h #m #s format
"""
# Calc hours
hrs = el_time // 3600.0
# Calc minutes
mins = (el_time % 3600.0) // 60.0
# Calc seconds
secs = el_time % 60.0
# Construct timestamp string
stamp = "{0}h {1}m {2}s".format(int(hrs), int(mins), int(secs))
# Return
return stamp | [
"def",
"make_timestamp",
"(",
"el_time",
")",
":",
"hrs",
"=",
"el_time",
"//",
"3600.0",
"mins",
"=",
"(",
"el_time",
"%",
"3600.0",
")",
"//",
"60.0",
"secs",
"=",
"el_time",
"%",
"60.0",
"stamp",
"=",
"\"{0}h {1}m {2}s\"",
".",
"format",
"(",
"int",
"(",
"hrs",
")",
",",
"int",
"(",
"mins",
")",
",",
"int",
"(",
"secs",
")",
")",
"return",
"stamp"
] | Generate an hour-minutes-seconds timestamp from an interval in seconds.
Assumes numeric input of a time interval in seconds. Converts this
interval to a string of the format "#h #m #s", indicating the number of
hours, minutes, and seconds in the interval. Intervals greater than 24h
are unproblematic.
Parameters
----------
el_time
|int| or |float| --
Time interval in seconds to be converted to h/m/s format
Returns
-------
stamp
|str| -- String timestamp in #h #m #s format | [
"Generate",
"an",
"hour",
"-",
"minutes",
"-",
"seconds",
"timestamp",
"from",
"an",
"interval",
"in",
"seconds",
"."
0b1b21662df6abc971407a9386db21a8796fbfe5 | https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/utils/base.py#L210-L244
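Example outputs for `make_timestamp`, worked out from the floor-division arithmetic above:

```python
from opan.utils.base import make_timestamp

print(make_timestamp(3725))    # '1h 2m 5s'
print(make_timestamp(59.9))    # '0h 0m 59s' -- fractions truncate via int()
print(make_timestamp(90000))   # '25h 0m 0s' -- intervals over 24h are fine
```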

bskinn/opan | opan/utils/base.py | check_geom | python | train

def check_geom(c1, a1, c2, a2, tol=_DEF.XYZ_COORD_MATCH_TOL):
""" Check for consistency of two geometries and atom symbol lists
Cartesian coordinates are considered consistent with the input
coords if each component matches to within `tol`. If coords or
atoms vectors are passed that are of mismatched lengths, a
|False| value is returned.
Both coords vectors must be three times the length of the atoms vectors
or a :exc:`~exceptions.ValueError` is raised.
Parameters
----------
c1
length-3N |npfloat_| --
Vector of first set of stacked 'lab-frame' Cartesian coordinates
a1
length-N |str| or |int| --
Vector of first set of atom symbols or atomic numbers
c2
length-3N |npfloat_| --
Vector of second set of stacked 'lab-frame' Cartesian coordinates
a2
length-N |str| or |int| --
Vector of second set of atom symbols or atomic numbers
tol
|float|, optional --
Tolerance for acceptable deviation of each geometry coordinate
from that in the reference instance to still be considered
matching. Default value is specified by
    :attr:`opan.const.DEF.XYZ_COORD_MATCH_TOL`
Returns
-------
match
|bool| --
Whether input coords and atoms match (|True|) or
not (|False|)
fail_type
:class:`~opan.const.EnumCheckGeomMismatch` or |None|
-- Type of check failure
If `match` == |True|:
Returns as |None|
If `match` == |False|:
An :class:`~opan.const.EnumCheckGeomMismatch` value
indicating the reason for the failed match:
:attr:`~opan.const.EnumCheckGeomMismatch.DIMENSION`
-- Mismatch in geometry size (number of atoms)
:attr:`~opan.const.EnumCheckGeomMismatch.COORDS`
-- Mismatch in one or more coordinates
:attr:`~opan.const.EnumCheckGeomMismatch.ATOMS`
-- Mismatch in one or more atoms
fail_loc
length-3N |bool| or length-N |bool| or |None| --
Mismatched elements
If `match` == |True|:
Returns as |None|
If `match` == |False|:
For "array-level" problems such as a dimension mismatch, a
|None| value is returned.
For "element-level" problems, a vector is returned
indicating positions of mismatch in either `coords` or `atoms`,
depending on the value of `fail_type`.
|True| elements indicate **MATCHING** values
|False| elements mark **MISMATCHES**
Raises
------
~exceptions.ValueError
If a pair of coords & atoms array lengths is inconsistent:
.. code-block:: python
if len(c1) != 3 * len(a1) or len(c2) != 3 * len(a2):
raise ValueError(...)
"""
# Import(s)
from ..const import atom_num
import numpy as np
from ..const import EnumCheckGeomMismatch as ECGM
# Initialize return value to success condition
match = True
#** Check coords for suitable shape. Assume 1-D np.arrays.
if not len(c1.shape) == 1:
# Cannot coerce to vector; complain.
raise ValueError(("'c1' is not a vector."))
## end if
if not len(c2.shape) == 1:
# Cannot coerce to vector; complain.
raise ValueError(("'c2' is not a vector."))
## end if
#** Check atoms for suitable shape. Assume lists of strings, so
# convert to np.array to check.
if not len(a1.shape) == 1:
# Not a vector; complain
raise ValueError(("'a1' is not a simple list."))
## end if
if not len(a2.shape) == 1:
# Not a vector; complain.
raise ValueError(("'a2' is not a simple list."))
## end if
#** Confirm proper lengths of coords vs atoms
if not c1.shape[0] == 3 * a1.shape[0]:
raise ValueError("len(c1) != 3*len(a1)")
## end if
if not c2.shape[0] == 3 * a2.shape[0]:
raise ValueError("len(c2) != 3*len(a2)")
## end if
#** Confirm matching lengths of coords and atoms w/corresponding
# objects among the two geometries
if not c1.shape[0] == c2.shape[0]:
match = False
fail_type = ECGM.DIMENSION
return match, fail_type, None
## end if
#** Element-wise check for geometry match to within 'tol'
fail_loc = np.less_equal(np.abs(np.subtract(c1,c2)), tol)
if sum(fail_loc) != c2.shape[0]:
# Count of matching coordinates should equal the number of
# coordinates. If not, complain with 'coord_mismatch' fail type.
match = False
fail_type = ECGM.COORDS
return match, fail_type, fail_loc
## end if
#** Element-wise check for atoms match. Quietly convert both input and
# instance atom arrays to atom_nums to allow np.equals comparison.
if np.issubdtype(a1.dtype, np.dtype('str')):
# Presume atomic symbol data and attempt conversion
a1 = np.array([atom_num[e] for e in a1])
## end if
if np.issubdtype(a2.dtype, np.dtype('str')):
# Presume atomic symbol data and attempt conversion
a2 = np.array([atom_num[e] for e in a2])
## end if
fail_loc = np.equal(a1, a2)
#** Perform the test to ensure all atoms match.
if sum(fail_loc) != a2.shape[0]:
# Count of matching atoms should equal number of atoms. If not,
# complain with the 'atom_mismatch' fail type.
match = False
fail_type = ECGM.ATOMS
return match, fail_type, fail_loc
#** If reached here, all tests passed; return success.
    return match, None, None
""" Check for consistency of two geometries and atom symbol lists
Cartesian coordinates are considered consistent with the input
coords if each component matches to within `tol`. If coords or
atoms vectors are passed that are of mismatched lengths, a
|False| value is returned.
Both coords vectors must be three times the length of the atoms vectors
or a :exc:`~exceptions.ValueError` is raised.
Parameters
----------
c1
length-3N |npfloat_| --
Vector of first set of stacked 'lab-frame' Cartesian coordinates
a1
length-N |str| or |int| --
Vector of first set of atom symbols or atomic numbers
c2
length-3N |npfloat_| --
Vector of second set of stacked 'lab-frame' Cartesian coordinates
a2
length-N |str| or |int| --
Vector of second set of atom symbols or atomic numbers
tol
|float|, optional --
Tolerance for acceptable deviation of each geometry coordinate
from that in the reference instance to still be considered
matching. Default value is specified by
:attr:`opan.const.DEF.XYZ_COORD_MATCH_TOL`)
Returns
-------
match
|bool| --
Whether input coords and atoms match (|True|) or
not (|False|)
fail_type
:class:`~opan.const.EnumCheckGeomMismatch` or |None|
-- Type of check failure
If `match` == |True|:
Returns as |None|
If `match` == |False|:
An :class:`~opan.const.EnumCheckGeomMismatch` value
indicating the reason for the failed match:
:attr:`~opan.const.EnumCheckGeomMismatch.DIMENSION`
-- Mismatch in geometry size (number of atoms)
:attr:`~opan.const.EnumCheckGeomMismatch.COORDS`
-- Mismatch in one or more coordinates
:attr:`~opan.const.EnumCheckGeomMismatch.ATOMS`
-- Mismatch in one or more atoms
fail_loc
length-3N |bool| or length-N |bool| or |None| --
Mismatched elements
If `match` == |True|:
Returns as |None|
If `match` == |False|:
For "array-level" problems such as a dimension mismatch, a
|None| value is returned.
For "element-level" problems, a vector is returned
indicating positions of mismatch in either `coords` or `atoms`,
depending on the value of `fail_type`.
|True| elements indicate **MATCHING** values
|False| elements mark **MISMATCHES**
Raises
------
~exceptions.ValueError
If a pair of coords & atoms array lengths is inconsistent:
.. code-block:: python
if len(c1) != 3 * len(a1) or len(c2) != 3 * len(a2):
raise ValueError(...)
"""
# Import(s)
from ..const import atom_num
import numpy as np
from ..const import EnumCheckGeomMismatch as ECGM
# Initialize return value to success condition
match = True
#** Check coords for suitable shape. Assume 1-D np.arrays.
if not len(c1.shape) == 1:
# Cannot coerce to vector; complain.
raise ValueError(("'c1' is not a vector."))
## end if
if not len(c2.shape) == 1:
# Cannot coerce to vector; complain.
raise ValueError(("'c2' is not a vector."))
## end if
#** Check atoms for suitable shape. Assume lists of strings, so
# convert to np.array to check.
if not len(a1.shape) == 1:
# Not a vector; complain
raise ValueError(("'a1' is not a simple list."))
## end if
if not len(a2.shape) == 1:
# Not a vector; complain.
raise ValueError(("'a2' is not a simple list."))
## end if
#** Confirm proper lengths of coords vs atoms
if not c1.shape[0] == 3 * a1.shape[0]:
raise ValueError("len(c1) != 3*len(a1)")
## end if
if not c2.shape[0] == 3 * a2.shape[0]:
raise ValueError("len(c2) != 3*len(a2)")
## end if
#** Confirm matching lengths of coords and atoms w/corresponding
# objects among the two geometries
if not c1.shape[0] == c2.shape[0]:
match = False
fail_type = ECGM.DIMENSION
return match, fail_type, None
## end if
#** Element-wise check for geometry match to within 'tol'
fail_loc = np.less_equal(np.abs(np.subtract(c1,c2)), tol)
if sum(fail_loc) != c2.shape[0]:
# Count of matching coordinates should equal the number of
# coordinates. If not, complain with 'coord_mismatch' fail type.
match = False
fail_type = ECGM.COORDS
return match, fail_type, fail_loc
## end if
#** Element-wise check for atoms match. Quietly convert both input and
# instance atom arrays to atom_nums to allow np.equals comparison.
if np.issubdtype(a1.dtype, np.dtype('str')):
# Presume atomic symbol data and attempt conversion
a1 = np.array([atom_num[e] for e in a1])
## end if
if np.issubdtype(a2.dtype, np.dtype('str')):
# Presume atomic symbol data and attempt conversion
a2 = np.array([atom_num[e] for e in a2])
## end if
fail_loc = np.equal(a1, a2)
#** Perform the test to ensure all atoms match.
if sum(fail_loc) != a2.shape[0]:
# Count of matching atoms should equal number of atoms. If not,
# complain with the 'atom_mismatch' fail type.
match = False
fail_type = ECGM.ATOMS
return match, fail_type, fail_loc
#** If reached here, all tests passed; return success.
return match, None, None | [
"def",
"check_geom",
"(",
"c1",
",",
"a1",
",",
"c2",
",",
"a2",
",",
"tol",
"=",
"_DEF",
".",
"XYZ_COORD_MATCH_TOL",
")",
":",
"from",
".",
".",
"const",
"import",
"atom_num",
"import",
"numpy",
"as",
"np",
"from",
".",
".",
"const",
"import",
"EnumCheckGeomMismatch",
"as",
"ECGM",
"match",
"=",
"True",
"if",
"not",
"len",
"(",
"c1",
".",
"shape",
")",
"==",
"1",
":",
"raise",
"ValueError",
"(",
"(",
"\"'c1' is not a vector.\"",
")",
")",
"if",
"not",
"len",
"(",
"c2",
".",
"shape",
")",
"==",
"1",
":",
"raise",
"ValueError",
"(",
"(",
"\"'c2' is not a vector.\"",
")",
")",
"if",
"not",
"len",
"(",
"a1",
".",
"shape",
")",
"==",
"1",
":",
"raise",
"ValueError",
"(",
"(",
"\"'a1' is not a simple list.\"",
")",
")",
"if",
"not",
"len",
"(",
"a2",
".",
"shape",
")",
"==",
"1",
":",
"raise",
"ValueError",
"(",
"(",
"\"'a2' is not a simple list.\"",
")",
")",
"if",
"not",
"c1",
".",
"shape",
"[",
"0",
"]",
"==",
"3",
"*",
"a1",
".",
"shape",
"[",
"0",
"]",
":",
"raise",
"ValueError",
"(",
"\"len(c1) != 3*len(a1)\"",
")",
"if",
"not",
"c2",
".",
"shape",
"[",
"0",
"]",
"==",
"3",
"*",
"a2",
".",
"shape",
"[",
"0",
"]",
":",
"raise",
"ValueError",
"(",
"\"len(c2) != 3*len(a2)\"",
")",
"if",
"not",
"c1",
".",
"shape",
"[",
"0",
"]",
"==",
"c2",
".",
"shape",
"[",
"0",
"]",
":",
"match",
"=",
"False",
"fail_type",
"=",
"ECGM",
".",
"DIMENSION",
"return",
"match",
",",
"fail_type",
",",
"None",
"fail_loc",
"=",
"np",
".",
"less_equal",
"(",
"np",
".",
"abs",
"(",
"np",
".",
"subtract",
"(",
"c1",
",",
"c2",
")",
")",
",",
"tol",
")",
"if",
"sum",
"(",
"fail_loc",
")",
"!=",
"c2",
".",
"shape",
"[",
"0",
"]",
":",
"match",
"=",
"False",
"fail_type",
"=",
"ECGM",
".",
"COORDS",
"return",
"match",
",",
"fail_type",
",",
"fail_loc",
"if",
"np",
".",
"issubdtype",
"(",
"a1",
".",
"dtype",
",",
"np",
".",
"dtype",
"(",
"'str'",
")",
")",
":",
"a1",
"=",
"np",
".",
"array",
"(",
"[",
"atom_num",
"[",
"e",
"]",
"for",
"e",
"in",
"a1",
"]",
")",
"if",
"np",
".",
"issubdtype",
"(",
"a2",
".",
"dtype",
",",
"np",
".",
"dtype",
"(",
"'str'",
")",
")",
":",
"a2",
"=",
"np",
".",
"array",
"(",
"[",
"atom_num",
"[",
"e",
"]",
"for",
"e",
"in",
"a2",
"]",
")",
"fail_loc",
"=",
"np",
".",
"equal",
"(",
"a1",
",",
"a2",
")",
"if",
"sum",
"(",
"fail_loc",
")",
"!=",
"a2",
".",
"shape",
"[",
"0",
"]",
":",
"match",
"=",
"False",
"fail_type",
"=",
"ECGM",
".",
"ATOMS",
"return",
"match",
",",
"fail_type",
",",
"fail_loc",
"return",
"match",
",",
"None",
",",
"None"
] | Check for consistency of two geometries and atom symbol lists
Cartesian coordinates are considered consistent with the input
coords if each component matches to within `tol`. If coords or
atoms vectors are passed that are of mismatched lengths, a
|False| value is returned.
Both coords vectors must be three times the length of the atoms vectors
or a :exc:`~exceptions.ValueError` is raised.
Parameters
----------
c1
length-3N |npfloat_| --
Vector of first set of stacked 'lab-frame' Cartesian coordinates
a1
length-N |str| or |int| --
Vector of first set of atom symbols or atomic numbers
c2
length-3N |npfloat_| --
Vector of second set of stacked 'lab-frame' Cartesian coordinates
a2
length-N |str| or |int| --
Vector of second set of atom symbols or atomic numbers
tol
|float|, optional --
Tolerance for acceptable deviation of each geometry coordinate
from that in the reference instance to still be considered
matching. Default value is specified by
:attr:`opan.const.DEF.XYZ_COORD_MATCH_TOL`)
Returns
-------
match
|bool| --
Whether input coords and atoms match (|True|) or
not (|False|)
fail_type
:class:`~opan.const.EnumCheckGeomMismatch` or |None|
-- Type of check failure
If `match` == |True|:
Returns as |None|
If `match` == |False|:
An :class:`~opan.const.EnumCheckGeomMismatch` value
indicating the reason for the failed match:
:attr:`~opan.const.EnumCheckGeomMismatch.DIMENSION`
-- Mismatch in geometry size (number of atoms)
:attr:`~opan.const.EnumCheckGeomMismatch.COORDS`
-- Mismatch in one or more coordinates
:attr:`~opan.const.EnumCheckGeomMismatch.ATOMS`
-- Mismatch in one or more atoms
fail_loc
length-3N |bool| or length-N |bool| or |None| --
Mismatched elements
If `match` == |True|:
Returns as |None|
If `match` == |False|:
For "array-level" problems such as a dimension mismatch, a
|None| value is returned.
For "element-level" problems, a vector is returned
indicating positions of mismatch in either `coords` or `atoms`,
depending on the value of `fail_type`.
|True| elements indicate **MATCHING** values
|False| elements mark **MISMATCHES**
Raises
------
~exceptions.ValueError
If a pair of coords & atoms array lengths is inconsistent:
.. code-block:: python
if len(c1) != 3 * len(a1) or len(c2) != 3 * len(a2):
raise ValueError(...) | [
"Check",
"for",
"consistency",
"of",
"two",
"geometries",
"and",
"atom",
"symbol",
"lists"
0b1b21662df6abc971407a9386db21a8796fbfe5 | https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/utils/base.py#L250-L424
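A usage sketch for `check_geom` with a hypothetical two-atom geometry; the default tolerance comes from `opan.const.DEF.XYZ_COORD_MATCH_TOL`, so the 1e-10 perturbation below is assumed to sit inside it:

```python
import numpy as np
from opan.utils.base import check_geom

c1 = np.array([0.0, 0.0, 0.0, 1.1, 0.0, 0.0])   # stacked x,y,z per atom
a1 = np.array(['O', 'H'])

match, fail_type, fail_loc = check_geom(c1, a1, c1 + 1e-10, a1)
# match is True; fail_type and fail_loc are None

c2 = c1.copy()
c2[3] += 0.5                                     # perturb one coordinate
match, fail_type, fail_loc = check_geom(c1, a1, c2, a1)
# match is False; fail_type == EnumCheckGeomMismatch.COORDS;
# fail_loc is True everywhere except element 3
```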

bskinn/opan | opan/utils/base.py | template_subst | python | train

def template_subst(template, subs, delims=('<', '>')):
""" Perform substitution of content into tagged string.
    Intended for substitutions into template input files for external
    computational packages; no checks for valid syntax are performed.
Each key in `subs` corresponds to a delimited
substitution tag to be replaced in `template` by the entire text of the
value of that key. For example, the dict ``{"ABC": "text"}`` would
convert ``The <ABC> is working`` to ``The text is working``, using the
default delimiters of '<' and '>'. Substitutions are performed in
iteration order from `subs`; recursive substitution
as the tag parsing proceeds is thus
feasible if an :class:`~collections.OrderedDict` is used and substitution
key/value pairs are added in the proper order.
Start and end delimiters for the tags are modified by `delims`. For
example, to substitute a tag of the form **{\|TAG\|}**, the tuple
    ``("{|","|}")`` should be passed to `delims`. Any elements in
`delims` past the second are ignored. No checking is
performed for whether the delimiters are "sensible" or not.
Parameters
----------
template
|str| --
        Template containing tags delimited by `delims`,
with tag names and substitution contents provided in `subs`
subs
|dict| of |str| --
Each item's key and value are the tag name and corresponding content to
be substituted into the provided template.
delims
iterable of |str| --
Iterable containing the 'open' and 'close' strings used to mark tags
in the template, which are drawn from elements zero and one,
respectively. Any elements beyond these are ignored.
Returns
-------
subst_text
|str| --
String generated from the parsed template, with all tag
substitutions performed.
"""
# Store the template into the working variable
subst_text = template
# Iterate over subs and perform the .replace() calls
for (k,v) in subs.items():
subst_text = subst_text.replace(
delims[0] + k + delims[1], v)
## next tup
# Return the result
    return subst_text
""" Perform substitution of content into tagged string.
For substitutions into template input files for external computational
packages, no checks for valid syntax are performed.
Each key in `subs` corresponds to a delimited
substitution tag to be replaced in `template` by the entire text of the
value of that key. For example, the dict ``{"ABC": "text"}`` would
convert ``The <ABC> is working`` to ``The text is working``, using the
default delimiters of '<' and '>'. Substitutions are performed in
iteration order from `subs`; recursive substitution
as the tag parsing proceeds is thus
feasible if an :class:`~collections.OrderedDict` is used and substitution
key/value pairs are added in the proper order.
Start and end delimiters for the tags are modified by `delims`. For
example, to substitute a tag of the form **{\|TAG\|}**, the tuple
``("{|","|}")`` should be passed to `subs_delims`. Any elements in
`delims` past the second are ignored. No checking is
performed for whether the delimiters are "sensible" or not.
Parameters
----------
template
|str| --
Template containing tags delimited by `subs_delims`,
with tag names and substitution contents provided in `subs`
subs
|dict| of |str| --
Each item's key and value are the tag name and corresponding content to
be substituted into the provided template.
delims
iterable of |str| --
Iterable containing the 'open' and 'close' strings used to mark tags
in the template, which are drawn from elements zero and one,
respectively. Any elements beyond these are ignored.
Returns
-------
subst_text
|str| --
String generated from the parsed template, with all tag
substitutions performed.
"""
# Store the template into the working variable
subst_text = template
# Iterate over subs and perform the .replace() calls
for (k,v) in subs.items():
subst_text = subst_text.replace(
delims[0] + k + delims[1], v)
## next tup
# Return the result
return subst_text | [
"def",
"template_subst",
"(",
"template",
",",
"subs",
",",
"delims",
"=",
"(",
"'<'",
",",
"'>'",
")",
")",
":",
"subst_text",
"=",
"template",
"for",
"(",
"k",
",",
"v",
")",
"in",
"subs",
".",
"items",
"(",
")",
":",
"subst_text",
"=",
"subst_text",
".",
"replace",
"(",
"delims",
"[",
"0",
"]",
"+",
"k",
"+",
"delims",
"[",
"1",
"]",
",",
"v",
")",
"return",
"subst_text"
] | Perform substitution of content into tagged string.
For substitutions into template input files for external computational
packages, no checks for valid syntax are performed.
Each key in `subs` corresponds to a delimited
substitution tag to be replaced in `template` by the entire text of the
value of that key. For example, the dict ``{"ABC": "text"}`` would
convert ``The <ABC> is working`` to ``The text is working``, using the
default delimiters of '<' and '>'. Substitutions are performed in
iteration order from `subs`; recursive substitution
as the tag parsing proceeds is thus
feasible if an :class:`~collections.OrderedDict` is used and substitution
key/value pairs are added in the proper order.
Start and end delimiters for the tags are modified by `delims`. For
example, to substitute a tag of the form **{\|TAG\|}**, the tuple
``("{|","|}")`` should be passed to `subs_delims`. Any elements in
`delims` past the second are ignored. No checking is
performed for whether the delimiters are "sensible" or not.
Parameters
----------
template
|str| --
Template containing tags delimited by `subs_delims`,
with tag names and substitution contents provided in `subs`
subs
|dict| of |str| --
Each item's key and value are the tag name and corresponding content to
be substituted into the provided template.
delims
iterable of |str| --
Iterable containing the 'open' and 'close' strings used to mark tags
in the template, which are drawn from elements zero and one,
respectively. Any elements beyond these are ignored.
Returns
-------
subst_text
|str| --
String generated from the parsed template, with all tag
substitutions performed. | [
"Perform",
"substitution",
"of",
"content",
"into",
"tagged",
"string",
"."
0b1b21662df6abc971407a9386db21a8796fbfe5 | https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/utils/base.py#L429-L488
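A usage sketch for `template_subst`, matching the docstring's examples:

```python
from opan.utils.base import template_subst

tpl = "method <METHOD>; basis <BASIS>"
print(template_subst(tpl, {'METHOD': 'B3LYP', 'BASIS': 'cc-pVDZ'}))
# method B3LYP; basis cc-pVDZ

# Custom {|TAG|}-style delimiters:
print(template_subst("run {|N|} steps", {'N': '500'}, delims=('{|', '|}')))
# run 500 steps
```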

bskinn/opan | opan/utils/base.py | assert_npfloatarray | python | train

def assert_npfloatarray(obj, varname, desc, exc, tc, errsrc):
""" Assert a value is an |nparray| of NumPy floats.
Pass |None| to `varname` if `obj` itself is to be checked.
Otherwise, `varname` is the string name of the attribute of `obj` to
check. In either case, `desc` is a string description of the
object to be checked, for use in raising of exceptions.
Raises the exception `exc` with typecode `tc` if the indicated
object is determined not to be an |nparray|, with a NumPy float dtype.
Intended primarily to serve as an early check for
proper implementation of subclasses of
:class:`~opan.grad.SuperOpanGrad` and
:class:`~opan.hess.SuperOpanHess`. Early type-checking of key
attributes will hopefully avoid confusing bugs downstream.
Parameters
----------
obj
(arbitrary) --
Object to be checked, or object with attribute to be checked.
varname
|str| or |None| --
Name of the attribute of `obj` to be type-checked. |None|
indicates to check `obj` itself.
desc
|str| --
Description of the object being checked to be used in any
raised exceptions.
exc
Subclass of :class:`~opan.error.OpanError` to be raised on
a failed typecheck.
tc
Typecode of `exc` to be raised on a failed typecheck.
errsrc
|str| --
String description of the source of the data leading to a
failed typecheck.
"""
# Imports
import numpy as np
# Check for whether member or object is to be checked
if varname is None:
var = obj
else:
# Try to get the variable to be typechecked
try:
var = getattr(obj, varname)
except AttributeError:
raise exc(tc, "Attribute '{0}' not defined in '{1}'"
.format(varname, obj), errsrc)
## end try
## end if
# Try to pull the np dtype off of it
try:
dt = var.dtype
except AttributeError:
raise exc(tc, "'{0}' is not an np.array (lacks a 'dtype' member)"
.format(desc), errsrc)
else:
if not var.shape:
raise exc(tc, "'{0}' is not an np.array ('len(shape)' < 1)"
.format(desc), errsrc)
## end try
    # Confirm dtype inherits from np.float
    # (np.float was removed in NumPy 1.24; np.floating is the modern equivalent)
    if not np.issubdtype(dt, np.float):
        raise exc(tc, "'{0}' is not an np.array of np.float".format(desc),
                  errsrc)
""" Assert a value is an |nparray| of NumPy floats.
Pass |None| to `varname` if `obj` itself is to be checked.
Otherwise, `varname` is the string name of the attribute of `obj` to
check. In either case, `desc` is a string description of the
object to be checked, for use in raising of exceptions.
Raises the exception `exc` with typecode `tc` if the indicated
object is determined not to be an |nparray|, with a NumPy float dtype.
Intended primarily to serve as an early check for
proper implementation of subclasses of
:class:`~opan.grad.SuperOpanGrad` and
:class:`~opan.hess.SuperOpanHess`. Early type-checking of key
attributes will hopefully avoid confusing bugs downstream.
Parameters
----------
obj
(arbitrary) --
Object to be checked, or object with attribute to be checked.
varname
|str| or |None| --
Name of the attribute of `obj` to be type-checked. |None|
indicates to check `obj` itself.
desc
|str| --
Description of the object being checked to be used in any
raised exceptions.
exc
Subclass of :class:`~opan.error.OpanError` to be raised on
a failed typecheck.
tc
Typecode of `exc` to be raised on a failed typecheck.
errsrc
|str| --
String description of the source of the data leading to a
failed typecheck.
"""
# Imports
import numpy as np
# Check for whether member or object is to be checked
if varname is None:
var = obj
else:
# Try to get the variable to be typechecked
try:
var = getattr(obj, varname)
except AttributeError:
raise exc(tc, "Attribute '{0}' not defined in '{1}'"
.format(varname, obj), errsrc)
## end try
## end if
# Try to pull the np dtype off of it
try:
dt = var.dtype
except AttributeError:
raise exc(tc, "'{0}' is not an np.array (lacks a 'dtype' member)"
.format(desc), errsrc)
else:
if not var.shape:
raise exc(tc, "'{0}' is not an np.array ('len(shape)' < 1)"
.format(desc), errsrc)
## end try
# Confirm dtype inherits from np.float
if not np.issubdtype(dt, np.float):
raise exc(tc, "'{0}' is not an np.array of np.float".format(desc),
errsrc) | [
"def",
"assert_npfloatarray",
"(",
"obj",
",",
"varname",
",",
"desc",
",",
"exc",
",",
"tc",
",",
"errsrc",
")",
":",
"import",
"numpy",
"as",
"np",
"if",
"varname",
"is",
"None",
":",
"var",
"=",
"obj",
"else",
":",
"try",
":",
"var",
"=",
"getattr",
"(",
"obj",
",",
"varname",
")",
"except",
"AttributeError",
":",
"raise",
"exc",
"(",
"tc",
",",
"\"Attribute '{0}' not defined in '{1}'\"",
".",
"format",
"(",
"varname",
",",
"obj",
")",
",",
"errsrc",
")",
"try",
":",
"dt",
"=",
"var",
".",
"dtype",
"except",
"AttributeError",
":",
"raise",
"exc",
"(",
"tc",
",",
"\"'{0}' is not an np.array (lacks a 'dtype' member)\"",
".",
"format",
"(",
"desc",
")",
",",
"errsrc",
")",
"else",
":",
"if",
"not",
"var",
".",
"shape",
":",
"raise",
"exc",
"(",
"tc",
",",
"\"'{0}' is not an np.array ('len(shape)' < 1)\"",
".",
"format",
"(",
"desc",
")",
",",
"errsrc",
")",
"if",
"not",
"np",
".",
"issubdtype",
"(",
"dt",
",",
"np",
".",
"float",
")",
":",
"raise",
"exc",
"(",
"tc",
",",
"\"'{0}' is not an np.array of np.float\"",
".",
"format",
"(",
"desc",
")",
",",
"errsrc",
")"
] | Assert a value is an |nparray| of NumPy floats.
Pass |None| to `varname` if `obj` itself is to be checked.
Otherwise, `varname` is the string name of the attribute of `obj` to
check. In either case, `desc` is a string description of the
object to be checked, for use in raising of exceptions.
Raises the exception `exc` with typecode `tc` if the indicated
object is determined not to be an |nparray|, with a NumPy float dtype.
Intended primarily to serve as an early check for
proper implementation of subclasses of
:class:`~opan.grad.SuperOpanGrad` and
:class:`~opan.hess.SuperOpanHess`. Early type-checking of key
attributes will hopefully avoid confusing bugs downstream.
Parameters
----------
obj
(arbitrary) --
Object to be checked, or object with attribute to be checked.
varname
|str| or |None| --
Name of the attribute of `obj` to be type-checked. |None|
indicates to check `obj` itself.
desc
|str| --
Description of the object being checked to be used in any
raised exceptions.
exc
Subclass of :class:`~opan.error.OpanError` to be raised on
a failed typecheck.
tc
Typecode of `exc` to be raised on a failed typecheck.
errsrc
|str| --
String description of the source of the data leading to a
failed typecheck. | [
"Assert",
"a",
"value",
"is",
"an",
"|nparray|",
"of",
"NumPy",
"floats",
"."
0b1b21662df6abc971407a9386db21a8796fbfe5 | https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/utils/base.py#L531-L609
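A sketch of the call pattern for `assert_npfloatarray`, using a stand-in exception class that mimics the `(typecode, message, source)` signature of `OpanError` subclasses; on NumPy >= 1.24 the `np.float` lookup inside the function itself fails (see the comment added above):

```python
import numpy as np
from opan.utils.base import assert_npfloatarray

class DemoError(Exception):
    # Hypothetical stand-in for an opan.error.OpanError subclass
    BADVAL = 'BADVAL'
    def __init__(self, tc, msg, src):
        super().__init__("{0}: {1} ({2})".format(tc, msg, src))

class Stub:
    grad = np.array([0.1, -0.2, 0.0])

# Passes silently: Stub.grad is a float ndarray
assert_npfloatarray(Stub(), 'grad', 'gradient vector',
                    DemoError, DemoError.BADVAL, 'demo data')

Stub.grad = np.array([1, 2, 3])   # integer dtype
# assert_npfloatarray(Stub(), 'grad', ...) would now raise DemoError
```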

mila-iqia/picklable-itertools | picklable_itertools/tee.py | tee_manager.advance | python | train

def advance(self):
"""Advance the base iterator, publish to constituent iterators."""
elem = next(self._iterable)
for deque in self._deques:
            deque.append(elem)
"""Advance the base iterator, publish to constituent iterators."""
elem = next(self._iterable)
for deque in self._deques:
deque.append(elem) | [
"def",
"advance",
"(",
"self",
")",
":",
"elem",
"=",
"next",
"(",
"self",
".",
"_iterable",
")",
"for",
"deque",
"in",
"self",
".",
"_deques",
":",
"deque",
".",
"append",
"(",
"elem",
")"
] | Advance the base iterator, publish to constituent iterators. | [
"Advance",
"the",
"base",
"iterator",
"publish",
"to",
"constituent",
"iterators",
"."
e00238867875df0258cf4f83f528d846e7c1afc4 | https://github.com/mila-iqia/picklable-itertools/blob/e00238867875df0258cf4f83f528d846e7c1afc4/picklable_itertools/tee.py#L36-L40
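For context, a sketch of the tee semantics this manager implements, assuming `picklable_itertools.tee` mirrors the stdlib `itertools.tee` signature (the package's stated goal is a picklable drop-in for itertools):

```python
from picklable_itertools import tee

it_a, it_b = tee(iter(range(5)), 2)
next(it_a)          # advance() pulls one element and publishes it to both deques
print(list(it_b))   # [0, 1, 2, 3, 4] -- b still sees the element a consumed
print(list(it_a))   # [1, 2, 3, 4]
```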

pyviz/imagen | imagen/deprecated.py | SeparatedComposite._advance_pattern_generators | python | train

def _advance_pattern_generators(self, p):
"""
Advance the parameters for each generator for this
presentation.
Picks a position for each generator that is accepted by
__distance_valid for all combinations. Returns a new list of
the generators, with some potentially omitted due to failure
to meet the constraints.
"""
valid_generators = []
for g in p.generators:
for trial in range(self.max_trials):
# Generate a new position and add generator if it's ok
if np.alltrue([self.__distance_valid(g,v,p) for v in valid_generators]):
valid_generators.append(g)
break
g.force_new_dynamic_value('x')
g.force_new_dynamic_value('y')
else:
self.warning("Unable to place pattern %s subject to given constraints" %
g.name)
    return valid_generators
"""
Advance the parameters for each generator for this
presentation.
Picks a position for each generator that is accepted by
__distance_valid for all combinations. Returns a new list of
the generators, with some potentially omitted due to failure
to meet the constraints.
"""
valid_generators = []
for g in p.generators:
for trial in range(self.max_trials):
# Generate a new position and add generator if it's ok
if np.alltrue([self.__distance_valid(g,v,p) for v in valid_generators]):
valid_generators.append(g)
break
g.force_new_dynamic_value('x')
g.force_new_dynamic_value('y')
else:
self.warning("Unable to place pattern %s subject to given constraints" %
g.name)
return valid_generators | [
"def",
"_advance_pattern_generators",
"(",
"self",
",",
"p",
")",
":",
"valid_generators",
"=",
"[",
"]",
"for",
"g",
"in",
"p",
".",
"generators",
":",
"for",
"trial",
"in",
"range",
"(",
"self",
".",
"max_trials",
")",
":",
"if",
"np",
".",
"alltrue",
"(",
"[",
"self",
".",
"__distance_valid",
"(",
"g",
",",
"v",
",",
"p",
")",
"for",
"v",
"in",
"valid_generators",
"]",
")",
":",
"valid_generators",
".",
"append",
"(",
"g",
")",
"break",
"g",
".",
"force_new_dynamic_value",
"(",
"'x'",
")",
"g",
".",
"force_new_dynamic_value",
"(",
"'y'",
")",
"else",
":",
"self",
".",
"warning",
"(",
"\"Unable to place pattern %s subject to given constraints\"",
"%",
"g",
".",
"name",
")",
"return",
"valid_generators"
] | Advance the parameters for each generator for this
presentation.
Picks a position for each generator that is accepted by
__distance_valid for all combinations. Returns a new list of
the generators, with some potentially omitted due to failure
to meet the constraints. | [
"Advance",
"the",
"parameters",
"for",
"each",
"generator",
"for",
"this",
"presentation",
"."
53c5685c880f54b42795964d8db50b02e8590e88 | https://github.com/pyviz/imagen/blob/53c5685c880f54b42795964d8db50b02e8590e88/imagen/deprecated.py#L64-L92
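The method above is a bounded rejection loop; a standalone sketch of the same idea with hypothetical names (a minimum-distance test standing in for `__distance_valid`):

```python
import numpy as np

def place_separated(n, min_dist, max_trials=50, seed=0):
    """Pick up to n random 2-D positions, each at least min_dist apart."""
    rng = np.random.default_rng(seed)
    placed = []
    for _ in range(n):
        for _trial in range(max_trials):
            pos = rng.uniform(-1, 1, size=2)            # candidate position
            if all(np.linalg.norm(pos - q) >= min_dist for q in placed):
                placed.append(pos)                       # constraint satisfied
                break
        else:
            print("Unable to place pattern subject to given constraints")
    return placed

print(len(place_separated(5, 0.5)))   # usually 5 for this box size
```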

pyviz/imagen | imagen/deprecated.py | Translator._advance_params | python | train

def _advance_params(self):
"""
Explicitly generate new values for these parameters only
when appropriate.
"""
for p in ['x','y','direction']:
self.force_new_dynamic_value(p)
    self.last_time = self.time_fn()
"""
Explicitly generate new values for these parameters only
when appropriate.
"""
for p in ['x','y','direction']:
self.force_new_dynamic_value(p)
self.last_time = self.time_fn() | [
"def",
"_advance_params",
"(",
"self",
")",
":",
"for",
"p",
"in",
"[",
"'x'",
",",
"'y'",
",",
"'direction'",
"]",
":",
"self",
".",
"force_new_dynamic_value",
"(",
"p",
")",
"self",
".",
"last_time",
"=",
"self",
".",
"time_fn",
"(",
")"
] | Explicitly generate new values for these parameters only
when appropriate. | [
"Explicitly",
"generate",
"new",
"values",
"for",
"these",
"parameters",
"only",
"when",
"appropriate",
"."
53c5685c880f54b42795964d8db50b02e8590e88 | https://github.com/pyviz/imagen/blob/53c5685c880f54b42795964d8db50b02e8590e88/imagen/deprecated.py#L236-L243

matthewwithanm/django-classbasedsettings | cbsettings/switching/__init__.py | BaseSwitcher.register | python | train

def register(self, settings_class=NoSwitcher, *simple_checks,
**conditions):
"""
Register a settings class with the switcher. Can be passed the settings
class to register or be used as a decorator.
:param settings_class: The class to register with the provided
conditions.
:param *simple_checks: A list of conditions for using the settings
class. If any of the values are falsy, the class will not be
used. If any of the values are callable, they will be called
before evaluating.
:param **conditions: Values to check. The key specifies which of the
check functions (registered with ``add_check``) to use; the
value is passed to the check function.
"""
if settings_class is NoSwitcher:
def decorator(cls):
self.register(cls, *simple_checks, **conditions)
return cls
return decorator
available_checks = self.checks.keys()
for condition in conditions.keys():
if condition not in available_checks:
raise InvalidCondition(
'There is no check for the condition "%s"' % condition)
    self._registry.append((settings_class, simple_checks, conditions))
ac9e4362bd1f4954f3e4679b97726cab2b22aea9 | https://github.com/matthewwithanm/django-classbasedsettings/blob/ac9e4362bd1f4954f3e4679b97726cab2b22aea9/cbsettings/switching/__init__.py#L32-L60
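An illustrative (not directly runnable) sketch of `register`, based only on the docstring; `switcher`, `BaseSettings`, `ProductionSettings`, `DevSettings`, and the 'hostname' check are hypothetical, with the check assumed to have been added beforehand via `add_check`:

```python
import os

# Decorator form, keyed on a previously added 'hostname' check:
@switcher.register(hostname='prod-web-01')
class ProductionSettings(BaseSettings):
    DEBUG = False

# Plain form with a simple truthy condition (callables are called first):
switcher.register(DevSettings, os.environ.get('DJANGO_DEV'))

# Unknown condition names raise InvalidCondition:
# switcher.register(DevSettings, moon_phase='full')
```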

mikeboers/PyHAML | haml/parse.py | Parser._peek_buffer | python | train

def _peek_buffer(self, i=0):
"""Get the next line without consuming it."""
while len(self._buffer) <= i:
self._buffer.append(next(self._source))
    return self._buffer[i]
"""Get the next line without consuming it."""
while len(self._buffer) <= i:
self._buffer.append(next(self._source))
return self._buffer[i] | [
"def",
"_peek_buffer",
"(",
"self",
",",
"i",
"=",
"0",
")",
":",
"while",
"len",
"(",
"self",
".",
"_buffer",
")",
"<=",
"i",
":",
"self",
".",
"_buffer",
".",
"append",
"(",
"next",
"(",
"self",
".",
"_source",
")",
")",
"return",
"self",
".",
"_buffer",
"[",
"i",
"]"
] | Get the next line without consuming it. | [
"Get",
"the",
"next",
"line",
"without",
"consuming",
"it",
"."
9ecb7c85349948428474869aad5b8d1c7de8dbed | https://github.com/mikeboers/PyHAML/blob/9ecb7c85349948428474869aad5b8d1c7de8dbed/haml/parse.py#L37-L41

mikeboers/PyHAML | haml/parse.py | Parser._make_readline_peeker | python | train

def _make_readline_peeker(self):
"""Make a readline-like function which peeks into the source."""
counter = itertools.count(0)
def readline():
try:
return self._peek_buffer(next(counter))
except StopIteration:
return ''
    return readline
"""Make a readline-like function which peeks into the source."""
counter = itertools.count(0)
def readline():
try:
return self._peek_buffer(next(counter))
except StopIteration:
return ''
return readline | [
"def",
"_make_readline_peeker",
"(",
"self",
")",
":",
"counter",
"=",
"itertools",
".",
"count",
"(",
"0",
")",
"def",
"readline",
"(",
")",
":",
"try",
":",
"return",
"self",
".",
"_peek_buffer",
"(",
"next",
"(",
"counter",
")",
")",
"except",
"StopIteration",
":",
"return",
"''",
"return",
"readline"
] | Make a readline-like function which peeks into the source. | [
"Make",
"a",
"readline",
"-",
"like",
"function",
"which",
"peeks",
"into",
"the",
"source",
"."
9ecb7c85349948428474869aad5b8d1c7de8dbed | https://github.com/mikeboers/PyHAML/blob/9ecb7c85349948428474869aad5b8d1c7de8dbed/haml/parse.py#L52-L60
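The counter-plus-peek closure above is useful on its own; a self-contained sketch with a hypothetical line buffer in place of the parser's source:

```python
import itertools

class Peeker:
    def __init__(self, lines):
        self._lines = lines

    def _peek_buffer(self, i=0):
        if i >= len(self._lines):
            raise StopIteration
        return self._lines[i]

    def make_readline(self):
        counter = itertools.count(0)   # each call peeks one line further
        def readline():
            try:
                return self._peek_buffer(next(counter))
            except StopIteration:
                return ''              # readline convention for end of input
        return readline

rl = Peeker(['a\n', 'b\n']).make_readline()
print([rl(), rl(), rl()])   # ['a\n', 'b\n', ''] -- nothing is consumed
```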

mikeboers/PyHAML | haml/parse.py | Parser._add_node | python | train

def _add_node(self, node, depth):
"""Add a node to the graph, and the stack."""
self._topmost_node.add_child(node, bool(depth[1]))
    self._stack.append((depth, node))
"""Add a node to the graph, and the stack."""
self._topmost_node.add_child(node, bool(depth[1]))
self._stack.append((depth, node)) | [
"def",
"_add_node",
"(",
"self",
",",
"node",
",",
"depth",
")",
":",
"self",
".",
"_topmost_node",
".",
"add_child",
"(",
"node",
",",
"bool",
"(",
"depth",
"[",
"1",
"]",
")",
")",
"self",
".",
"_stack",
".",
"append",
"(",
"(",
"depth",
",",
"node",
")",
")"
] | Add a node to the graph, and the stack. | [
"Add",
"a",
"node",
"to",
"the",
"graph",
"and",
"the",
"stack",
"."
9ecb7c85349948428474869aad5b8d1c7de8dbed | https://github.com/mikeboers/PyHAML/blob/9ecb7c85349948428474869aad5b8d1c7de8dbed/haml/parse.py#L386-L389

bskinn/opan | opan/xyz.py | OpanXYZ._load_data | python | train

def _load_data(self, atom_syms, coords, bohrs=True):
""" Internal function for making XYZ object from explicit geom data.
Parameters
----------
atom_syms
Squeezes to array of N |str| --
Element symbols for the XYZ. Must be valid elements as defined in
the keys of :data:`const.atom_num <opan.const.atom_num>`.
coords
Squeezes to array of 3N |npfloat_| castables --
Coordinates for the geometry.
bohrs
|bool|, optional --
Units of coordinates (default |True|)
Raises
------
~opan.XYZError
(typecode :attr:`~opan.error.XYZError.OVERWRITE`)
            If the :class:`OpanXYZ` object has already been initialized.
~exceptions.ValueError
If atom_syms & coords dimensions are incompatible
~exceptions.ValueError
If type of `atom_syms` and/or `coords` is invalid
"""
# Imports
import numpy as np
from .const import atom_num, PHYS
from .error import XYZError
# Gripe if already initialized
if 'geoms' in dir(self):
raise XYZError(XYZError.OVERWRITE,
"Cannot overwrite contents of existing OpanXYZ", "")
## end if
# Check and store dimensions
if not len(coords.shape) == 1:
raise ValueError("Coordinates are not a vector")
## end if
if not len(atom_syms.shape) == 1:
raise ValueError("Atom symbols are not a simple list")
## end if
if not coords.shape[0] == 3 * atom_syms.shape[0]:
raise ValueError("len(coords) != 3 * len(atom_syms)")
## end if
# Proof the atoms list
if not all( (atom_syms[i].upper() in atom_num)
for i in range(atom_syms.shape[0]) ):
# Invalid atoms specified
raise ValueError("Invalid atoms specified: {0}".format(
[(j, atom_syms[j]) for j in
(i for (i, valid) in
enumerate(map(lambda k: k in atom_num, atom_syms))
if not valid
)
] ))
## end if
# Ensure the geometry is all numeric
if not all(map(np.isreal, coords)):
raise ValueError("All coordinates must be real numeric")
## end if
# Store the number of atoms. Only one geometry. Standard string
# content for things only relevant to file load.
self.num_atoms = atom_syms.shape[0]
self.num_geoms = 1
self.in_str = self.LOAD_DATA_FLAG
self.descs = np.array([self.LOAD_DATA_FLAG])
self.XYZ_path = self.LOAD_DATA_FLAG
# Store the atoms as vector
self.atom_syms = list(map(str.upper, list(atom_syms)))
        # Store the single geometry by wrapping it in a list
self.geoms = [coords / (1.0 if bohrs else PHYS.ANG_PER_BOHR)] | python | def _load_data(self, atom_syms, coords, bohrs=True):
""" Internal function for making XYZ object from explicit geom data.
Parameters
----------
atom_syms
Squeezes to array of N |str| --
Element symbols for the XYZ. Must be valid elements as defined in
the keys of :data:`const.atom_num <opan.const.atom_num>`.
coords
Squeezes to array of 3N |npfloat_| castables --
Coordinates for the geometry.
bohrs
|bool|, optional --
Units of coordinates (default |True|)
Raises
------
~opan.XYZError
(typecode :attr:`~opan.error.XYZError.OVERWRITE`)
            If the :class:`OpanXYZ` object has already been initialized.
~exceptions.ValueError
If atom_syms & coords dimensions are incompatible
~exceptions.ValueError
If type of `atom_syms` and/or `coords` is invalid
"""
# Imports
import numpy as np
from .const import atom_num, PHYS
from .error import XYZError
# Gripe if already initialized
if 'geoms' in dir(self):
raise XYZError(XYZError.OVERWRITE,
"Cannot overwrite contents of existing OpanXYZ", "")
## end if
# Check and store dimensions
if not len(coords.shape) == 1:
raise ValueError("Coordinates are not a vector")
## end if
if not len(atom_syms.shape) == 1:
raise ValueError("Atom symbols are not a simple list")
## end if
if not coords.shape[0] == 3 * atom_syms.shape[0]:
raise ValueError("len(coords) != 3 * len(atom_syms)")
## end if
# Proof the atoms list
if not all( (atom_syms[i].upper() in atom_num)
for i in range(atom_syms.shape[0]) ):
# Invalid atoms specified
raise ValueError("Invalid atoms specified: {0}".format(
[(j, atom_syms[j]) for j in
(i for (i, valid) in
enumerate(map(lambda k: k in atom_num, atom_syms))
if not valid
)
] ))
## end if
# Ensure the geometry is all numeric
if not all(map(np.isreal, coords)):
raise ValueError("All coordinates must be real numeric")
## end if
# Store the number of atoms. Only one geometry. Standard string
# content for things only relevant to file load.
self.num_atoms = atom_syms.shape[0]
self.num_geoms = 1
self.in_str = self.LOAD_DATA_FLAG
self.descs = np.array([self.LOAD_DATA_FLAG])
self.XYZ_path = self.LOAD_DATA_FLAG
# Store the atoms as vector
self.atom_syms = list(map(str.upper, list(atom_syms)))
        # Store the single geometry by wrapping it in a list
self.geoms = [coords / (1.0 if bohrs else PHYS.ANG_PER_BOHR)] | [
"def",
"_load_data",
"(",
"self",
",",
"atom_syms",
",",
"coords",
",",
"bohrs",
"=",
"True",
")",
":",
"import",
"numpy",
"as",
"np",
"from",
".",
"const",
"import",
"atom_num",
",",
"PHYS",
"from",
".",
"error",
"import",
"XYZError",
"if",
"'geoms'",
"in",
"dir",
"(",
"self",
")",
":",
"raise",
"XYZError",
"(",
"XYZError",
".",
"OVERWRITE",
",",
"\"Cannot overwrite contents of existing OpanXYZ\"",
",",
"\"\"",
")",
"if",
"not",
"len",
"(",
"coords",
".",
"shape",
")",
"==",
"1",
":",
"raise",
"ValueError",
"(",
"\"Coordinates are not a vector\"",
")",
"if",
"not",
"len",
"(",
"atom_syms",
".",
"shape",
")",
"==",
"1",
":",
"raise",
"ValueError",
"(",
"\"Atom symbols are not a simple list\"",
")",
"if",
"not",
"coords",
".",
"shape",
"[",
"0",
"]",
"==",
"3",
"*",
"atom_syms",
".",
"shape",
"[",
"0",
"]",
":",
"raise",
"ValueError",
"(",
"\"len(coords) != 3 * len(atom_syms)\"",
")",
"if",
"not",
"all",
"(",
"(",
"atom_syms",
"[",
"i",
"]",
".",
"upper",
"(",
")",
"in",
"atom_num",
")",
"for",
"i",
"in",
"range",
"(",
"atom_syms",
".",
"shape",
"[",
"0",
"]",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"Invalid atoms specified: {0}\"",
".",
"format",
"(",
"[",
"(",
"j",
",",
"atom_syms",
"[",
"j",
"]",
")",
"for",
"j",
"in",
"(",
"i",
"for",
"(",
"i",
",",
"valid",
")",
"in",
"enumerate",
"(",
"map",
"(",
"lambda",
"k",
":",
"k",
"in",
"atom_num",
",",
"atom_syms",
")",
")",
"if",
"not",
"valid",
")",
"]",
")",
")",
"if",
"not",
"all",
"(",
"map",
"(",
"np",
".",
"isreal",
",",
"coords",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"All coordinates must be real numeric\"",
")",
"self",
".",
"num_atoms",
"=",
"atom_syms",
".",
"shape",
"[",
"0",
"]",
"self",
".",
"num_geoms",
"=",
"1",
"self",
".",
"in_str",
"=",
"self",
".",
"LOAD_DATA_FLAG",
"self",
".",
"descs",
"=",
"np",
".",
"array",
"(",
"[",
"self",
".",
"LOAD_DATA_FLAG",
"]",
")",
"self",
".",
"XYZ_path",
"=",
"self",
".",
"LOAD_DATA_FLAG",
"self",
".",
"atom_syms",
"=",
"list",
"(",
"map",
"(",
"str",
".",
"upper",
",",
"list",
"(",
"atom_syms",
")",
")",
")",
"self",
".",
"geoms",
"=",
"[",
"coords",
"/",
"(",
"1.0",
"if",
"bohrs",
"else",
"PHYS",
".",
"ANG_PER_BOHR",
")",
"]"
] | Internal function for making XYZ object from explicit geom data.
Parameters
----------
atom_syms
Squeezes to array of N |str| --
Element symbols for the XYZ. Must be valid elements as defined in
the keys of :data:`const.atom_num <opan.const.atom_num>`.
coords
Squeezes to array of 3N |npfloat_| castables --
Coordinates for the geometry.
bohrs
|bool|, optional --
Units of coordinates (default |True|)
Raises
------
~opan.XYZError
(typecode :attr:`~opan.error.XYZError.OVERWRITE`)
            If the :class:`OpanXYZ` object has already been initialized.
~exceptions.ValueError
If atom_syms & coords dimensions are incompatible
~exceptions.ValueError
If type of `atom_syms` and/or `coords` is invalid | [
"Internal",
"function",
"for",
"making",
"XYZ",
"object",
"from",
"explicit",
"geom",
"data",
"."
] | 0b1b21662df6abc971407a9386db21a8796fbfe5 | https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/xyz.py#L354-L438 | train |
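
A self-contained sketch of the validation rules `_load_data` enforces; the tiny `atom_num` dict is an assumed stand-in for `opan.const.atom_num`.

import numpy as np

atom_num = {'H': 1, 'O': 8}  # minimal stand-in for opan.const.atom_num

def validate(atom_syms, coords):
    if coords.ndim != 1:
        raise ValueError("Coordinates are not a vector")
    if coords.shape[0] != 3 * atom_syms.shape[0]:
        raise ValueError("len(coords) != 3 * len(atom_syms)")
    if not all(s.upper() in atom_num for s in atom_syms):
        raise ValueError("Invalid atoms specified")

validate(np.array(['O', 'H', 'H']), np.zeros(9))  # passes silently
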
bskinn/opan | opan/xyz.py | OpanXYZ.geom_iter | def geom_iter(self, g_nums):
"""Iterator over a subset of geometries.
The indices of the geometries to be returned are indicated by an
iterable of |int|\\ s passed as `g_nums`.
As with :meth:`geom_single`, each geometry is returned as a
length-3N |npfloat_| with each atom's x/y/z coordinates
grouped together::
[A1x, A1y, A1z, A2x, A2y, A2z, ...]
In order to use NumPy `slicing or advanced indexing
<http://docs.scipy.org/doc/numpy-1.10.0/reference/
arrays.indexing.html>`__, :data:`geoms` must first be
explicitly converted to |nparray|, e.g.::
>>> x = opan.xyz.OpanXYZ(path='...')
>>> np.array(x.geoms)[[2,6,9]]
Parameters
----------
g_nums
length-R iterable of |int| --
Indices of the desired geometries
Yields
------
geom
length-3N |npfloat_| --
Vectors of the atomic coordinates for each geometry
indicated in `g_nums`
Raises
------
~exceptions.IndexError
If an item in `g_nums` is invalid (out of range)
"""
        # Use the custom pack_tups function so we need not care whether
        # the input is iterable
from .utils import pack_tups
vals = pack_tups(g_nums)
for val in vals:
yield self.geom_single(val[0]) | python | def geom_iter(self, g_nums):
"""Iterator over a subset of geometries.
The indices of the geometries to be returned are indicated by an
iterable of |int|\\ s passed as `g_nums`.
As with :meth:`geom_single`, each geometry is returned as a
length-3N |npfloat_| with each atom's x/y/z coordinates
grouped together::
[A1x, A1y, A1z, A2x, A2y, A2z, ...]
In order to use NumPy `slicing or advanced indexing
<http://docs.scipy.org/doc/numpy-1.10.0/reference/
arrays.indexing.html>`__, :data:`geoms` must first be
explicitly converted to |nparray|, e.g.::
>>> x = opan.xyz.OpanXYZ(path='...')
>>> np.array(x.geoms)[[2,6,9]]
Parameters
----------
g_nums
length-R iterable of |int| --
Indices of the desired geometries
Yields
------
geom
length-3N |npfloat_| --
Vectors of the atomic coordinates for each geometry
indicated in `g_nums`
Raises
------
~exceptions.IndexError
If an item in `g_nums` is invalid (out of range)
"""
        # Use the custom pack_tups function so we need not care whether
        # the input is iterable
from .utils import pack_tups
vals = pack_tups(g_nums)
for val in vals:
yield self.geom_single(val[0]) | [
"def",
"geom_iter",
"(",
"self",
",",
"g_nums",
")",
":",
"from",
".",
"utils",
"import",
"pack_tups",
"vals",
"=",
"pack_tups",
"(",
"g_nums",
")",
"for",
"val",
"in",
"vals",
":",
"yield",
"self",
".",
"geom_single",
"(",
"val",
"[",
"0",
"]",
")"
] | Iterator over a subset of geometries.
The indices of the geometries to be returned are indicated by an
iterable of |int|\\ s passed as `g_nums`.
As with :meth:`geom_single`, each geometry is returned as a
length-3N |npfloat_| with each atom's x/y/z coordinates
grouped together::
[A1x, A1y, A1z, A2x, A2y, A2z, ...]
In order to use NumPy `slicing or advanced indexing
<http://docs.scipy.org/doc/numpy-1.10.0/reference/
arrays.indexing.html>`__, :data:`geoms` must first be
explicitly converted to |nparray|, e.g.::
>>> x = opan.xyz.OpanXYZ(path='...')
>>> np.array(x.geoms)[[2,6,9]]
Parameters
----------
g_nums
length-R iterable of |int| --
Indices of the desired geometries
Yields
------
geom
length-3N |npfloat_| --
Vectors of the atomic coordinates for each geometry
indicated in `g_nums`
Raises
------
~exceptions.IndexError
If an item in `g_nums` is invalid (out of range) | [
"Iterator",
"over",
"a",
"subset",
"of",
"geometries",
"."
] | 0b1b21662df6abc971407a9386db21a8796fbfe5 | https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/xyz.py#L725-L770 | train |
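
A runnable stand-in showing that the generator pattern of `geom_iter` and the NumPy fancy indexing mentioned in the docstring select the same geometries; the fake geometries below are illustrative only.

import numpy as np

geoms = [np.arange(9.0) + k for k in range(4)]  # four fake length-9 geometries

def geom_iter(g_nums):
    # Same generator shape as OpanXYZ.geom_iter: one geometry per index.
    for g in g_nums:
        yield geoms[g]

via_iter = list(geom_iter([0, 2]))
via_fancy = list(np.array(geoms)[[0, 2]])
assert all((a == b).all() for a, b in zip(via_iter, via_fancy))
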
bskinn/opan | opan/xyz.py | OpanXYZ.dist_single | def dist_single(self, g_num, at_1, at_2):
""" Distance between two atoms.
Parameters
----------
g_num
|int| -- Index of the desired geometry
at_1
|int| -- Index of the first atom
at_2
|int| -- Index of the second atom
Returns
-------
dist
|npfloat_| --
Distance in Bohrs between `at_1` and `at_2` from
geometry `g_num`
Raises
------
~exceptions.IndexError
If an invalid (out-of-range) `g_num` or `at_#` is provided
"""
# Import used math library function(s)
import numpy as np
from scipy import linalg as spla
from .utils import safe_cast as scast
# The below errors are explicitly thrown since values are multiplied by
# three when they are used as an index and thus give non-intuitive
# errors in subsequent code.
# Complain if at_1 is invalid
if not (-self.num_atoms <= at_1 < self.num_atoms):
raise IndexError("Invalid index for 'at_1' ({0})".format(at_1))
# Complain if at_2 is invalid
if not (-self.num_atoms <= at_2 < self.num_atoms):
raise IndexError("Invalid index for 'at_2' ({0})".format(at_2))
# Should never be necessary (save for badly erroneous calling code),
# but coerce at_1 and at_2 to their floor() values. This is again
        # needed since they are multiplied by three in the index expressions
# below, and can cause funny behavior when truncated by the indexing
at_1 = scast(np.floor(at_1), np.int_)
at_2 = scast(np.floor(at_2), np.int_)
# Calculate the interatomic distance and return. Return identically
# zero if the indices are equal
if at_1 == at_2:
dist = 0.0
else:
dist = scast(
spla.norm(self.displ_single(g_num, at_1, at_2)),
np.float_)
## end if
return dist | python | def dist_single(self, g_num, at_1, at_2):
""" Distance between two atoms.
Parameters
----------
g_num
|int| -- Index of the desired geometry
at_1
|int| -- Index of the first atom
at_2
|int| -- Index of the second atom
Returns
-------
dist
|npfloat_| --
Distance in Bohrs between `at_1` and `at_2` from
geometry `g_num`
Raises
------
~exceptions.IndexError
If an invalid (out-of-range) `g_num` or `at_#` is provided
"""
# Import used math library function(s)
import numpy as np
from scipy import linalg as spla
from .utils import safe_cast as scast
# The below errors are explicitly thrown since values are multiplied by
# three when they are used as an index and thus give non-intuitive
# errors in subsequent code.
# Complain if at_1 is invalid
if not (-self.num_atoms <= at_1 < self.num_atoms):
raise IndexError("Invalid index for 'at_1' ({0})".format(at_1))
# Complain if at_2 is invalid
if not (-self.num_atoms <= at_2 < self.num_atoms):
raise IndexError("Invalid index for 'at_2' ({0})".format(at_2))
# Should never be necessary (save for badly erroneous calling code),
# but coerce at_1 and at_2 to their floor() values. This is again
        # needed since they are multiplied by three in the index expressions
# below, and can cause funny behavior when truncated by the indexing
at_1 = scast(np.floor(at_1), np.int_)
at_2 = scast(np.floor(at_2), np.int_)
# Calculate the interatomic distance and return. Return identically
# zero if the indices are equal
if at_1 == at_2:
dist = 0.0
else:
dist = scast(
spla.norm(self.displ_single(g_num, at_1, at_2)),
np.float_)
## end if
return dist | [
"def",
"dist_single",
"(",
"self",
",",
"g_num",
",",
"at_1",
",",
"at_2",
")",
":",
"import",
"numpy",
"as",
"np",
"from",
"scipy",
"import",
"linalg",
"as",
"spla",
"from",
".",
"utils",
"import",
"safe_cast",
"as",
"scast",
"if",
"not",
"(",
"-",
"self",
".",
"num_atoms",
"<=",
"at_1",
"<",
"self",
".",
"num_atoms",
")",
":",
"raise",
"IndexError",
"(",
"\"Invalid index for 'at_1' ({0})\"",
".",
"format",
"(",
"at_1",
")",
")",
"if",
"not",
"(",
"-",
"self",
".",
"num_atoms",
"<=",
"at_2",
"<",
"self",
".",
"num_atoms",
")",
":",
"raise",
"IndexError",
"(",
"\"Invalid index for 'at_2' ({0})\"",
".",
"format",
"(",
"at_2",
")",
")",
"at_1",
"=",
"scast",
"(",
"np",
".",
"floor",
"(",
"at_1",
")",
",",
"np",
".",
"int_",
")",
"at_2",
"=",
"scast",
"(",
"np",
".",
"floor",
"(",
"at_2",
")",
",",
"np",
".",
"int_",
")",
"if",
"at_1",
"==",
"at_2",
":",
"dist",
"=",
"0.0",
"else",
":",
"dist",
"=",
"scast",
"(",
"spla",
".",
"norm",
"(",
"self",
".",
"displ_single",
"(",
"g_num",
",",
"at_1",
",",
"at_2",
")",
")",
",",
"np",
".",
"float_",
")",
"return",
"dist"
] | Distance between two atoms.
Parameters
----------
g_num
|int| -- Index of the desired geometry
at_1
|int| -- Index of the first atom
at_2
|int| -- Index of the second atom
Returns
-------
dist
|npfloat_| --
Distance in Bohrs between `at_1` and `at_2` from
geometry `g_num`
Raises
------
~exceptions.IndexError
If an invalid (out-of-range) `g_num` or `at_#` is provided | [
"Distance",
"between",
"two",
"atoms",
"."
] | 0b1b21662df6abc971407a9386db21a8796fbfe5 | https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/xyz.py#L775-L836 | train |
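
The distance arithmetic on a flat `[x1, y1, z1, x2, y2, z2, ...]` vector, as a minimal runnable sketch of what `dist_single` computes via `displ_single`; the two-atom geometry is made up for illustration.

import numpy as np
from scipy import linalg as spla

geom = np.array([0.0, 0.0, 0.0,   1.0, 2.0, 2.0])  # two atoms, Bohrs
at_1, at_2 = 0, 1
displ = np.array([geom[i + 3*at_2] - geom[i + 3*at_1] for i in range(3)])
print(spla.norm(displ))  # 3.0
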
bskinn/opan | opan/xyz.py | OpanXYZ.dist_iter | def dist_iter(self, g_nums, ats_1, ats_2, invalid_error=False):
""" Iterator over selected interatomic distances.
Distances are in Bohrs as with :meth:`dist_single`.
See `above <toc-generators_>`_ for more information on
calling options.
Parameters
----------
g_nums
|int| or length-R iterable |int| or |None| --
Index/indices of the desired geometry/geometries
ats_1
|int| or iterable |int| or |None| --
Index/indices of the first atom(s)
ats_2
|int| or iterable |int| or |None| --
Index/indices of the second atom(s)
invalid_error
|bool|, optional --
If |False| (the default), |None| values are returned for
results corresponding to invalid indices. If |True|,
exceptions are raised per normal.
Yields
------
dist
|npfloat_| --
Interatomic distance in Bohrs between each atom pair of
`ats_1` and `ats_2` from the corresponding geometries
of `g_nums`.
Raises
------
~exceptions.IndexError
If an invalid (out-of-range) `g_num` or `at_#` is provided.
~exceptions.ValueError
If all iterable objects are not the same length.
"""
# Imports
import numpy as np
from .utils import pack_tups
# Print the function inputs if debug mode is on
if _DEBUG: # pragma: no cover
print("g_nums = {0}".format(g_nums))
print("ats_1 = {0}".format(ats_1))
print("ats_2 = {0}".format(ats_2))
## end if
# Perform the None substitution
arglist = self._none_subst(g_nums, ats_1, ats_2)
# Expand/pack the tuples from the inputs
tups = pack_tups(*arglist)
# Dump the results if debug mode is on
if _DEBUG: # pragma: no cover
print(tups)
## end if
        # Construct the generator using the packed tuples. Unless
        # invalid_error is set, yield None for any invalid indices
        # instead of raising an exception.
for tup in tups:
yield self._iter_return(tup, self.dist_single, invalid_error) | python | def dist_iter(self, g_nums, ats_1, ats_2, invalid_error=False):
""" Iterator over selected interatomic distances.
Distances are in Bohrs as with :meth:`dist_single`.
See `above <toc-generators_>`_ for more information on
calling options.
Parameters
----------
g_nums
|int| or length-R iterable |int| or |None| --
Index/indices of the desired geometry/geometries
ats_1
|int| or iterable |int| or |None| --
Index/indices of the first atom(s)
ats_2
|int| or iterable |int| or |None| --
Index/indices of the second atom(s)
invalid_error
|bool|, optional --
If |False| (the default), |None| values are returned for
results corresponding to invalid indices. If |True|,
exceptions are raised per normal.
Yields
------
dist
|npfloat_| --
Interatomic distance in Bohrs between each atom pair of
`ats_1` and `ats_2` from the corresponding geometries
of `g_nums`.
Raises
------
~exceptions.IndexError
If an invalid (out-of-range) `g_num` or `at_#` is provided.
~exceptions.ValueError
If all iterable objects are not the same length.
"""
# Imports
import numpy as np
from .utils import pack_tups
# Print the function inputs if debug mode is on
if _DEBUG: # pragma: no cover
print("g_nums = {0}".format(g_nums))
print("ats_1 = {0}".format(ats_1))
print("ats_2 = {0}".format(ats_2))
## end if
# Perform the None substitution
arglist = self._none_subst(g_nums, ats_1, ats_2)
# Expand/pack the tuples from the inputs
tups = pack_tups(*arglist)
# Dump the results if debug mode is on
if _DEBUG: # pragma: no cover
print(tups)
## end if
        # Construct the generator using the packed tuples. Unless
        # invalid_error is set, yield None for any invalid indices
        # instead of raising an exception.
for tup in tups:
yield self._iter_return(tup, self.dist_single, invalid_error) | [
"def",
"dist_iter",
"(",
"self",
",",
"g_nums",
",",
"ats_1",
",",
"ats_2",
",",
"invalid_error",
"=",
"False",
")",
":",
"import",
"numpy",
"as",
"np",
"from",
".",
"utils",
"import",
"pack_tups",
"if",
"_DEBUG",
":",
"print",
"(",
"\"g_nums = {0}\"",
".",
"format",
"(",
"g_nums",
")",
")",
"print",
"(",
"\"ats_1 = {0}\"",
".",
"format",
"(",
"ats_1",
")",
")",
"print",
"(",
"\"ats_2 = {0}\"",
".",
"format",
"(",
"ats_2",
")",
")",
"arglist",
"=",
"self",
".",
"_none_subst",
"(",
"g_nums",
",",
"ats_1",
",",
"ats_2",
")",
"tups",
"=",
"pack_tups",
"(",
"*",
"arglist",
")",
"if",
"_DEBUG",
":",
"print",
"(",
"tups",
")",
"for",
"tup",
"in",
"tups",
":",
"yield",
"self",
".",
"_iter_return",
"(",
"tup",
",",
"self",
".",
"dist_single",
",",
"invalid_error",
")"
] | Iterator over selected interatomic distances.
Distances are in Bohrs as with :meth:`dist_single`.
See `above <toc-generators_>`_ for more information on
calling options.
Parameters
----------
g_nums
|int| or length-R iterable |int| or |None| --
Index/indices of the desired geometry/geometries
ats_1
|int| or iterable |int| or |None| --
Index/indices of the first atom(s)
ats_2
|int| or iterable |int| or |None| --
Index/indices of the second atom(s)
invalid_error
|bool|, optional --
If |False| (the default), |None| values are returned for
results corresponding to invalid indices. If |True|,
exceptions are raised per normal.
Yields
------
dist
|npfloat_| --
Interatomic distance in Bohrs between each atom pair of
`ats_1` and `ats_2` from the corresponding geometries
of `g_nums`.
Raises
------
~exceptions.IndexError
If an invalid (out-of-range) `g_num` or `at_#` is provided.
~exceptions.ValueError
If all iterable objects are not the same length. | [
"Iterator",
"over",
"selected",
"interatomic",
"distances",
"."
] | 0b1b21662df6abc971407a9386db21a8796fbfe5 | https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/xyz.py#L841-L913 | train |
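
A minimal stand-in for the scalar/iterable broadcasting that `pack_tups` performs for `dist_iter`; the real implementation may differ, and this sketch assumes at least one argument is a finite, non-string iterable.

import itertools

def broadcast(*args):
    # Scalars repeat; iterables zip in parallel (a pack_tups-like sketch).
    iters = [a if hasattr(a, '__iter__') else itertools.repeat(a)
             for a in args]
    return list(zip(*iters))

print(broadcast(0, [0, 0], [1, 2]))  # [(0, 0, 1), (0, 0, 2)]
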
bskinn/opan | opan/xyz.py | OpanXYZ.angle_single | def angle_single(self, g_num, at_1, at_2, at_3):
""" Spanning angle among three atoms.
The indices `at_1` and `at_3` can be the same (yielding a
trivial zero angle), but `at_2` must be different from
both `at_1` and `at_3`.
Parameters
----------
g_num
|int| --
Index of the desired geometry
at_1
|int| --
Index of the first atom
at_2
|int| --
Index of the second atom
at_3
|int| --
Index of the third atom
Returns
-------
angle
|npfloat_| --
Spanning angle in degrees between `at_1`-`at_2`-`at_3`, from
geometry `g_num`
Raises
------
~exceptions.IndexError
If an invalid (out-of-range) `g_num` or `at_#` is provided
~exceptions.ValueError
If `at_2` is equal to either `at_1` or `at_3`
"""
# Imports
import numpy as np
from .utils import safe_cast as scast
from .utils.vector import vec_angle
# The below errors are explicitly thrown since they are multiplied by
# three when they are used as an index and thus give non-intuitive
# errors in later code.
# Complain if at_1 is invalid
if not(-self.num_atoms <= at_1 < self.num_atoms):
raise IndexError("Invalid index for 'at_1' ({0})".format(at_1))
# Complain if at_2 is invalid
if not(-self.num_atoms <= at_2 < self.num_atoms):
raise IndexError("Invalid index for 'at_2' ({0})".format(at_2))
# Complain if at_3 is invalid
if not(-self.num_atoms <= at_3 < self.num_atoms):
raise IndexError("Invalid index for 'at_3' ({0})".format(at_3))
# Should never be necessary (save for badly erroneous calling code),
# but coerce the at_x to their floor() values. This is again
        # needed since they are multiplied by three in the index expressions
# below, and can cause funny behavior when truncated by the indexing
at_1 = scast(np.floor(at_1), np.int_)
at_2 = scast(np.floor(at_2), np.int_)
at_3 = scast(np.floor(at_3), np.int_)
# Complain if at_2 is equal to either at_1 or at_3. Must factor in
# the possibility of negative indexing via modulo arithmetic.
if (at_2 % self.num_atoms) == (at_1 % self.num_atoms):
raise ValueError("'at_1' and 'at_2' must be different")
if (at_2 % self.num_atoms) == (at_3 % self.num_atoms):
raise ValueError("'at_2' and 'at_3' must be different")
# Trivial return if at_1 and at_3 are the same
if (at_1 % self.num_atoms) == (at_3 % self.num_atoms):
# Angle is identically zero in this case
return 0.0
## end if
# Store the displacement vectors from at_2 to at_1 and to at_3
# The np.float64 type should be retained through the displ_single call.
vec_2_1 = self.displ_single(g_num, at_2, at_1)
vec_2_3 = self.displ_single(g_num, at_2, at_3)
# Compute and return the calculated angle, in degrees
# v1 {dot} v2 == |v1||v2| * cos(theta)
angle = vec_angle(vec_2_1, vec_2_3)
return angle | python | def angle_single(self, g_num, at_1, at_2, at_3):
""" Spanning angle among three atoms.
The indices `at_1` and `at_3` can be the same (yielding a
trivial zero angle), but `at_2` must be different from
both `at_1` and `at_3`.
Parameters
----------
g_num
|int| --
Index of the desired geometry
at_1
|int| --
Index of the first atom
at_2
|int| --
Index of the second atom
at_3
|int| --
Index of the third atom
Returns
-------
angle
|npfloat_| --
Spanning angle in degrees between `at_1`-`at_2`-`at_3`, from
geometry `g_num`
Raises
------
~exceptions.IndexError
If an invalid (out-of-range) `g_num` or `at_#` is provided
~exceptions.ValueError
If `at_2` is equal to either `at_1` or `at_3`
"""
# Imports
import numpy as np
from .utils import safe_cast as scast
from .utils.vector import vec_angle
# The below errors are explicitly thrown since they are multiplied by
# three when they are used as an index and thus give non-intuitive
# errors in later code.
# Complain if at_1 is invalid
if not(-self.num_atoms <= at_1 < self.num_atoms):
raise IndexError("Invalid index for 'at_1' ({0})".format(at_1))
# Complain if at_2 is invalid
if not(-self.num_atoms <= at_2 < self.num_atoms):
raise IndexError("Invalid index for 'at_2' ({0})".format(at_2))
# Complain if at_3 is invalid
if not(-self.num_atoms <= at_3 < self.num_atoms):
raise IndexError("Invalid index for 'at_3' ({0})".format(at_3))
# Should never be necessary (save for badly erroneous calling code),
# but coerce the at_x to their floor() values. This is again
        # needed since they are multiplied by three in the index expressions
# below, and can cause funny behavior when truncated by the indexing
at_1 = scast(np.floor(at_1), np.int_)
at_2 = scast(np.floor(at_2), np.int_)
at_3 = scast(np.floor(at_3), np.int_)
# Complain if at_2 is equal to either at_1 or at_3. Must factor in
# the possibility of negative indexing via modulo arithmetic.
if (at_2 % self.num_atoms) == (at_1 % self.num_atoms):
raise ValueError("'at_1' and 'at_2' must be different")
if (at_2 % self.num_atoms) == (at_3 % self.num_atoms):
raise ValueError("'at_2' and 'at_3' must be different")
# Trivial return if at_1 and at_3 are the same
if (at_1 % self.num_atoms) == (at_3 % self.num_atoms):
# Angle is identically zero in this case
return 0.0
## end if
# Store the displacement vectors from at_2 to at_1 and to at_3
# The np.float64 type should be retained through the displ_single call.
vec_2_1 = self.displ_single(g_num, at_2, at_1)
vec_2_3 = self.displ_single(g_num, at_2, at_3)
# Compute and return the calculated angle, in degrees
# v1 {dot} v2 == |v1||v2| * cos(theta)
angle = vec_angle(vec_2_1, vec_2_3)
return angle | [
"def",
"angle_single",
"(",
"self",
",",
"g_num",
",",
"at_1",
",",
"at_2",
",",
"at_3",
")",
":",
"import",
"numpy",
"as",
"np",
"from",
".",
"utils",
"import",
"safe_cast",
"as",
"scast",
"from",
".",
"utils",
".",
"vector",
"import",
"vec_angle",
"if",
"not",
"(",
"-",
"self",
".",
"num_atoms",
"<=",
"at_1",
"<",
"self",
".",
"num_atoms",
")",
":",
"raise",
"IndexError",
"(",
"\"Invalid index for 'at_1' ({0})\"",
".",
"format",
"(",
"at_1",
")",
")",
"if",
"not",
"(",
"-",
"self",
".",
"num_atoms",
"<=",
"at_2",
"<",
"self",
".",
"num_atoms",
")",
":",
"raise",
"IndexError",
"(",
"\"Invalid index for 'at_2' ({0})\"",
".",
"format",
"(",
"at_2",
")",
")",
"if",
"not",
"(",
"-",
"self",
".",
"num_atoms",
"<=",
"at_3",
"<",
"self",
".",
"num_atoms",
")",
":",
"raise",
"IndexError",
"(",
"\"Invalid index for 'at_3' ({0})\"",
".",
"format",
"(",
"at_3",
")",
")",
"at_1",
"=",
"scast",
"(",
"np",
".",
"floor",
"(",
"at_1",
")",
",",
"np",
".",
"int_",
")",
"at_2",
"=",
"scast",
"(",
"np",
".",
"floor",
"(",
"at_2",
")",
",",
"np",
".",
"int_",
")",
"at_3",
"=",
"scast",
"(",
"np",
".",
"floor",
"(",
"at_3",
")",
",",
"np",
".",
"int_",
")",
"if",
"(",
"at_2",
"%",
"self",
".",
"num_atoms",
")",
"==",
"(",
"at_1",
"%",
"self",
".",
"num_atoms",
")",
":",
"raise",
"ValueError",
"(",
"\"'at_1' and 'at_2' must be different\"",
")",
"if",
"(",
"at_2",
"%",
"self",
".",
"num_atoms",
")",
"==",
"(",
"at_3",
"%",
"self",
".",
"num_atoms",
")",
":",
"raise",
"ValueError",
"(",
"\"'at_2' and 'at_3' must be different\"",
")",
"if",
"(",
"at_1",
"%",
"self",
".",
"num_atoms",
")",
"==",
"(",
"at_3",
"%",
"self",
".",
"num_atoms",
")",
":",
"return",
"0.0",
"vec_2_1",
"=",
"self",
".",
"displ_single",
"(",
"g_num",
",",
"at_2",
",",
"at_1",
")",
"vec_2_3",
"=",
"self",
".",
"displ_single",
"(",
"g_num",
",",
"at_2",
",",
"at_3",
")",
"angle",
"=",
"vec_angle",
"(",
"vec_2_1",
",",
"vec_2_3",
")",
"return",
"angle"
] | Spanning angle among three atoms.
The indices `at_1` and `at_3` can be the same (yielding a
trivial zero angle), but `at_2` must be different from
both `at_1` and `at_3`.
Parameters
----------
g_num
|int| --
Index of the desired geometry
at_1
|int| --
Index of the first atom
at_2
|int| --
Index of the second atom
at_3
|int| --
Index of the third atom
Returns
-------
angle
|npfloat_| --
Spanning angle in degrees between `at_1`-`at_2`-`at_3`, from
geometry `g_num`
Raises
------
~exceptions.IndexError
If an invalid (out-of-range) `g_num` or `at_#` is provided
~exceptions.ValueError
If `at_2` is equal to either `at_1` or `at_3` | [
"Spanning",
"angle",
"among",
"three",
"atoms",
"."
] | 0b1b21662df6abc971407a9386db21a8796fbfe5 | https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/xyz.py#L919-L1010 | train |
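
A hedged stand-in for `opan.utils.vector.vec_angle`, implementing the identity cited in the code comment, v1 . v2 == |v1||v2| cos(theta), and returning degrees; opan's actual implementation may differ in detail.

import numpy as np

def vec_angle(v1, v2):
    ct = np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))
    return np.degrees(np.arccos(np.clip(ct, -1.0, 1.0)))  # clip guards roundoff

print(vec_angle(np.array([1.0, 0.0, 0.0]), np.array([0.0, 1.0, 0.0])))  # 90.0
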
bskinn/opan | opan/xyz.py | OpanXYZ.angle_iter | def angle_iter(self, g_nums, ats_1, ats_2, ats_3, invalid_error=False):
""" Iterator over selected atomic angles.
Angles are in degrees as with :meth:`angle_single`.
See `above <toc-generators_>`_ for more information on
calling options.
Parameters
----------
g_nums
|int| or iterable |int| or |None| --
Index of the desired geometry
ats_1
|int| or iterable |int| or |None| --
Index of the first atom
ats_2
|int| or iterable |int| or |None| --
Index of the second atom
ats_3
|int| or iterable |int| or |None| --
Index of the third atom
invalid_error
|bool|, optional --
If |False| (the default), |None| values are returned for
results corresponding to invalid indices. If |True|,
exceptions are raised per normal.
Yields
------
angle
|npfloat_| --
Spanning angles in degrees between corresponding |br|
`ats_1`-`ats_2`-`ats_3`, from geometry/geometries `g_nums`
Raises
------
~exceptions.IndexError
If an invalid (out-of-range) `g_num` or `at_#` is provided.
~exceptions.ValueError
If all iterable objects are not the same length.
~exceptions.ValueError
If any `ats_2` element is equal to either the corresponding `ats_1`
or `ats_3` element.
"""
# Suitability of ats_n indices will be checked within the
# self.angle_single() calls and thus no check is needed here.
# Import the tuple-generating function
from .utils import pack_tups
# Print the function inputs if debug mode is on
if _DEBUG: # pragma: no cover
print("g_nums = {0}".format(g_nums))
print("ats_1 = {0}".format(ats_1))
print("ats_2 = {0}".format(ats_2))
print("ats_3 = {0}".format(ats_3))
## end if
# Perform the None substitution
arglist = self._none_subst(g_nums, ats_1, ats_2, ats_3)
# Expand/pack the tuples from the inputs
tups = pack_tups(*arglist)
# Dump the results if debug mode is on
if _DEBUG: # pragma: no cover
print(tups)
## end if
# Construct the generator using the packed tuples.
for tup in tups:
if _DEBUG: # pragma: no cover
print(tup)
## end if
yield self._iter_return(tup, self.angle_single, invalid_error) | python | def angle_iter(self, g_nums, ats_1, ats_2, ats_3, invalid_error=False):
""" Iterator over selected atomic angles.
Angles are in degrees as with :meth:`angle_single`.
See `above <toc-generators_>`_ for more information on
calling options.
Parameters
----------
g_nums
|int| or iterable |int| or |None| --
Index of the desired geometry
ats_1
|int| or iterable |int| or |None| --
Index of the first atom
ats_2
|int| or iterable |int| or |None| --
Index of the second atom
ats_3
|int| or iterable |int| or |None| --
Index of the third atom
invalid_error
|bool|, optional --
If |False| (the default), |None| values are returned for
results corresponding to invalid indices. If |True|,
exceptions are raised per normal.
Yields
------
angle
|npfloat_| --
Spanning angles in degrees between corresponding |br|
`ats_1`-`ats_2`-`ats_3`, from geometry/geometries `g_nums`
Raises
------
~exceptions.IndexError
If an invalid (out-of-range) `g_num` or `at_#` is provided.
~exceptions.ValueError
If all iterable objects are not the same length.
~exceptions.ValueError
If any `ats_2` element is equal to either the corresponding `ats_1`
or `ats_3` element.
"""
# Suitability of ats_n indices will be checked within the
# self.angle_single() calls and thus no check is needed here.
# Import the tuple-generating function
from .utils import pack_tups
# Print the function inputs if debug mode is on
if _DEBUG: # pragma: no cover
print("g_nums = {0}".format(g_nums))
print("ats_1 = {0}".format(ats_1))
print("ats_2 = {0}".format(ats_2))
print("ats_3 = {0}".format(ats_3))
## end if
# Perform the None substitution
arglist = self._none_subst(g_nums, ats_1, ats_2, ats_3)
# Expand/pack the tuples from the inputs
tups = pack_tups(*arglist)
# Dump the results if debug mode is on
if _DEBUG: # pragma: no cover
print(tups)
## end if
# Construct the generator using the packed tuples.
for tup in tups:
if _DEBUG: # pragma: no cover
print(tup)
## end if
yield self._iter_return(tup, self.angle_single, invalid_error) | [
"def",
"angle_iter",
"(",
"self",
",",
"g_nums",
",",
"ats_1",
",",
"ats_2",
",",
"ats_3",
",",
"invalid_error",
"=",
"False",
")",
":",
"from",
".",
"utils",
"import",
"pack_tups",
"if",
"_DEBUG",
":",
"print",
"(",
"\"g_nums = {0}\"",
".",
"format",
"(",
"g_nums",
")",
")",
"print",
"(",
"\"ats_1 = {0}\"",
".",
"format",
"(",
"ats_1",
")",
")",
"print",
"(",
"\"ats_2 = {0}\"",
".",
"format",
"(",
"ats_2",
")",
")",
"print",
"(",
"\"ats_3 = {0}\"",
".",
"format",
"(",
"ats_3",
")",
")",
"arglist",
"=",
"self",
".",
"_none_subst",
"(",
"g_nums",
",",
"ats_1",
",",
"ats_2",
",",
"ats_3",
")",
"tups",
"=",
"pack_tups",
"(",
"*",
"arglist",
")",
"if",
"_DEBUG",
":",
"print",
"(",
"tups",
")",
"for",
"tup",
"in",
"tups",
":",
"if",
"_DEBUG",
":",
"print",
"(",
"tup",
")",
"yield",
"self",
".",
"_iter_return",
"(",
"tup",
",",
"self",
".",
"angle_single",
",",
"invalid_error",
")"
] | Iterator over selected atomic angles.
Angles are in degrees as with :meth:`angle_single`.
See `above <toc-generators_>`_ for more information on
calling options.
Parameters
----------
g_nums
|int| or iterable |int| or |None| --
Index of the desired geometry
ats_1
|int| or iterable |int| or |None| --
Index of the first atom
ats_2
|int| or iterable |int| or |None| --
Index of the second atom
ats_3
|int| or iterable |int| or |None| --
Index of the third atom
invalid_error
|bool|, optional --
If |False| (the default), |None| values are returned for
results corresponding to invalid indices. If |True|,
exceptions are raised per normal.
Yields
------
angle
|npfloat_| --
Spanning angles in degrees between corresponding |br|
`ats_1`-`ats_2`-`ats_3`, from geometry/geometries `g_nums`
Raises
------
~exceptions.IndexError
If an invalid (out-of-range) `g_num` or `at_#` is provided.
~exceptions.ValueError
If all iterable objects are not the same length.
~exceptions.ValueError
If any `ats_2` element is equal to either the corresponding `ats_1`
or `ats_3` element. | [
"Iterator",
"over",
"selected",
"atomic",
"angles",
"."
] | 0b1b21662df6abc971407a9386db21a8796fbfe5 | https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/xyz.py#L1015-L1098 | train |
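
A runnable sketch of the `invalid_error` behavior the iterators share; the `_iter_return` name is taken from the calls above, but this body is an assumption, and `fake_angle` is a placeholder rather than real geometry code.

def iter_return(tup, func, invalid_error):
    # Swallow bad indices as None unless the caller asked for exceptions.
    try:
        return func(*tup)
    except (IndexError, ValueError):
        if invalid_error:
            raise
        return None

def fake_angle(g, a1, a2, a3):
    if a2 in (a1, a3):
        raise ValueError("central atom must differ")
    return 90.0  # placeholder value

print([iter_return((0, a, 1, 2), fake_angle, False) for a in range(3)])
# [90.0, None, 90.0]
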
bskinn/opan | opan/xyz.py | OpanXYZ.dihed_iter | def dihed_iter(self, g_nums, ats_1, ats_2, ats_3, ats_4, \
invalid_error=False):
""" Iterator over selected dihedral angles.
Angles are in degrees as with :meth:`dihed_single`.
See `above <toc-generators_>`_ for more information on
calling options.
Parameters
----------
g_nums
|int| or iterable |int| or |None| --
            Indices of the desired geometries
ats_1
|int| or iterable |int| or |None| --
Indices of the first atoms
ats_2
|int| or iterable |int| or |None| --
Indices of the second atoms
ats_3
|int| or iterable |int| or |None| --
Indices of the third atoms
ats_4
|int| or iterable |int| or |None| --
Indices of the fourth atoms
invalid_error
|bool|, optional --
If |False| (the default), |None| values are returned for
results corresponding to invalid indices. If |True|,
exceptions are raised per normal.
Yields
------
dihed
|npfloat_| --
Out-of-plane/dihedral angles in degrees for the indicated
atom sets `ats_1`-`ats_2`-`ats_3`-`ats_4`, drawn from
the respective `g_nums`.
Raises
------
~exceptions.IndexError
If an invalid (out-of-range) `g_num` or `at_#` is provided.
~exceptions.ValueError
If all iterable objects are not the same length.
~exceptions.ValueError
If any corresponding `ats_#` indices are equal.
~opan.error.XYZError
(typecode :data:`~opan.error.XYZError.DIHED`) If either
of the atom trios (1-2-3 or
2-3-4) is too close to linearity for any group of `ats_#`
"""
# Suitability of ats_n indices will be checked within the
# self.dihed_single() calls and thus no check is needed here.
# Import the tuple-generating function
from .utils import pack_tups
# Print the function inputs if debug mode is on
if _DEBUG: # pragma: no cover
print("g_nums = {0}".format(g_nums))
print("ats_1 = {0}".format(ats_1))
print("ats_2 = {0}".format(ats_2))
print("ats_3 = {0}".format(ats_3))
print("ats_4 = {0}".format(ats_4))
## end if
# Perform the None substitution
arglist = self._none_subst(g_nums, ats_1, ats_2, ats_3, ats_4)
# Expand/pack the tuples from the inputs
tups = pack_tups(*arglist)
# Dump the results if debug mode is on
if _DEBUG: # pragma: no cover
print(tups)
## end if
# Construct the generator using the packed tuples.
for tup in tups:
yield self._iter_return(tup, self.dihed_single, invalid_error) | python | def dihed_iter(self, g_nums, ats_1, ats_2, ats_3, ats_4, \
invalid_error=False):
""" Iterator over selected dihedral angles.
Angles are in degrees as with :meth:`dihed_single`.
See `above <toc-generators_>`_ for more information on
calling options.
Parameters
----------
g_nums
|int| or iterable |int| or |None| --
            Indices of the desired geometries
ats_1
|int| or iterable |int| or |None| --
Indices of the first atoms
ats_2
|int| or iterable |int| or |None| --
Indices of the second atoms
ats_3
|int| or iterable |int| or |None| --
Indices of the third atoms
ats_4
|int| or iterable |int| or |None| --
Indices of the fourth atoms
invalid_error
|bool|, optional --
If |False| (the default), |None| values are returned for
results corresponding to invalid indices. If |True|,
exceptions are raised per normal.
Yields
------
dihed
|npfloat_| --
Out-of-plane/dihedral angles in degrees for the indicated
atom sets `ats_1`-`ats_2`-`ats_3`-`ats_4`, drawn from
the respective `g_nums`.
Raises
------
~exceptions.IndexError
If an invalid (out-of-range) `g_num` or `at_#` is provided.
~exceptions.ValueError
If all iterable objects are not the same length.
~exceptions.ValueError
If any corresponding `ats_#` indices are equal.
~opan.error.XYZError
(typecode :data:`~opan.error.XYZError.DIHED`) If either
of the atom trios (1-2-3 or
2-3-4) is too close to linearity for any group of `ats_#`
"""
# Suitability of ats_n indices will be checked within the
# self.dihed_single() calls and thus no check is needed here.
# Import the tuple-generating function
from .utils import pack_tups
# Print the function inputs if debug mode is on
if _DEBUG: # pragma: no cover
print("g_nums = {0}".format(g_nums))
print("ats_1 = {0}".format(ats_1))
print("ats_2 = {0}".format(ats_2))
print("ats_3 = {0}".format(ats_3))
print("ats_4 = {0}".format(ats_4))
## end if
# Perform the None substitution
arglist = self._none_subst(g_nums, ats_1, ats_2, ats_3, ats_4)
# Expand/pack the tuples from the inputs
tups = pack_tups(*arglist)
# Dump the results if debug mode is on
if _DEBUG: # pragma: no cover
print(tups)
## end if
# Construct the generator using the packed tuples.
for tup in tups:
yield self._iter_return(tup, self.dihed_single, invalid_error) | [
"def",
"dihed_iter",
"(",
"self",
",",
"g_nums",
",",
"ats_1",
",",
"ats_2",
",",
"ats_3",
",",
"ats_4",
",",
"invalid_error",
"=",
"False",
")",
":",
"from",
".",
"utils",
"import",
"pack_tups",
"if",
"_DEBUG",
":",
"print",
"(",
"\"g_nums = {0}\"",
".",
"format",
"(",
"g_nums",
")",
")",
"print",
"(",
"\"ats_1 = {0}\"",
".",
"format",
"(",
"ats_1",
")",
")",
"print",
"(",
"\"ats_2 = {0}\"",
".",
"format",
"(",
"ats_2",
")",
")",
"print",
"(",
"\"ats_3 = {0}\"",
".",
"format",
"(",
"ats_3",
")",
")",
"print",
"(",
"\"ats_4 = {0}\"",
".",
"format",
"(",
"ats_4",
")",
")",
"arglist",
"=",
"self",
".",
"_none_subst",
"(",
"g_nums",
",",
"ats_1",
",",
"ats_2",
",",
"ats_3",
",",
"ats_4",
")",
"tups",
"=",
"pack_tups",
"(",
"*",
"arglist",
")",
"if",
"_DEBUG",
":",
"print",
"(",
"tups",
")",
"for",
"tup",
"in",
"tups",
":",
"yield",
"self",
".",
"_iter_return",
"(",
"tup",
",",
"self",
".",
"dihed_single",
",",
"invalid_error",
")"
] | Iterator over selected dihedral angles.
Angles are in degrees as with :meth:`dihed_single`.
See `above <toc-generators_>`_ for more information on
calling options.
Parameters
----------
g_nums
|int| or iterable |int| or |None| --
            Indices of the desired geometries
ats_1
|int| or iterable |int| or |None| --
Indices of the first atoms
ats_2
|int| or iterable |int| or |None| --
Indices of the second atoms
ats_3
|int| or iterable |int| or |None| --
Indices of the third atoms
ats_4
|int| or iterable |int| or |None| --
Indices of the fourth atoms
invalid_error
|bool|, optional --
If |False| (the default), |None| values are returned for
results corresponding to invalid indices. If |True|,
exceptions are raised per normal.
Yields
------
dihed
|npfloat_| --
Out-of-plane/dihedral angles in degrees for the indicated
atom sets `ats_1`-`ats_2`-`ats_3`-`ats_4`, drawn from
the respective `g_nums`.
Raises
------
~exceptions.IndexError
If an invalid (out-of-range) `g_num` or `at_#` is provided.
~exceptions.ValueError
If all iterable objects are not the same length.
~exceptions.ValueError
If any corresponding `ats_#` indices are equal.
~opan.error.XYZError
(typecode :data:`~opan.error.XYZError.DIHED`) If either
of the atom trios (1-2-3 or
2-3-4) is too close to linearity for any group of `ats_#` | [
"Iterator",
"over",
"selected",
"dihedral",
"angles",
"."
] | 0b1b21662df6abc971407a9386db21a8796fbfe5 | https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/xyz.py#L1293-L1384 | train |
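
A standard dihedral computation as a hedged sketch of what each yielded value represents; opan's `dihed_single` may use a different sign convention, and the four points below are invented for illustration.

import numpy as np

def dihedral(p1, p2, p3, p4):
    b1, b2, b3 = p2 - p1, p3 - p2, p4 - p3
    n1, n2 = np.cross(b1, b2), np.cross(b2, b3)
    m1 = np.cross(n1, b2 / np.linalg.norm(b2))
    return np.degrees(np.arctan2(np.dot(m1, n2), np.dot(n1, n2)))

pts = [np.array(p, dtype=float)
       for p in [(0, 1, 0), (0, 0, 0), (1, 0, 0), (1, 0, 1)]]
print(dihedral(*pts))  # -90.0 here; magnitude 90, sign is convention-dependent
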
bskinn/opan | opan/xyz.py | OpanXYZ.displ_single | def displ_single(self, g_num, at_1, at_2):
""" Displacement vector between two atoms.
Returns the displacement vector pointing from `at_1`
toward `at_2` from geometry `g_num`.
If `at_1` == `at_2` a strict zero vector is returned.
Displacement vector is returned in units of Bohrs.
Parameters
----------
g_num
|int| -- Index of the desired geometry
at_1
|int| -- Index of the first atom
at_2
|int| -- Index of the second atom
Returns
-------
displ
length-3 |npfloat_| --
Displacement vector from `at_1` to `at_2`
Raises
------
~exceptions.IndexError
If an invalid (out-of-range) `g_num` or `at_#` is provided
"""
# Library imports
import numpy as np
from .utils import safe_cast as scast
# The below errors are explicitly thrown since they are multiplied by
# three when they are used as an index and thus give non-intuitive
# errors.
# Complain if at_1 is invalid
if not (-self.num_atoms <= at_1 < self.num_atoms):
raise IndexError("Invalid index for 'at_1' ({0})".format(at_1))
# Complain if at_2 is invalid
if not (-self.num_atoms <= at_2 < self.num_atoms):
raise IndexError("Invalid index for 'at_2' ({0})".format(at_2))
# Should never be necessary (save for badly erroneous calling code),
# but coerce at_1 and at_2 to their floor() values. This is again
        # needed since they are multiplied by three in the index expressions
# below, and can cause funny behavior when truncated by the indexing
at_1 = scast(np.floor(at_1), np.int_)
at_2 = scast(np.floor(at_2), np.int_)
# If the atom indices are the same, return trivial zero vector
if (at_1 % self.num_atoms) == (at_2 % self.num_atoms):
return np.array([0.0, 0.0, 0.0])
## end if
# Retrieve the geometry; np.float_ type should be retained
g = self.geom_single(g_num)
        # Calculate the displacement vector
displ = np.array([ g[i + 3*at_2] - g[i + 3*at_1] for i in range(3) ])
# Return the displacement vector
return displ | python | def displ_single(self, g_num, at_1, at_2):
""" Displacement vector between two atoms.
Returns the displacement vector pointing from `at_1`
toward `at_2` from geometry `g_num`.
If `at_1` == `at_2` a strict zero vector is returned.
Displacement vector is returned in units of Bohrs.
Parameters
----------
g_num
|int| -- Index of the desired geometry
at_1
|int| -- Index of the first atom
at_2
|int| -- Index of the second atom
Returns
-------
displ
length-3 |npfloat_| --
Displacement vector from `at_1` to `at_2`
Raises
------
~exceptions.IndexError
If an invalid (out-of-range) `g_num` or `at_#` is provided
"""
# Library imports
import numpy as np
from .utils import safe_cast as scast
# The below errors are explicitly thrown since they are multiplied by
# three when they are used as an index and thus give non-intuitive
# errors.
# Complain if at_1 is invalid
if not (-self.num_atoms <= at_1 < self.num_atoms):
raise IndexError("Invalid index for 'at_1' ({0})".format(at_1))
# Complain if at_2 is invalid
if not (-self.num_atoms <= at_2 < self.num_atoms):
raise IndexError("Invalid index for 'at_2' ({0})".format(at_2))
# Should never be necessary (save for badly erroneous calling code),
# but coerce at_1 and at_2 to their floor() values. This is again
        # needed since they are multiplied by three in the index expressions
# below, and can cause funny behavior when truncated by the indexing
at_1 = scast(np.floor(at_1), np.int_)
at_2 = scast(np.floor(at_2), np.int_)
# If the atom indices are the same, return trivial zero vector
if (at_1 % self.num_atoms) == (at_2 % self.num_atoms):
return np.array([0.0, 0.0, 0.0])
## end if
# Retrieve the geometry; np.float_ type should be retained
g = self.geom_single(g_num)
        # Calculate the displacement vector
displ = np.array([ g[i + 3*at_2] - g[i + 3*at_1] for i in range(3) ])
# Return the displacement vector
return displ | [
"def",
"displ_single",
"(",
"self",
",",
"g_num",
",",
"at_1",
",",
"at_2",
")",
":",
"import",
"numpy",
"as",
"np",
"from",
".",
"utils",
"import",
"safe_cast",
"as",
"scast",
"if",
"not",
"(",
"-",
"self",
".",
"num_atoms",
"<=",
"at_1",
"<",
"self",
".",
"num_atoms",
")",
":",
"raise",
"IndexError",
"(",
"\"Invalid index for 'at_1' ({0})\"",
".",
"format",
"(",
"at_1",
")",
")",
"if",
"not",
"(",
"-",
"self",
".",
"num_atoms",
"<=",
"at_2",
"<",
"self",
".",
"num_atoms",
")",
":",
"raise",
"IndexError",
"(",
"\"Invalid index for 'at_2' ({0})\"",
".",
"format",
"(",
"at_2",
")",
")",
"at_1",
"=",
"scast",
"(",
"np",
".",
"floor",
"(",
"at_1",
")",
",",
"np",
".",
"int_",
")",
"at_2",
"=",
"scast",
"(",
"np",
".",
"floor",
"(",
"at_2",
")",
",",
"np",
".",
"int_",
")",
"if",
"(",
"at_1",
"%",
"self",
".",
"num_atoms",
")",
"==",
"(",
"at_2",
"%",
"self",
".",
"num_atoms",
")",
":",
"return",
"np",
".",
"array",
"(",
"[",
"0.0",
",",
"0.0",
",",
"0.0",
"]",
")",
"g",
"=",
"self",
".",
"geom_single",
"(",
"g_num",
")",
"displ",
"=",
"np",
".",
"array",
"(",
"[",
"g",
"[",
"i",
"+",
"3",
"*",
"at_2",
"]",
"-",
"g",
"[",
"i",
"+",
"3",
"*",
"at_1",
"]",
"for",
"i",
"in",
"range",
"(",
"3",
")",
"]",
")",
"return",
"displ"
] | Displacement vector between two atoms.
Returns the displacement vector pointing from `at_1`
toward `at_2` from geometry `g_num`.
If `at_1` == `at_2` a strict zero vector is returned.
Displacement vector is returned in units of Bohrs.
Parameters
----------
g_num
|int| -- Index of the desired geometry
at_1
|int| -- Index of the first atom
at_2
|int| -- Index of the second atom
Returns
-------
displ
length-3 |npfloat_| --
Displacement vector from `at_1` to `at_2`
Raises
------
~exceptions.IndexError
If an invalid (out-of-range) `g_num` or `at_#` is provided | [
"Displacement",
"vector",
"between",
"two",
"atoms",
"."
] | 0b1b21662df6abc971407a9386db21a8796fbfe5 | https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/xyz.py#L1390-L1457 | train |
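
A runnable sketch of the antisymmetry `displ_single` implies: the vector points from `at_1` toward `at_2`, so swapping the atoms negates it. The two-atom geometry is invented for illustration.

import numpy as np

geom = np.array([0.0, 0.0, 0.0,   1.0, -2.0, 3.0])  # two atoms, Bohrs

def displ(at_1, at_2):
    return np.array([geom[i + 3*at_2] - geom[i + 3*at_1] for i in range(3)])

print(displ(0, 1))                 # [ 1. -2.  3.]
print(displ(0, 1) + displ(1, 0))   # [0. 0. 0.]
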
bskinn/opan | opan/xyz.py | OpanXYZ.displ_iter | def displ_iter(self, g_nums, ats_1, ats_2, invalid_error=False):
""" Iterator over indicated displacement vectors.
Displacements are in Bohrs as with :meth:`displ_single`.
See `above <toc-generators_>`_ for more information on
calling options.
Parameters
----------
g_nums
|int| or length-R iterable |int| or |None| --
Index/indices of the desired geometry/geometries
ats_1
|int| or length-R iterable |int| or |None| --
Index/indices of the first atom(s)
ats_2
|int| or length-R iterable |int| or |None| --
Index/indices of the second atom(s)
invalid_error
|bool|, optional --
If |False| (the default), |None| values are returned for
results corresponding to invalid indices. If |True|,
exceptions are raised per normal.
Yields
------
displ
|npfloat_| --
Displacement vector in Bohrs between each atom pair of |br|
`ats_1` :math:`\\rightarrow` `ats_2` from the corresponding
geometries of `g_nums`.
Raises
------
~exceptions.IndexError
If an invalid (out-of-range) `g_num` or `at_#` is provided.
~exceptions.ValueError
If all iterable objects are not the same length.
"""
# Import the tuple-generating function
from .utils import pack_tups
# Print the function inputs if debug mode is on
if _DEBUG: # pragma: no cover
print("g_nums = {0}".format(g_nums))
print("ats_1 = {0}".format(ats_1))
print("ats_2 = {0}".format(ats_2))
## end if
# Perform the None substitution
arglist = self._none_subst(g_nums, ats_1, ats_2)
# Expand/pack the tuples from the inputs
tups = pack_tups(*arglist)
# Dump the results if debug mode is on
if _DEBUG: # pragma: no cover
print(tups)
## end if
# Construct the generator using the packed tuples.
for tup in tups:
yield self._iter_return(tup, self.displ_single, invalid_error) | python | def displ_iter(self, g_nums, ats_1, ats_2, invalid_error=False):
""" Iterator over indicated displacement vectors.
Displacements are in Bohrs as with :meth:`displ_single`.
See `above <toc-generators_>`_ for more information on
calling options.
Parameters
----------
g_nums
|int| or length-R iterable |int| or |None| --
Index/indices of the desired geometry/geometries
ats_1
|int| or length-R iterable |int| or |None| --
Index/indices of the first atom(s)
ats_2
|int| or length-R iterable |int| or |None| --
Index/indices of the second atom(s)
invalid_error
|bool|, optional --
If |False| (the default), |None| values are returned for
results corresponding to invalid indices. If |True|,
exceptions are raised per normal.
Yields
------
displ
|npfloat_| --
Displacement vector in Bohrs between each atom pair of |br|
`ats_1` :math:`\\rightarrow` `ats_2` from the corresponding
geometries of `g_nums`.
Raises
------
~exceptions.IndexError
If an invalid (out-of-range) `g_num` or `at_#` is provided.
~exceptions.ValueError
If all iterable objects are not the same length.
"""
# Import the tuple-generating function
from .utils import pack_tups
# Print the function inputs if debug mode is on
if _DEBUG: # pragma: no cover
print("g_nums = {0}".format(g_nums))
print("ats_1 = {0}".format(ats_1))
print("ats_2 = {0}".format(ats_2))
## end if
# Perform the None substitution
arglist = self._none_subst(g_nums, ats_1, ats_2)
# Expand/pack the tuples from the inputs
tups = pack_tups(*arglist)
# Dump the results if debug mode is on
if _DEBUG: # pragma: no cover
print(tups)
## end if
# Construct the generator using the packed tuples.
for tup in tups:
yield self._iter_return(tup, self.displ_single, invalid_error) | [
"def",
"displ_iter",
"(",
"self",
",",
"g_nums",
",",
"ats_1",
",",
"ats_2",
",",
"invalid_error",
"=",
"False",
")",
":",
"from",
".",
"utils",
"import",
"pack_tups",
"if",
"_DEBUG",
":",
"print",
"(",
"\"g_nums = {0}\"",
".",
"format",
"(",
"g_nums",
")",
")",
"print",
"(",
"\"ats_1 = {0}\"",
".",
"format",
"(",
"ats_1",
")",
")",
"print",
"(",
"\"ats_2 = {0}\"",
".",
"format",
"(",
"ats_2",
")",
")",
"arglist",
"=",
"self",
".",
"_none_subst",
"(",
"g_nums",
",",
"ats_1",
",",
"ats_2",
")",
"tups",
"=",
"pack_tups",
"(",
"*",
"arglist",
")",
"if",
"_DEBUG",
":",
"print",
"(",
"tups",
")",
"for",
"tup",
"in",
"tups",
":",
"yield",
"self",
".",
"_iter_return",
"(",
"tup",
",",
"self",
".",
"displ_single",
",",
"invalid_error",
")"
] | Iterator over indicated displacement vectors.
Displacements are in Bohrs as with :meth:`displ_single`.
See `above <toc-generators_>`_ for more information on
calling options.
Parameters
----------
g_nums
|int| or length-R iterable |int| or |None| --
Index/indices of the desired geometry/geometries
ats_1
|int| or length-R iterable |int| or |None| --
Index/indices of the first atom(s)
ats_2
|int| or length-R iterable |int| or |None| --
Index/indices of the second atom(s)
invalid_error
|bool|, optional --
If |False| (the default), |None| values are returned for
results corresponding to invalid indices. If |True|,
exceptions are raised per normal.
Yields
------
displ
|npfloat_| --
Displacement vector in Bohrs between each atom pair of |br|
`ats_1` :math:`\\rightarrow` `ats_2` from the corresponding
geometries of `g_nums`.
Raises
------
~exceptions.IndexError
If an invalid (out-of-range) `g_num` or `at_#` is provided.
~exceptions.ValueError
If all iterable objects are not the same length. | [
"Iterator",
"over",
"indicated",
"displacement",
"vectors",
"."
] | 0b1b21662df6abc971407a9386db21a8796fbfe5 | https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/xyz.py#L1462-L1531 | train |
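
A sketch of the one-to-many call pattern that `None` expansion gives `displ_iter` (atom 0 against every atom); object construction is elided, so the local `displ` helper repeats the arithmetic on an invented three-atom geometry.

import numpy as np

geom = np.array([0.0, 0.0, 0.0,   1.0, 0.0, 0.0,   0.0, 2.0, 0.0])

def displ(at_1, at_2):
    return np.array([geom[i + 3*at_2] - geom[i + 3*at_1] for i in range(3)])

# Equivalent in spirit to displ_iter(0, 0, None) on an OpanXYZ instance:
vecs = [displ(0, a2) for a2 in range(3)]
print(vecs[0])  # zero vector for the self-pair
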
bskinn/opan | opan/xyz.py | OpanXYZ._none_subst | def _none_subst(self, *args):
""" Helper function to insert full ranges for |None| for X_iter methods.
Custom method, specifically tailored, taking in the arguments from
an X_iter method and performing the replacement of |None| after
error-checking the arguments for a max of one |None| value, and ensuring
that if a |None| is present, no other non-|str| iterables are present.
Parameters
----------
args : 3-5 arguments of |int| or iterable |int|, or |None|
First argument is always the indices for the geometries; all
following are for the atoms in sequence as required for the
particular :samp:`{x}_iter` method
Returns
-------
arglist : 3-5 arguments, matching input params
Argument list, with |None| substituted if validly present
Raises
------
~exceptions.ValueError : If more than one |None| argument is present
~exceptions.ValueError : If an arg is non-|str| iterable when one
|None| is present
"""
# Imports
import numpy as np
# Initialize argument list return value, and as None not found
arglist = [a for a in args]
none_found = False
# Check for None values
none_vals = list(map(lambda e: isinstance(e, type(None)), arglist))
# Error if more than one None; handle if exactly one; pass through if
# none.
if np.count_nonzero(none_vals) > 1:
raise ValueError(
"Multiple 'None' values [indices {0}] not supported"
.format(tuple(np.nonzero(none_vals)[0])))
elif np.count_nonzero(none_vals) == 1:
# Must be no iterables that are not strings. Thus, an element-wise
# test for iterability and an element-wise test for stringiness
# must give matching arrays
if not all(np.equal(list(map(np.iterable, arglist)),
list(map(lambda e: isinstance(e, str), arglist)))):
raise ValueError(
"'None' as parameter invalid with non-str iterables")
## end if
# Parameters okay; replace the None with the appropriate range()
none_found = True
none_loc = np.nonzero(none_vals)[0][0]
arglist[none_loc] = \
range(self.num_geoms if none_loc == 0 else self.num_atoms)
## end if
        # Return the substituted arguments list
return arglist | python | def _none_subst(self, *args):
""" Helper function to insert full ranges for |None| for X_iter methods.
Custom method, specifically tailored, taking in the arguments from
an X_iter method and performing the replacement of |None| after
error-checking the arguments for a max of one |None| value, and ensuring
that if a |None| is present, no other non-|str| iterables are present.
Parameters
----------
args : 3-5 arguments of |int| or iterable |int|, or |None|
First argument is always the indices for the geometries; all
following are for the atoms in sequence as required for the
particular :samp:`{x}_iter` method
Returns
-------
arglist : 3-5 arguments, matching input params
Argument list, with |None| substituted if validly present
Raises
------
~exceptions.ValueError : If more than one |None| argument is present
~exceptions.ValueError : If an arg is non-|str| iterable when one
|None| is present
"""
# Imports
import numpy as np
# Initialize argument list return value; no None found yet
arglist = [a for a in args]
none_found = False
# Check for None values
none_vals = list(map(lambda e: isinstance(e, type(None)), arglist))
# Error if more than one None; handle if exactly one; pass through if
# none.
if np.count_nonzero(none_vals) > 1:
raise ValueError(
"Multiple 'None' values [indices {0}] not supported"
.format(tuple(np.nonzero(none_vals)[0])))
elif np.count_nonzero(none_vals) == 1:
# Must be no iterables that are not strings. Thus, an element-wise
# test for iterability and an element-wise test for stringiness
# must give matching arrays
if not all(np.equal(list(map(np.iterable, arglist)),
list(map(lambda e: isinstance(e, str), arglist)))):
raise ValueError(
"'None' as parameter invalid with non-str iterables")
## end if
# Parameters okay; replace the None with the appropriate range()
none_found = True
none_loc = np.nonzero(none_vals)[0][0]
arglist[none_loc] = \
range(self.num_geoms if none_loc == 0 else self.num_atoms)
## end if
# Return the substituted arguments list
return arglist | [
"def",
"_none_subst",
"(",
"self",
",",
"*",
"args",
")",
":",
"import",
"numpy",
"as",
"np",
"arglist",
"=",
"[",
"a",
"for",
"a",
"in",
"args",
"]",
"none_found",
"=",
"False",
"none_vals",
"=",
"list",
"(",
"map",
"(",
"lambda",
"e",
":",
"isinstance",
"(",
"e",
",",
"type",
"(",
"None",
")",
")",
",",
"arglist",
")",
")",
"if",
"np",
".",
"count_nonzero",
"(",
"none_vals",
")",
">",
"1",
":",
"raise",
"ValueError",
"(",
"\"Multiple 'None' values [indices {0}] not supported\"",
".",
"format",
"(",
"tuple",
"(",
"np",
".",
"nonzero",
"(",
"none_vals",
")",
"[",
"0",
"]",
")",
")",
")",
"elif",
"np",
".",
"count_nonzero",
"(",
"none_vals",
")",
"==",
"1",
":",
"if",
"not",
"all",
"(",
"np",
".",
"equal",
"(",
"list",
"(",
"map",
"(",
"np",
".",
"iterable",
",",
"arglist",
")",
")",
",",
"list",
"(",
"map",
"(",
"lambda",
"e",
":",
"isinstance",
"(",
"e",
",",
"str",
")",
",",
"arglist",
")",
")",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"'None' as parameter invalid with non-str iterables\"",
")",
"none_found",
"=",
"True",
"none_loc",
"=",
"np",
".",
"nonzero",
"(",
"none_vals",
")",
"[",
"0",
"]",
"[",
"0",
"]",
"arglist",
"[",
"none_loc",
"]",
"=",
"range",
"(",
"self",
".",
"num_geoms",
"if",
"none_loc",
"==",
"0",
"else",
"self",
".",
"num_atoms",
")",
"return",
"arglist"
] | Helper function to insert full ranges for |None| for X_iter methods.
Custom method, specifically tailored, taking in the arguments from
an X_iter method and performing the replacement of |None| after
error-checking the arguments for a max of one |None| value, and ensuring
that if a |None| is present, no other non-|str| iterables are present.
Parameters
----------
args : 3-5 arguments of |int| or iterable |int|, or |None|
First argument is always the indices for the geometries; all
following are for the atoms in sequence as required for the
particular :samp:`{x}_iter` method
Returns
-------
arglist : 3-5 arguments, matching input params
Argument list, with |None| substituted if validly present
Raises
------
~exceptions.ValueError : If more than one |None| argument is present
~exceptions.ValueError : If an arg is non-|str| iterable when one
|None| is present | [
"Helper",
"function",
"to",
"insert",
"full",
"ranges",
"for",
"|None|",
"for",
"X_iter",
"methods",
"."
] | 0b1b21662df6abc971407a9386db21a8796fbfe5 | https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/xyz.py#L1537-L1599 | train |
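A minimal sketch of the substitution behavior described above, assuming an already-constructed OpanXYZ instance xyz with num_geoms and num_atoms populated; the method is private, so this is illustrative only.

# A single None in the geometry slot expands to range(xyz.num_geoms)
args = xyz._none_subst(None, 0, 1)
assert args[0] == range(xyz.num_geoms) and args[1:] == [0, 1]

# A single None in an atom slot expands to range(xyz.num_atoms)
args = xyz._none_subst(0, None, 1)
assert args[1] == range(xyz.num_atoms)

# Two None values, or None mixed with a non-str iterable, raise ValueError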
vfaronov/turq | turq/util/http.py | guess_external_url | def guess_external_url(local_host, port):
"""Return a URL that is most likely to route to `local_host` from outside.
The point is that we may be running on a remote host from the user's
point of view, so they can't access `local_host` from a Web browser just
by typing ``http://localhost:12345/``.
"""
if local_host in ['0.0.0.0', '::']:
# The server is listening on all interfaces, but we have to pick one.
# The system's FQDN should give us a hint.
local_host = socket.getfqdn()
# https://github.com/vfaronov/turq/issues/9
match = IPV4_REVERSE_DNS.match(local_host)
if match:
local_host = '.'.join(reversed(match.groups()))
else:
match = IPV6_REVERSE_DNS.match(local_host)
if match:
address_as_int = int(''.join(reversed(match.groups())), 16)
local_host = str(IPv6Address(address_as_int))
if ':' in local_host:
# Looks like an IPv6 literal. Has to be wrapped in brackets in a URL.
# Also, an IPv6 address can have a zone ID tacked on the end,
# like "%3". RFC 6874 allows encoding them in URLs as well,
# but in my experiments on Windows 8.1, I had more success
# removing the zone ID altogether. After all this is just a guess.
local_host = '[%s]' % local_host.rsplit('%', 1)[0]
return 'http://%s:%d/' % (local_host, port) | python | def guess_external_url(local_host, port):
"""Return a URL that is most likely to route to `local_host` from outside.
The point is that we may be running on a remote host from the user's
point of view, so they can't access `local_host` from a Web browser just
by typing ``http://localhost:12345/``.
"""
if local_host in ['0.0.0.0', '::']:
# The server is listening on all interfaces, but we have to pick one.
# The system's FQDN should give us a hint.
local_host = socket.getfqdn()
# https://github.com/vfaronov/turq/issues/9
match = IPV4_REVERSE_DNS.match(local_host)
if match:
local_host = '.'.join(reversed(match.groups()))
else:
match = IPV6_REVERSE_DNS.match(local_host)
if match:
address_as_int = int(''.join(reversed(match.groups())), 16)
local_host = str(IPv6Address(address_as_int))
if ':' in local_host:
# Looks like an IPv6 literal. Has to be wrapped in brackets in a URL.
# Also, an IPv6 address can have a zone ID tacked on the end,
# like "%3". RFC 6874 allows encoding them in URLs as well,
# but in my experiments on Windows 8.1, I had more success
# removing the zone ID altogether. After all this is just a guess.
local_host = '[%s]' % local_host.rsplit('%', 1)[0]
return 'http://%s:%d/' % (local_host, port) | [
"def",
"guess_external_url",
"(",
"local_host",
",",
"port",
")",
":",
"if",
"local_host",
"in",
"[",
"'0.0.0.0'",
",",
"'::'",
"]",
":",
"local_host",
"=",
"socket",
".",
"getfqdn",
"(",
")",
"match",
"=",
"IPV4_REVERSE_DNS",
".",
"match",
"(",
"local_host",
")",
"if",
"match",
":",
"local_host",
"=",
"'.'",
".",
"join",
"(",
"reversed",
"(",
"match",
".",
"groups",
"(",
")",
")",
")",
"else",
":",
"match",
"=",
"IPV6_REVERSE_DNS",
".",
"match",
"(",
"local_host",
")",
"if",
"match",
":",
"address_as_int",
"=",
"int",
"(",
"''",
".",
"join",
"(",
"reversed",
"(",
"match",
".",
"groups",
"(",
")",
")",
")",
",",
"16",
")",
"local_host",
"=",
"str",
"(",
"IPv6Address",
"(",
"address_as_int",
")",
")",
"if",
"':'",
"in",
"local_host",
":",
"local_host",
"=",
"'[%s]'",
"%",
"local_host",
".",
"rsplit",
"(",
"'%'",
",",
"1",
")",
"[",
"0",
"]",
"return",
"'http://%s:%d/'",
"%",
"(",
"local_host",
",",
"port",
")"
] | Return a URL that is most likely to route to `local_host` from outside.
The point is that we may be running on a remote host from the user's
point of view, so they can't access `local_host` from a Web browser just
by typing ``http://localhost:12345/``. | [
"Return",
"a",
"URL",
"that",
"is",
"most",
"likely",
"to",
"route",
"to",
"local_host",
"from",
"outside",
"."
] | 3ef1261442b90d6d947b8fe2362e19e7f47a64c3 | https://github.com/vfaronov/turq/blob/3ef1261442b90d6d947b8fe2362e19e7f47a64c3/turq/util/http.py#L49-L79 | train |
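Usage sketch for the helper above; the function is explicitly a guess, so the hostname produced for a wildcard bind depends on the local DNS configuration.

from turq.util.http import guess_external_url

# Wildcard bind addresses fall back to the system FQDN
print(guess_external_url('0.0.0.0', 13085))
# e.g. 'http://myhost.example.com:13085/'

# IPv6 literals are bracketed and any zone ID is dropped
print(guess_external_url('fe80::1%eth0', 8080))  # 'http://[fe80::1]:8080/'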
openvax/isovar | isovar/dataframe_builder.py | DataFrameBuilder._check_column_lengths | def _check_column_lengths(self):
"""
Make sure columns are of the same length or else DataFrame construction
will fail.
"""
column_lengths_dict = {
name: len(xs)
for (name, xs)
in self.columns_dict.items()
}
unique_column_lengths = set(column_lengths_dict.values())
if len(unique_column_lengths) != 1:
raise ValueError(
"Mismatch between lengths of columns: %s" % (column_lengths_dict,)) | python | def _check_column_lengths(self):
"""
Make sure columns are of the same length or else DataFrame construction
will fail.
"""
column_lengths_dict = {
name: len(xs)
for (name, xs)
in self.columns_dict.items()
}
unique_column_lengths = set(column_lengths_dict.values())
if len(unique_column_lengths) != 1:
raise ValueError(
"Mismatch between lengths of columns: %s" % (column_lengths_dict,)) | [
"def",
"_check_column_lengths",
"(",
"self",
")",
":",
"column_lengths_dict",
"=",
"{",
"name",
":",
"len",
"(",
"xs",
")",
"for",
"(",
"name",
",",
"xs",
")",
"in",
"self",
".",
"columns_dict",
".",
"items",
"(",
")",
"}",
"unique_column_lengths",
"=",
"set",
"(",
"column_lengths_dict",
".",
"values",
"(",
")",
")",
"if",
"len",
"(",
"unique_column_lengths",
")",
"!=",
"1",
":",
"raise",
"ValueError",
"(",
"\"Mismatch between lengths of columns: %s\"",
"%",
"(",
"column_lengths_dict",
",",
")",
")"
] | Make sure columns are of the same length or else DataFrame construction
will fail. | [
"Make",
"sure",
"columns",
"are",
"of",
"the",
"same",
"length",
"or",
"else",
"DataFrame",
"construction",
"will",
"fail",
"."
] | b39b684920e3f6b344851d6598a1a1c67bce913b | https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/dataframe_builder.py#L169-L182 | train |
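The check above is small enough to reproduce standalone; this sketch mirrors its logic outside the class so it can be run directly.

def check_column_lengths(columns_dict):
    # Map each column name to its length, then require one unique length
    lengths = {name: len(xs) for name, xs in columns_dict.items()}
    if len(set(lengths.values())) != 1:
        raise ValueError(
            "Mismatch between lengths of columns: %s" % (lengths,))

check_column_lengths({"chr": [1, 2], "pos": [10, 20]})  # passes silently
# check_column_lengths({"chr": [1, 2], "pos": [10]})    # raises ValueError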
bskinn/opan | opan/vpt2/base.py | OpanVPT2.new_from_files | def new_from_files(self, basepath, basename, repo, \
bohrs=False, \
software=_E_SW.ORCA, \
repo_clobber=False, **kwargs):
""" Initialize with data from files.
"""
# Imports
import os
from os import path as osp
from ..xyz import OpanXYZ as OX
from ..grad import OrcaEngrad as OE
from ..hess import OrcaHess as OH
from .repo import OpanAnharmRepo as OR
from ..const import EnumDispDirection as E_DDir, EnumFileType as E_FT
from ..const import EnumSoftware as E_SW
from ..const import DEF
from ..error import AnharmError as ANHErr
## # Store working directory for restore?
## prev_dir = os.getcwd()
# Complain if anything is already bound
if not self.w_xyz == None:
raise ANHErr(ANHErr.STATUS,
"XYZ object is already bound",
"")
## end if
if not self.w_grad == None:
raise ANHErr(ANHErr.STATUS,
"GRAD object is already bound",
"")
## end if
if not self.w_hess == None:
raise ANHErr(ANHErr.STATUS,
"HESS object is already bound",
"")
## end if
if not self.repo == None:
raise ANHErr(ANHErr.STATUS,
"Repository object is already bound",
"")
## end if
# RESUME: vpt2--factor for loading from different software pkgs
# Load the three data files
self.w_xyz = OX( osp.join(basepath, \
basename + osp.extsep + xyz_ext) )
self.w_grad = OE( osp.join(basepath, \
basename + osp.extsep + engrad_ext), \
0, E_DDir.NO_DISP, 0.0 )
self.w_hess = OH( osp.join(basepath, \
basename + osp.extsep + hess_ext), \
0, E_DDir.NO_DISP, 0.0 )
# Only accept new repos for now
if not isinstance(repo, str):
raise TypeError("Must create new repository when loading " +
"a new dataset.")
## end if
# Repo is string, treat as filename and try to load
# Check if it's a complete path
# If it's a relative path, prepend the basepath
if len(osp.split(repo)[0]) > 0 and not osp.isabs(repo):
repo = osp.join(basepath, repo)
## end if
# Complain if it's a directory
if osp.isdir(repo):
raise IOError("Cannot bind repository -- specified " +
"location is a directory")
## end if
# If file exists ...
if osp.isfile(repo):
# Depending on clobber, either delete existing or raise error
if repo_clobber:
# Clobber old repo
os.remove(repo)
else:
# Raise error
raise IOError("Target repository file exists and " +
"clobber is disabled.")
## end if
## end if
# Should be good to create the repo
self.repo = OR(repo) | python | def new_from_files(self, basepath, basename, repo, \
bohrs=False, \
software=_E_SW.ORCA, \
repo_clobber=False, **kwargs):
""" Initialize with data from files.
"""
# Imports
import os
from os import path as osp
from ..xyz import OpanXYZ as OX
from ..grad import OrcaEngrad as OE
from ..hess import OrcaHess as OH
from .repo import OpanAnharmRepo as OR
from ..const import EnumDispDirection as E_DDir, EnumFileType as E_FT
from ..const import EnumSoftware as E_SW
from ..const import DEF
from ..error import AnharmError as ANHErr
## # Store working directory for restore?
## prev_dir = os.getcwd()
# Complain if anything is already bound
if not self.w_xyz == None:
raise ANHErr(ANHErr.STATUS,
"XYZ object is already bound",
"")
## end if
if not self.w_grad == None:
raise ANHErr(ANHErr.STATUS,
"GRAD object is already bound",
"")
## end if
if not self.w_hess == None:
raise ANHErr(ANHErr.STATUS,
"HESS object is already bound",
"")
## end if
if not self.repo == None:
raise ANHErr(ANHErr.STATUS,
"Repository object is already bound",
"")
## end if
# RESUME: vpt2--factor for loading from different software pkgs
# Load the three data files
self.w_xyz = OX( osp.join(basepath, \
basename + osp.extsep + xyz_ext) )
self.w_grad = OE( osp.join(basepath, \
basename + osp.extsep + engrad_ext), \
0, E_DDir.NO_DISP, 0.0 )
self.w_hess = OH( osp.join(basepath, \
basename + osp.extsep + hess_ext), \
0, E_DDir.NO_DISP, 0.0 )
# Only accept new repos for now
if not isinstance(repo, str):
raise TypeError("Must create new repository when loading " +
"a new dataset.")
## end if
# Repo is string, treat as filename and try to load
# Check if it's a complete path
# If it's a relative path, prepend the basepath
if len(osp.split(repo)[0]) > 0 and not osp.isabs(repo):
repo = osp.join(basepath, repo)
## end if
# Complain if it's a directory
if osp.isdir(repo):
raise IOError("Cannot bind repository -- specified " +
"location is a directory")
## end if
# If file exists ...
if osp.isfile(repo):
# Depending on clobber, either delete existing or raise error
if repo_clobber:
# Clobber old repo
os.remove(repo)
else:
# Raise error
raise IOError("Target repository file exists and " +
"clobber is disabled.")
## end if
## end if
# Should be good to create the repo
self.repo = OR(repo) | [
"def",
"new_from_files",
"(",
"self",
",",
"basepath",
",",
"basename",
",",
"repo",
",",
"bohrs",
"=",
"False",
",",
"software",
"=",
"_E_SW",
".",
"ORCA",
",",
"repo_clobber",
"=",
"False",
",",
"**",
"kwargs",
")",
":",
"import",
"os",
"from",
"os",
"import",
"path",
"as",
"osp",
"from",
".",
".",
"xyz",
"import",
"OpanXYZ",
"as",
"OX",
"from",
".",
".",
"grad",
"import",
"OrcaEngrad",
"as",
"OE",
"from",
".",
".",
"hess",
"import",
"OrcaHess",
"as",
"OH",
"from",
".",
"repo",
"import",
"OpanAnharmRepo",
"as",
"OR",
"from",
".",
".",
"const",
"import",
"EnumDispDirection",
"as",
"E_DDir",
",",
"EnumFileType",
"as",
"E_FT",
"from",
".",
".",
"const",
"import",
"EnumSoftware",
"as",
"E_SW",
"from",
".",
".",
"const",
"import",
"DEF",
"from",
".",
".",
"error",
"import",
"AnharmError",
"as",
"ANHErr",
"if",
"not",
"self",
".",
"w_xyz",
"==",
"None",
":",
"raise",
"ANHErr",
"(",
"ANHErr",
".",
"STATUS",
",",
"\"XYZ object is already bound\"",
",",
"\"\"",
")",
"if",
"not",
"self",
".",
"w_grad",
"==",
"None",
":",
"raise",
"ANHErr",
"(",
"ANHErr",
".",
"STATUS",
",",
"\"GRAD object is already bound\"",
",",
"\"\"",
")",
"if",
"not",
"self",
".",
"w_hess",
"==",
"None",
":",
"raise",
"ANHErr",
"(",
"ANHErr",
".",
"STATUS",
",",
"\"HESS object is already bound\"",
",",
"\"\"",
")",
"if",
"not",
"self",
".",
"repo",
"==",
"None",
":",
"raise",
"ANHErr",
"(",
"ANHErr",
".",
"STATUS",
",",
"\"Repository object is already bound\"",
",",
"\"\"",
")",
"self",
".",
"w_xyz",
"=",
"OX",
"(",
"osp",
".",
"join",
"(",
"basepath",
",",
"basename",
"+",
"osp",
".",
"extsep",
"+",
"xyz_ext",
")",
")",
"self",
".",
"w_grad",
"=",
"OE",
"(",
"osp",
".",
"join",
"(",
"basepath",
",",
"basename",
"+",
"osp",
".",
"extsep",
"+",
"engrad_ext",
")",
",",
"0",
",",
"E_DDir",
".",
"NO_DISP",
",",
"0.0",
")",
"self",
".",
"w_hess",
"=",
"OH",
"(",
"osp",
".",
"join",
"(",
"basepath",
",",
"basename",
"+",
"osp",
".",
"extsep",
"+",
"hess_ext",
")",
",",
"0",
",",
"E_DDir",
".",
"NO_DISP",
",",
"0.0",
")",
"if",
"not",
"isinstance",
"(",
"repo",
",",
"str",
")",
":",
"raise",
"TypeError",
"(",
"\"Must create new repository when loading \"",
"+",
"\"a new dataset.\"",
")",
"if",
"osp",
".",
"split",
"(",
"repo",
"[",
"0",
"]",
")",
">",
"0",
"and",
"not",
"osp",
".",
"isabs",
"(",
"repo",
")",
":",
"repo",
"=",
"osp",
".",
"join",
"(",
"basepath",
",",
"repo",
")",
"if",
"osp",
".",
"isdir",
"(",
"repo",
")",
":",
"raise",
"IOError",
"(",
"\"Cannot bind repository -- specified \"",
"+",
"\"location is a directory\"",
")",
"if",
"osp",
".",
"isfile",
"(",
"repo",
")",
":",
"if",
"repo_clobber",
":",
"os",
".",
"remove",
"(",
"repo",
")",
"else",
":",
"raise",
"IOError",
"(",
"\"Target repository file exists and \"",
"+",
"\"clobber is disabled.\"",
")",
"self",
".",
"repo",
"=",
"OR",
"(",
"repo",
")"
] | Initialize with data from files. | [
"Initialize",
"with",
"data",
"from",
"files",
"."
] | 0b1b21662df6abc971407a9386db21a8796fbfe5 | https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/vpt2/base.py#L54-L144 | train |
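A hypothetical call sketch for the loader above. It assumes OpanVPT2 can be instantiated with no arguments and that h2o.xyz, h2o.engrad, and h2o.hess exist under the base path; all paths and names are placeholders.

from opan.vpt2.base import OpanVPT2

vpt = OpanVPT2()  # assumed no-arg constructor
vpt.new_from_files(basepath='/calcs/h2o', basename='h2o',
                   repo='h2o_repo', repo_clobber=True)
# With repo_clobber=False, an existing repo file raises IOError instead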
daskos/mentor | mentor/utils.py | remote_exception | def remote_exception(exc, tb):
""" Metaclass that wraps exception type in RemoteException """
if type(exc) in exceptions:
typ = exceptions[type(exc)]
return typ(exc, tb)
else:
try:
typ = type(exc.__class__.__name__,
(RemoteException, type(exc)),
{'exception_type': type(exc)})
exceptions[type(exc)] = typ
return typ(exc, tb)
except TypeError:
return exc | python | def remote_exception(exc, tb):
""" Metaclass that wraps exception type in RemoteException """
if type(exc) in exceptions:
typ = exceptions[type(exc)]
return typ(exc, tb)
else:
try:
typ = type(exc.__class__.__name__,
(RemoteException, type(exc)),
{'exception_type': type(exc)})
exceptions[type(exc)] = typ
return typ(exc, tb)
except TypeError:
return exc | [
"def",
"remote_exception",
"(",
"exc",
",",
"tb",
")",
":",
"if",
"type",
"(",
"exc",
")",
"in",
"exceptions",
":",
"typ",
"=",
"exceptions",
"[",
"type",
"(",
"exc",
")",
"]",
"return",
"typ",
"(",
"exc",
",",
"tb",
")",
"else",
":",
"try",
":",
"typ",
"=",
"type",
"(",
"exc",
".",
"__class__",
".",
"__name__",
",",
"(",
"RemoteException",
",",
"type",
"(",
"exc",
")",
")",
",",
"{",
"'exception_type'",
":",
"type",
"(",
"exc",
")",
"}",
")",
"exceptions",
"[",
"type",
"(",
"exc",
")",
"]",
"=",
"typ",
"return",
"typ",
"(",
"exc",
",",
"tb",
")",
"except",
"TypeError",
":",
"return",
"exc"
] | Factory function that wraps exception type in RemoteException | [
"Metaclass",
"that",
"wraps",
"exception",
"type",
"in",
"RemoteException"
] | b5fd64e3a3192f5664fa5c03e8517cacb4e0590f | https://github.com/daskos/mentor/blob/b5fd64e3a3192f5664fa5c03e8517cacb4e0590f/mentor/utils.py#L61-L74 | train |
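A runnable sketch of the wrapper above; it assumes the module-level exceptions cache and a RemoteException base class whose constructor accepts (exc, tb), both defined elsewhere in mentor/utils.py.

import traceback
from mentor.utils import remote_exception

try:
    1 / 0
except ZeroDivisionError as exc:
    wrapped = remote_exception(exc, traceback.format_exc())

# The dynamic subclass keeps the original type in its MRO, so existing
# handlers for ZeroDivisionError still catch the wrapped exception.
print(isinstance(wrapped, ZeroDivisionError))  # True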
openvax/isovar | isovar/allele_reads.py | reads_overlapping_variants | def reads_overlapping_variants(variants, samfile, **kwargs):
"""
Generates sequence of tuples, each containing a variant paired with
a list of AlleleRead objects.
Parameters
----------
variants : varcode.VariantCollection
samfile : pysam.AlignmentFile
use_duplicate_reads : bool
Should we use reads that have been marked as PCR duplicates
use_secondary_alignments : bool
Should we use reads at locations other than their best alignment
min_mapping_quality : int
Drop reads below this mapping quality
"""
chromosome_names = set(samfile.references)
for variant in variants:
# I imagine the conversation went like this:
# A: "Hey, I have an awesome idea"
# B: "What's up?"
# A: "Let's make two nearly identical reference genomes"
# B: "But...that sounds like it might confuse people."
# A: "Nah, it's cool, we'll give the chromosomes different prefixes!"
# B: "OK, sounds like a good idea."
if variant.contig in chromosome_names:
chromosome = variant.contig
elif "chr" + variant.contig in chromosome_names:
chromosome = "chr" + variant.contig
else:
logger.warn(
"Chromosome '%s' from variant %s not in alignment file %s",
variant.contig,
variant,
samfile.filename)
yield variant, []
continue
allele_reads = reads_overlapping_variant(
samfile=samfile,
chromosome=chromosome,
variant=variant,
**kwargs)
yield variant, allele_reads | python | def reads_overlapping_variants(variants, samfile, **kwargs):
"""
Generates sequence of tuples, each containing a variant paired with
a list of AlleleRead objects.
Parameters
----------
variants : varcode.VariantCollection
samfile : pysam.AlignmentFile
use_duplicate_reads : bool
Should we use reads that have been marked as PCR duplicates
use_secondary_alignments : bool
Should we use reads at locations other than their best alignment
min_mapping_quality : int
Drop reads below this mapping quality
"""
chromosome_names = set(samfile.references)
for variant in variants:
# I imagine the conversation went like this:
# A: "Hey, I have an awesome idea"
# B: "What's up?"
# A: "Let's make two nearly identical reference genomes"
# B: "But...that sounds like it might confuse people."
# A: "Nah, it's cool, we'll give the chromosomes different prefixes!"
# B: "OK, sounds like a good idea."
if variant.contig in chromosome_names:
chromosome = variant.contig
elif "chr" + variant.contig in chromosome_names:
chromosome = "chr" + variant.contig
else:
logger.warn(
"Chromosome '%s' from variant %s not in alignment file %s",
variant.contig,
variant,
samfile.filename)
yield variant, []
continue
allele_reads = reads_overlapping_variant(
samfile=samfile,
chromosome=chromosome,
variant=variant,
**kwargs)
yield variant, allele_reads | [
"def",
"reads_overlapping_variants",
"(",
"variants",
",",
"samfile",
",",
"**",
"kwargs",
")",
":",
"chromosome_names",
"=",
"set",
"(",
"samfile",
".",
"references",
")",
"for",
"variant",
"in",
"variants",
":",
"if",
"variant",
".",
"contig",
"in",
"chromosome_names",
":",
"chromosome",
"=",
"variant",
".",
"contig",
"elif",
"\"chr\"",
"+",
"variant",
".",
"contig",
"in",
"chromosome_names",
":",
"chromosome",
"=",
"\"chr\"",
"+",
"variant",
".",
"contig",
"else",
":",
"logger",
".",
"warn",
"(",
"\"Chromosome '%s' from variant %s not in alignment file %s\"",
",",
"chromosome",
",",
"variant",
",",
"samfile",
".",
"filename",
")",
"yield",
"variant",
",",
"[",
"]",
"continue",
"allele_reads",
"=",
"reads_overlapping_variant",
"(",
"samfile",
"=",
"samfile",
",",
"chromosome",
"=",
"chromosome",
",",
"variant",
"=",
"variant",
",",
"**",
"kwargs",
")",
"yield",
"variant",
",",
"allele_reads"
] | Generates sequence of tuples, each containing a variant paired with
a list of AlleleRead objects.
Parameters
----------
variants : varcode.VariantCollection
samfile : pysam.AlignmentFile
use_duplicate_reads : bool
Should we use reads that have been marked as PCR duplicates
use_secondary_alignments : bool
Should we use reads at locations other than their best alignment
min_mapping_quality : int
Drop reads below this mapping quality | [
"Generates",
"sequence",
"of",
"tuples",
"each",
"containing",
"a",
"variant",
"paired",
"with",
"a",
"list",
"of",
"AlleleRead",
"objects",
"."
] | b39b684920e3f6b344851d6598a1a1c67bce913b | https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/allele_reads.py#L234-L280 | train |
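Usage sketch; the file names are placeholders, and loading variants via varcode.load_vcf matches the documented varcode.VariantCollection parameter type.

import pysam
import varcode

from isovar.allele_reads import reads_overlapping_variants

variants = varcode.load_vcf("sample.vcf")          # placeholder VCF
samfile = pysam.AlignmentFile("sample.bam", "rb")  # placeholder BAM
for variant, allele_reads in reads_overlapping_variants(
        variants, samfile, min_mapping_quality=1):
    print(variant, len(allele_reads))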
openvax/isovar | isovar/allele_reads.py | group_reads_by_allele | def group_reads_by_allele(allele_reads):
"""
Returns dictionary mapping each allele's nucleotide sequence to a list of
supporting AlleleRead objects.
"""
allele_to_reads_dict = defaultdict(list)
for allele_read in allele_reads:
allele_to_reads_dict[allele_read.allele].append(allele_read)
return allele_to_reads_dict | python | def group_reads_by_allele(allele_reads):
"""
Returns dictionary mapping each allele's nucleotide sequence to a list of
supporting AlleleRead objects.
"""
allele_to_reads_dict = defaultdict(list)
for allele_read in allele_reads:
allele_to_reads_dict[allele_read.allele].append(allele_read)
return allele_to_reads_dict | [
"def",
"group_reads_by_allele",
"(",
"allele_reads",
")",
":",
"allele_to_reads_dict",
"=",
"defaultdict",
"(",
"list",
")",
"for",
"allele_read",
"in",
"allele_reads",
":",
"allele_to_reads_dict",
"[",
"allele_read",
".",
"allele",
"]",
".",
"append",
"(",
"allele_read",
")",
"return",
"allele_to_reads_dict"
] | Returns dictionary mapping each allele's nucleotide sequence to a list of
supporting AlleleRead objects. | [
"Returns",
"dictionary",
"mapping",
"each",
"allele",
"s",
"nucleotide",
"sequence",
"to",
"a",
"list",
"of",
"supporting",
"AlleleRead",
"objects",
"."
] | b39b684920e3f6b344851d6598a1a1c67bce913b | https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/allele_reads.py#L283-L291 | train |
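A minimal sketch using a stand-in read type; the real AlleleRead (defined earlier in this module) exposes an allele field, which is all the grouping relies on.

from collections import namedtuple

from isovar.allele_reads import group_reads_by_allele

FakeRead = namedtuple("FakeRead", ["prefix", "allele", "suffix"])
reads = [FakeRead("AC", "G", "TT"),
         FakeRead("AC", "A", "TT"),
         FakeRead("C", "G", "TTA")]
grouped = group_reads_by_allele(reads)
print(sorted(grouped))    # ['A', 'G']
print(len(grouped["G"]))  # 2 reads support the 'G' allele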
openvax/isovar | isovar/allele_reads.py | AlleleRead.from_locus_read | def from_locus_read(cls, locus_read, n_ref):
"""
Given a single LocusRead object, return either an AlleleRead or None
Parameters
----------
locus_read : LocusRead
Read which overlaps a variant locus but doesn't necessarily contain the
alternate nucleotides
n_ref : int
Number of reference positions we are expecting to be modified or
deleted (for insertions this should be 0)
"""
sequence = locus_read.sequence
reference_positions = locus_read.reference_positions
# positions of the nucleotides before and after the variant within
# the read sequence
read_pos_before = locus_read.base0_read_position_before_variant
read_pos_after = locus_read.base0_read_position_after_variant
# positions of the nucleotides before and after the variant on the
# reference genome
ref_pos_before = reference_positions[read_pos_before]
if ref_pos_before is None:
logger.warn(
"Missing reference pos for nucleotide before variant on read: %s",
locus_read)
return None
ref_pos_after = reference_positions[read_pos_after]
if ref_pos_after is None:
logger.warn(
"Missing reference pos for nucleotide after variant on read: %s",
locus_read)
return None
if n_ref == 0:
if ref_pos_after - ref_pos_before != 1:
# if the number of nucleotides skipped isn't the same
# as the number of reference nucleotides in the variant then
# don't use this read
logger.debug(
"Positions before (%d) and after (%d) variant should be adjacent on read %s",
ref_pos_before,
ref_pos_after,
locus_read)
return None
# insertions require a sequence of non-aligned bases
# followed by the subsequent reference position
ref_positions_for_inserted = reference_positions[
read_pos_before + 1:read_pos_after]
if any(insert_pos is not None for insert_pos in ref_positions_for_inserted):
# all these inserted nucleotides should *not* align to the
# reference
logger.debug(
"Skipping read, inserted nucleotides shouldn't map to reference")
return None
else:
# substitutions and deletions
if ref_pos_after - ref_pos_before != n_ref + 1:
# if the number of nucleotides skipped isn't the same
# as the number of reference nucleotides in the variant then
# don't use this read
logger.debug(
("Positions before (%d) and after (%d) variant should be "
"adjacent on read %s"),
ref_pos_before,
ref_pos_after,
locus_read)
return None
nucleotides_at_variant_locus = sequence[read_pos_before + 1:read_pos_after]
prefix = sequence[:read_pos_before + 1]
suffix = sequence[read_pos_after:]
prefix, suffix = convert_from_bytes_if_necessary(prefix, suffix)
prefix, suffix = trim_N_nucleotides(prefix, suffix)
return cls(
prefix,
nucleotides_at_variant_locus,
suffix,
name=locus_read.name) | python | def from_locus_read(cls, locus_read, n_ref):
"""
Given a single LocusRead object, return either an AlleleRead or None
Parameters
----------
locus_read : LocusRead
Read which overlaps a variant locus but doesn't necessarily contain the
alternate nucleotides
n_ref : int
Number of reference positions we are expecting to be modified or
deleted (for insertions this should be 0)
"""
sequence = locus_read.sequence
reference_positions = locus_read.reference_positions
# positions of the nucleotides before and after the variant within
# the read sequence
read_pos_before = locus_read.base0_read_position_before_variant
read_pos_after = locus_read.base0_read_position_after_variant
# positions of the nucleotides before and after the variant on the
# reference genome
ref_pos_before = reference_positions[read_pos_before]
if ref_pos_before is None:
logger.warn(
"Missing reference pos for nucleotide before variant on read: %s",
locus_read)
return None
ref_pos_after = reference_positions[read_pos_after]
if ref_pos_after is None:
logger.warn(
"Missing reference pos for nucleotide after variant on read: %s",
locus_read)
return None
if n_ref == 0:
if ref_pos_after - ref_pos_before != 1:
# if the number of nucleotides skipped isn't the same
# as the number of reference nucleotides in the variant then
# don't use this read
logger.debug(
"Positions before (%d) and after (%d) variant should be adjacent on read %s",
ref_pos_before,
ref_pos_after,
locus_read)
return None
# insertions require a sequence of non-aligned bases
# followed by the subsequent reference position
ref_positions_for_inserted = reference_positions[
read_pos_before + 1:read_pos_after]
if any(insert_pos is not None for insert_pos in ref_positions_for_inserted):
# all these inserted nucleotides should *not* align to the
# reference
logger.debug(
"Skipping read, inserted nucleotides shouldn't map to reference")
return None
else:
# substitutions and deletions
if ref_pos_after - ref_pos_before != n_ref + 1:
# if the number of nucleotides skipped isn't the same
# as the number of reference nucleotides in the variant then
# don't use this read
logger.debug(
("Positions before (%d) and after (%d) variant should be "
"adjacent on read %s"),
ref_pos_before,
ref_pos_after,
locus_read)
return None
nucleotides_at_variant_locus = sequence[read_pos_before + 1:read_pos_after]
prefix = sequence[:read_pos_before + 1]
suffix = sequence[read_pos_after:]
prefix, suffix = convert_from_bytes_if_necessary(prefix, suffix)
prefix, suffix = trim_N_nucleotides(prefix, suffix)
return cls(
prefix,
nucleotides_at_variant_locus,
suffix,
name=locus_read.name) | [
"def",
"from_locus_read",
"(",
"cls",
",",
"locus_read",
",",
"n_ref",
")",
":",
"sequence",
"=",
"locus_read",
".",
"sequence",
"reference_positions",
"=",
"locus_read",
".",
"reference_positions",
"read_pos_before",
"=",
"locus_read",
".",
"base0_read_position_before_variant",
"read_pos_after",
"=",
"locus_read",
".",
"base0_read_position_after_variant",
"ref_pos_before",
"=",
"reference_positions",
"[",
"read_pos_before",
"]",
"if",
"ref_pos_before",
"is",
"None",
":",
"logger",
".",
"warn",
"(",
"\"Missing reference pos for nucleotide before variant on read: %s\"",
",",
"locus_read",
")",
"return",
"None",
"ref_pos_after",
"=",
"reference_positions",
"[",
"read_pos_after",
"]",
"if",
"ref_pos_after",
"is",
"None",
":",
"logger",
".",
"warn",
"(",
"\"Missing reference pos for nucleotide after variant on read: %s\"",
",",
"locus_read",
")",
"return",
"None",
"if",
"n_ref",
"==",
"0",
":",
"if",
"ref_pos_after",
"-",
"ref_pos_before",
"!=",
"1",
":",
"logger",
".",
"debug",
"(",
"\"Positions before (%d) and after (%d) variant should be adjacent on read %s\"",
",",
"ref_pos_before",
",",
"ref_pos_after",
",",
"locus_read",
")",
"return",
"None",
"ref_positions_for_inserted",
"=",
"reference_positions",
"[",
"read_pos_before",
"+",
"1",
":",
"read_pos_after",
"]",
"if",
"any",
"(",
"insert_pos",
"is",
"not",
"None",
"for",
"insert_pos",
"in",
"ref_positions_for_inserted",
")",
":",
"logger",
".",
"debug",
"(",
"\"Skipping read, inserted nucleotides shouldn't map to reference\"",
")",
"return",
"None",
"else",
":",
"if",
"ref_pos_after",
"-",
"ref_pos_before",
"!=",
"n_ref",
"+",
"1",
":",
"logger",
".",
"debug",
"(",
"(",
"\"Positions before (%d) and after (%d) variant should be \"",
"\"adjacent on read %s\"",
")",
",",
"ref_pos_before",
",",
"ref_pos_after",
",",
"locus_read",
")",
"return",
"None",
"nucleotides_at_variant_locus",
"=",
"sequence",
"[",
"read_pos_before",
"+",
"1",
":",
"read_pos_after",
"]",
"prefix",
"=",
"sequence",
"[",
":",
"read_pos_before",
"+",
"1",
"]",
"suffix",
"=",
"sequence",
"[",
"read_pos_after",
":",
"]",
"prefix",
",",
"suffix",
"=",
"convert_from_bytes_if_necessary",
"(",
"prefix",
",",
"suffix",
")",
"prefix",
",",
"suffix",
"=",
"trim_N_nucleotides",
"(",
"prefix",
",",
"suffix",
")",
"return",
"cls",
"(",
"prefix",
",",
"nucleotides_at_variant_locus",
",",
"suffix",
",",
"name",
"=",
"locus_read",
".",
"name",
")"
] | Given a single LocusRead object, return either an AlleleRead or None
Parameters
----------
locus_read : LocusRead
Read which overlaps a variant locus but doesn't necessarily contain the
alternate nucleotides
n_ref : int
Number of reference positions we are expecting to be modified or
deleted (for insertions this should be 0) | [
"Given",
"a",
"single",
"LocusRead",
"object",
"return",
"either",
"an",
"AlleleRead",
"or",
"None"
] | b39b684920e3f6b344851d6598a1a1c67bce913b | https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/allele_reads.py#L52-L140 | train |
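A conceptual sketch of the prefix/allele/suffix partitioning performed above, reduced to plain slicing on a toy read; the real method also validates the flanking reference positions and trims N nucleotides.

sequence = "ACGTACGT"
read_pos_before, read_pos_after = 2, 4  # base0 positions flanking the variant

prefix = sequence[:read_pos_before + 1]                # 'ACG'
allele = sequence[read_pos_before + 1:read_pos_after]  # 'T'
suffix = sequence[read_pos_after:]                     # 'ACGT'
print(prefix, allele, suffix)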
openvax/isovar | isovar/nucleotide_counts.py | most_common_nucleotides | def most_common_nucleotides(partitioned_read_sequences):
"""
Find the most common nucleotide at each offset to the left and
right of a variant.
Parameters
----------
partitioned_read_sequences : list of tuples
Each tuple has three elements:
- sequence before mutant nucleotides
- mutant nucleotides
- sequence after mutant nucleotides
Returns a tuple with the following elements:
- nucleotide sequence from most common nucleotide at each offset
relative to the variant
- an array of counts indicating how many reads supported this nucleotide
- an array of counts for all the *other* nucleotides at that position
"""
counts, variant_column_indices = nucleotide_counts(
partitioned_read_sequences)
max_count_per_column = counts.max(axis=0)
assert len(max_count_per_column) == counts.shape[1]
max_nucleotide_index_per_column = np.argmax(counts, axis=0)
assert len(max_nucleotide_index_per_column) == counts.shape[1]
nucleotides = [
index_to_dna_nucleotide[idx]
for idx in max_nucleotide_index_per_column
]
other_nucleotide_counts = counts.sum(axis=0) - max_count_per_column
return "".join(nucleotides), max_count_per_column, other_nucleotide_counts | python | def most_common_nucleotides(partitioned_read_sequences):
"""
Find the most common nucleotide at each offset to the left and
right of a variant.
Parameters
----------
partitioned_read_sequences : list of tuples
Each tuple has three elements:
- sequence before mutant nucleotides
- mutant nucleotides
- sequence after mutant nucleotides
Returns a tuple with the following elements:
- nucleotide sequence from most common nucleotide at each offset
relative to the variant
- an array of counts indicating how many reads supported this nucleotide
- an array of counts for all the *other* nucleotides at that position
"""
counts, variant_column_indices = nucleotide_counts(
partitioned_read_sequences)
max_count_per_column = counts.max(axis=0)
assert len(max_count_per_column) == counts.shape[1]
max_nucleotide_index_per_column = np.argmax(counts, axis=0)
assert len(max_nucleotide_index_per_column) == counts.shape[1]
nucleotides = [
index_to_dna_nucleotide[idx]
for idx in max_nucleotide_index_per_column
]
other_nucleotide_counts = counts.sum(axis=0) - max_count_per_column
return "".join(nucleotides), max_count_per_column, other_nucleotide_counts | [
"def",
"most_common_nucleotides",
"(",
"partitioned_read_sequences",
")",
":",
"counts",
",",
"variant_column_indices",
"=",
"nucleotide_counts",
"(",
"partitioned_read_sequences",
")",
"max_count_per_column",
"=",
"counts",
".",
"max",
"(",
"axis",
"=",
"0",
")",
"assert",
"len",
"(",
"max_count_per_column",
")",
"==",
"counts",
".",
"shape",
"[",
"1",
"]",
"max_nucleotide_index_per_column",
"=",
"np",
".",
"argmax",
"(",
"counts",
",",
"axis",
"=",
"0",
")",
"assert",
"len",
"(",
"max_nucleotide_index_per_column",
")",
"==",
"counts",
".",
"shape",
"[",
"1",
"]",
"nucleotides",
"=",
"[",
"index_to_dna_nucleotide",
"[",
"idx",
"]",
"for",
"idx",
"in",
"max_nucleotide_index_per_column",
"]",
"other_nucleotide_counts",
"=",
"counts",
".",
"sum",
"(",
"axis",
"=",
"0",
")",
"-",
"max_count_per_column",
"return",
"\"\"",
".",
"join",
"(",
"nucleotides",
")",
",",
"max_count_per_column",
",",
"other_nucleotide_counts"
] | Find the most common nucleotide at each offset to the left and
right of a variant.
Parameters
----------
partitioned_read_sequences : list of tuples
Each tuple has three elements:
- sequence before mutant nucleotides
- mutant nucleotides
- sequence after mutant nucleotides
Returns a tuple with the following elements:
- nucleotide sequence from most common nucleotide at each offset
relative to the variant
- an array of counts indicating how many reads supported this nucleotide
- an array of counts for all the *other* nucleotides at that position | [
"Find",
"the",
"most",
"common",
"nucleotide",
"at",
"each",
"offset",
"to",
"the",
"left",
"and",
"right",
"of",
"a",
"variant",
"."
] | b39b684920e3f6b344851d6598a1a1c67bce913b | https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/nucleotide_counts.py#L81-L112 | train |
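Usage sketch; each tuple is (prefix, variant nucleotides, suffix) as documented. With equal-length parts the columns align directly, so the expected consensus here spans all five positions.

from isovar.nucleotide_counts import most_common_nucleotides

parts = [("AC", "G", "TT"),
         ("AC", "G", "TA"),
         ("CC", "G", "TT")]
consensus, support, other = most_common_nucleotides(parts)
print(consensus)  # expected 'ACGTT' for this toy input
print(support)    # per-column counts backing the consensus base
print(other)      # per-column counts for all other bases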
bskinn/opan | opan/utils/symm.py | point_displ | def point_displ(pt1, pt2):
""" Calculate the displacement vector between two n-D points.
pt2 - pt1 (the displacement from pt1 to pt2)
.. todo:: Complete point_displ docstring
"""
#Imports
import numpy as np
# Make iterable
if not np.iterable(pt1):
pt1 = np.float64(np.array([pt1]))
else:
pt1 = np.float64(np.array(pt1).squeeze())
## end if
if not np.iterable(pt2):
pt2 = np.float64(np.array([pt2]))
else:
pt2 = np.float64(np.array(pt2).squeeze())
## end if
# Calculate the displacement vector and return
displ = np.matrix(np.subtract(pt2, pt1)).reshape(3,1)
return displ | python | def point_displ(pt1, pt2):
""" Calculate the displacement vector between two n-D points.
pt2 - pt1 (the displacement from pt1 to pt2)
.. todo:: Complete point_displ docstring
"""
#Imports
import numpy as np
# Make iterable
if not np.iterable(pt1):
pt1 = np.float64(np.array([pt1]))
else:
pt1 = np.float64(np.array(pt1).squeeze())
## end if
if not np.iterable(pt2):
pt2 = np.float64(np.array([pt2]))
else:
pt2 = np.float64(np.array(pt2).squeeze())
## end if
# Calculate the displacement vector and return
displ = np.matrix(np.subtract(pt2, pt1)).reshape(3,1)
return displ | [
"def",
"point_displ",
"(",
"pt1",
",",
"pt2",
")",
":",
"import",
"numpy",
"as",
"np",
"if",
"not",
"np",
".",
"iterable",
"(",
"pt1",
")",
":",
"pt1",
"=",
"np",
".",
"float64",
"(",
"np",
".",
"array",
"(",
"[",
"pt1",
"]",
")",
")",
"else",
":",
"pt1",
"=",
"np",
".",
"float64",
"(",
"np",
".",
"array",
"(",
"pt1",
")",
".",
"squeeze",
"(",
")",
")",
"if",
"not",
"np",
".",
"iterable",
"(",
"pt2",
")",
":",
"pt2",
"=",
"np",
".",
"float64",
"(",
"np",
".",
"array",
"(",
"[",
"pt2",
"]",
")",
")",
"else",
":",
"pt2",
"=",
"np",
".",
"float64",
"(",
"np",
".",
"array",
"(",
"pt2",
")",
".",
"squeeze",
"(",
")",
")",
"displ",
"=",
"np",
".",
"matrix",
"(",
"np",
".",
"subtract",
"(",
"pt2",
",",
"pt1",
")",
")",
".",
"reshape",
"(",
"3",
",",
"1",
")",
"return",
"displ"
] | Calculate the displacement vector between two n-D points.
pt2 - pt1 (the displacement from pt1 to pt2)
.. todo:: Complete point_displ docstring | [
"Calculate",
"the",
"displacement",
"vector",
"between",
"two",
"n",
"-",
"D",
"points",
"."
] | 0b1b21662df6abc971407a9386db21a8796fbfe5 | https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/utils/symm.py#L42-L68 | train |
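Quick check of the helper above; note that despite the n-D docstring, the trailing reshape(3,1) assumes 3-D points, and the value computed is pt2 - pt1 as a column vector.

from opan.utils.symm import point_displ

print(point_displ((1.0, 0.0, 0.0), (0.0, 2.0, 0.0)))
# matrix([[-1.],
#         [ 2.],
#         [ 0.]])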
bskinn/opan | opan/utils/symm.py | point_dist | def point_dist(pt1, pt2):
""" Calculate the Euclidean distance between two n-D points.
|pt1 - pt2|
.. todo:: Complete point_dist docstring
"""
# Imports
from scipy import linalg as spla
dist = spla.norm(point_displ(pt1, pt2))
return dist | python | def point_dist(pt1, pt2):
""" Calculate the Euclidean distance between two n-D points.
|pt1 - pt2|
.. todo:: Complete point_dist docstring
"""
# Imports
from scipy import linalg as spla
dist = spla.norm(point_displ(pt1, pt2))
return dist | [
"def",
"point_dist",
"(",
"pt1",
",",
"pt2",
")",
":",
"from",
"scipy",
"import",
"linalg",
"as",
"spla",
"dist",
"=",
"spla",
".",
"norm",
"(",
"point_displ",
"(",
"pt1",
",",
"pt2",
")",
")",
"return",
"dist"
] | Calculate the Euclidean distance between two n-D points.
|pt1 - pt2|
.. todo:: Complete point_dist docstring | [
"Calculate",
"the",
"Euclidean",
"distance",
"between",
"two",
"n",
"-",
"D",
"points",
"."
] | 0b1b21662df6abc971407a9386db21a8796fbfe5 | https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/utils/symm.py#L73-L86 | train |
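Quick check: the 3-4-5 right triangle gives a distance of exactly 5.

from opan.utils.symm import point_dist

print(point_dist((0.0, 0.0, 0.0), (3.0, 4.0, 0.0)))  # 5.0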
bskinn/opan | opan/utils/symm.py | point_rotate | def point_rotate(pt, ax, theta):
""" Rotate a 3-D point around a 3-D axis through the origin.
Handedness is a counter-clockwise rotation when viewing the rotation
axis as pointing at the observer. Thus, in a right-handed x-y-z frame,
a 90deg rotation of (1,0,0) around the z-axis (0,0,1) yields a point at
(0,1,0).
.. todo:: Complete point_rotate docstring
Raises
------
ValueError : If theta is nonscalar
ValueError : If pt or ax are not reducible to 3-D vectors
ValueError : If norm of ax is too small
"""
# Imports
import numpy as np
# Ensure pt is reducible to 3-D vector.
pt = make_nd_vec(pt, nd=3, t=np.float64, norm=False)
# Calculate the rotation
rot_pt = np.dot(mtx_rot(ax, theta, reps=1), pt)
# Should be ready to return
return rot_pt | python | def point_rotate(pt, ax, theta):
""" Rotate a 3-D point around a 3-D axis through the origin.
Handedness is a counter-clockwise rotation when viewing the rotation
axis as pointing at the observer. Thus, in a right-handed x-y-z frame,
a 90deg rotation of (1,0,0) around the z-axis (0,0,1) yields a point at
(0,1,0).
.. todo:: Complete point_rotate docstring
Raises
------
ValueError : If theta is nonscalar
ValueError : If pt or ax are not reducible to 3-D vectors
ValueError : If norm of ax is too small
"""
# Imports
import numpy as np
# Ensure pt is reducible to 3-D vector.
pt = make_nd_vec(pt, nd=3, t=np.float64, norm=False)
# Calculate the rotation
rot_pt = np.dot(mtx_rot(ax, theta, reps=1), pt)
# Should be ready to return
return rot_pt | [
"def",
"point_rotate",
"(",
"pt",
",",
"ax",
",",
"theta",
")",
":",
"import",
"numpy",
"as",
"np",
"pt",
"=",
"make_nd_vec",
"(",
"pt",
",",
"nd",
"=",
"3",
",",
"t",
"=",
"np",
".",
"float64",
",",
"norm",
"=",
"False",
")",
"rot_pt",
"=",
"np",
".",
"dot",
"(",
"mtx_rot",
"(",
"ax",
",",
"theta",
",",
"reps",
"=",
"1",
")",
",",
"pt",
")",
"return",
"rot_pt"
] | Rotate a 3-D point around a 3-D axis through the origin.
Handedness is a counter-clockwise rotation when viewing the rotation
axis as pointing at the observer. Thus, in a right-handed x-y-z frame,
a 90deg rotation of (1,0,0) around the z-axis (0,0,1) yields a point at
(0,1,0).
.. todo:: Complete point_rotate docstring
Raises
------
ValueError : If theta is nonscalar
ValueError : If pt or ax are not reducible to 3-D vectors
ValueError : If norm of ax is too small | [
"Rotate",
"a",
"3",
"-",
"D",
"point",
"around",
"a",
"3",
"-",
"D",
"axis",
"through",
"the",
"origin",
"."
] | 0b1b21662df6abc971407a9386db21a8796fbfe5 | https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/utils/symm.py#L91-L118 | train |
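The docstring's own example, run numerically: a 90-degree rotation of (1,0,0) about the z-axis lands on (0,1,0) up to floating-point noise.

import numpy as np

from opan.utils.symm import point_rotate

rotated = point_rotate((1.0, 0.0, 0.0), (0.0, 0.0, 1.0), np.pi / 2)
print(np.round(np.asarray(rotated).ravel(), 6))  # [0. 1. 0.]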
bskinn/opan | opan/utils/symm.py | point_reflect | def point_reflect(pt, nv):
""" Reflect a 3-D point through a plane intersecting the origin.
nv defines the normal vector to the plane (need not be normalized)
.. todo:: Complete point_reflect docstring
Raises
------
ValueError : If pt or nv are not reducible to 3-D vectors
ValueError : If norm of nv is too small
"""
# Imports
import numpy as np
from scipy import linalg as spla
# Ensure pt is reducible to 3-D vector
pt = make_nd_vec(pt, nd=3, t=np.float64, norm=False)
# Transform the point and return
refl_pt = np.dot(mtx_refl(nv, reps=1), pt)
return refl_pt | python | def point_reflect(pt, nv):
""" Reflect a 3-D point through a plane intersecting the origin.
nv defines the normal vector to the plane (need not be normalized)
.. todo:: Complete point_reflect docstring
Raises
------
ValueError : If pt or nv are not reducible to 3-D vectors
ValueError : If norm of nv is too small
"""
# Imports
import numpy as np
from scipy import linalg as spla
# Ensure pt is reducible to 3-D vector
pt = make_nd_vec(pt, nd=3, t=np.float64, norm=False)
# Transform the point and return
refl_pt = np.dot(mtx_refl(nv, reps=1), pt)
return refl_pt | [
"def",
"point_reflect",
"(",
"pt",
",",
"nv",
")",
":",
"import",
"numpy",
"as",
"np",
"from",
"scipy",
"import",
"linalg",
"as",
"spla",
"pt",
"=",
"make_nd_vec",
"(",
"pt",
",",
"nd",
"=",
"3",
",",
"t",
"=",
"np",
".",
"float64",
",",
"norm",
"=",
"False",
")",
"refl_pt",
"=",
"np",
".",
"dot",
"(",
"mtx_refl",
"(",
"nv",
",",
"reps",
"=",
"1",
")",
",",
"pt",
")",
"return",
"refl_pt"
] | Reflect a 3-D point through a plane intersecting the origin.
nv defines the normal vector to the plane (need not be normalized)
.. todo:: Complete point_reflect docstring
Raises
------
ValueError : If pt or nv are not reducible to 3-D vectors
ValueError : If norm of nv is too small | [
"Reflect",
"a",
"3",
"-",
"D",
"point",
"through",
"a",
"plane",
"intersecting",
"the",
"origin",
"."
] | 0b1b21662df6abc971407a9386db21a8796fbfe5 | https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/utils/symm.py#L123-L145 | train |
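Quick check: reflecting through the xy-plane (normal along z) negates only the z component.

import numpy as np

from opan.utils.symm import point_reflect

refl = point_reflect((1.0, 2.0, 3.0), (0.0, 0.0, 1.0))
print(np.asarray(refl).ravel())  # [ 1.  2. -3.]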
bskinn/opan | opan/utils/symm.py | geom_reflect | def geom_reflect(g, nv):
""" Reflection symmetry operation.
nv is normal vector to reflection plane
g is assumed already translated to center of mass @ origin
.. todo:: Complete geom_reflect docstring
"""
# Imports
import numpy as np
# Force g to n-vector
g = make_nd_vec(g, nd=None, t=np.float64, norm=False)
# Transform the geometry and return
refl_g = np.dot(mtx_refl(nv, reps=(g.shape[0] // 3)), g) \
.reshape((g.shape[0],1))
return refl_g | python | def geom_reflect(g, nv):
""" Reflection symmetry operation.
nv is normal vector to reflection plane
g is assumed already translated to center of mass @ origin
.. todo:: Complete geom_reflect docstring
"""
# Imports
import numpy as np
# Force g to n-vector
g = make_nd_vec(g, nd=None, t=np.float64, norm=False)
# Transform the geometry and return
refl_g = np.dot(mtx_refl(nv, reps=(g.shape[0] // 3)), g) \
.reshape((g.shape[0],1))
return refl_g | [
"def",
"geom_reflect",
"(",
"g",
",",
"nv",
")",
":",
"import",
"numpy",
"as",
"np",
"g",
"=",
"make_nd_vec",
"(",
"g",
",",
"nd",
"=",
"None",
",",
"t",
"=",
"np",
".",
"float64",
",",
"norm",
"=",
"False",
")",
"refl_g",
"=",
"np",
".",
"dot",
"(",
"mtx_refl",
"(",
"nv",
",",
"reps",
"=",
"(",
"g",
".",
"shape",
"[",
"0",
"]",
"//",
"3",
")",
")",
",",
"g",
")",
".",
"reshape",
"(",
"(",
"g",
".",
"shape",
"[",
"0",
"]",
",",
"1",
")",
")",
"return",
"refl_g"
] | Reflection symmetry operation.
nv is normal vector to reflection plane
g is assumed already translated to center of mass @ origin
.. todo:: Complete geom_reflect docstring | [
"Reflection",
"symmetry",
"operation",
"."
] | 0b1b21662df6abc971407a9386db21a8796fbfe5 | https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/utils/symm.py#L150-L169 | train |
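Sketch on a two-atom geometry flattened as (x1, y1, z1, x2, y2, z2); reflecting through the xy-plane flips each atom's z sign. Per the docstring, the geometry is assumed already centered at the origin.

import numpy as np

from opan.utils.symm import geom_reflect

g = [0.0, 0.0, 1.0, 0.0, 0.0, -1.0]
print(np.asarray(geom_reflect(g, (0.0, 0.0, 1.0))).ravel())
# [ 0.  0. -1.  0.  0.  1.]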
bskinn/opan | opan/utils/symm.py | geom_rotate | def geom_rotate(g, ax, theta):
""" Rotation symmetry operation.
ax is rotation axis
g is assumed already translated to center of mass @ origin
Sense of rotation is the same as point_rotate
.. todo:: Complete geom_rotate docstring
"""
# Imports
import numpy as np
# Force g to n-vector
g = make_nd_vec(g, nd=None, t=np.float64, norm=False)
# Perform rotation and return
rot_g = np.dot(mtx_rot(ax, theta, reps=(g.shape[0] // 3)), g) \
.reshape((g.shape[0],1))
return rot_g | python | def geom_rotate(g, ax, theta):
""" Rotation symmetry operation.
ax is rotation axis
g is assumed already translated to center of mass @ origin
Sense of rotation is the same as point_rotate
.. todo:: Complete geom_rotate docstring
"""
# Imports
import numpy as np
# Force g to n-vector
g = make_nd_vec(g, nd=None, t=np.float64, norm=False)
# Perform rotation and return
rot_g = np.dot(mtx_rot(ax, theta, reps=(g.shape[0] // 3)), g) \
.reshape((g.shape[0],1))
return rot_g | [
"def",
"geom_rotate",
"(",
"g",
",",
"ax",
",",
"theta",
")",
":",
"import",
"numpy",
"as",
"np",
"g",
"=",
"make_nd_vec",
"(",
"g",
",",
"nd",
"=",
"None",
",",
"t",
"=",
"np",
".",
"float64",
",",
"norm",
"=",
"False",
")",
"rot_g",
"=",
"np",
".",
"dot",
"(",
"mtx_rot",
"(",
"ax",
",",
"theta",
",",
"reps",
"=",
"(",
"g",
".",
"shape",
"[",
"0",
"]",
"//",
"3",
")",
")",
",",
"g",
")",
".",
"reshape",
"(",
"(",
"g",
".",
"shape",
"[",
"0",
"]",
",",
"1",
")",
")",
"return",
"rot_g"
] | Rotation symmetry operation.
ax is rotation axis
g is assumed already translated to center of mass @ origin
Sense of rotation is the same as point_rotate
.. todo:: Complete geom_rotate docstring | [
"Rotation",
"symmetry",
"operation",
"."
] | 0b1b21662df6abc971407a9386db21a8796fbfe5 | https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/utils/symm.py#L174-L195 | train |
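Sketch: a 180-degree rotation about z negates the x and y components of each atom while leaving z untouched.

import numpy as np

from opan.utils.symm import geom_rotate

g = [1.0, 2.0, 3.0]  # single atom at (1, 2, 3)
rot = geom_rotate(g, (0.0, 0.0, 1.0), np.pi)
print(np.round(np.asarray(rot).ravel(), 6))  # [-1. -2.  3.]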
bskinn/opan | opan/utils/symm.py | symm_op | def symm_op(g, ax, theta, do_refl):
""" Perform general point symmetry operation on a geometry.
.. todo:: Complete symm_op docstring
"""
# Imports
import numpy as np
# Depend on lower functions' geometry vector coercion. Just
# do the rotation and, if indicated, the reflection.
gx = geom_rotate(g, ax, theta)
if do_refl:
gx = geom_reflect(gx, ax)
## end if
# Should be good to go
return gx | python | def symm_op(g, ax, theta, do_refl):
""" Perform general point symmetry operation on a geometry.
.. todo:: Complete symm_op docstring
"""
# Imports
import numpy as np
# Depend on lower functions' geometry vector coercion. Just
# do the rotation and, if indicated, the reflection.
gx = geom_rotate(g, ax, theta)
if do_refl:
gx = geom_reflect(gx, ax)
## end if
# Should be good to go
return gx | [
"def",
"symm_op",
"(",
"g",
",",
"ax",
",",
"theta",
",",
"do_refl",
")",
":",
"import",
"numpy",
"as",
"np",
"gx",
"=",
"geom_rotate",
"(",
"g",
",",
"ax",
",",
"theta",
")",
"if",
"do_refl",
":",
"gx",
"=",
"geom_reflect",
"(",
"gx",
",",
"ax",
")",
"return",
"gx"
] | Perform general point symmetry operation on a geometry.
.. todo:: Complete symm_op docstring | [
"Perform",
"general",
"point",
"symmetry",
"operation",
"on",
"a",
"geometry",
"."
] | 0b1b21662df6abc971407a9386db21a8796fbfe5 | https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/utils/symm.py#L200-L218 | train |
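Sketch of an improper operation (an S4 about z): rotate 90 degrees, then reflect through the plane normal to the same axis.

import numpy as np

from opan.utils.symm import symm_op

g = [1.0, 0.0, 1.0]
gx = symm_op(g, (0.0, 0.0, 1.0), np.pi / 2, do_refl=True)
print(np.round(np.asarray(gx).ravel(), 6))  # [ 0.  1. -1.]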
bskinn/opan | opan/utils/symm.py | geom_find_rotsymm | def geom_find_rotsymm(g, atwts, ax, improp, \
nmax=_DEF.SYMM_MATCH_NMAX, \
tol=_DEF.SYMM_MATCH_TOL):
""" Identify highest-order symmetry for a geometry on a given axis.
Regular and improper axes possible.
.. todo:: Complete geom_find_rotsymm docstring
"""
# Imports
import numpy as np
# Vectorize the geometry
g = make_nd_vec(g, nd=None, t=np.float64, norm=False)
# Ensure a 3-D axis vector
ax = make_nd_vec(ax, nd=3, t=np.float64, norm=True)
# Loop downward either until a good axis is found or nval < 1
# Should never traverse below n == 1 for regular rotation check;
# could for improper, though.
nval = nmax + 1
nfac = 1.0
while nfac > tol and nval > 0:
nval = nval - 1
try:
nfac = geom_symm_match(g, atwts, ax, \
2*np.pi/nval, improp)
except ZeroDivisionError as zde:
# If it's because nval == zero, ignore. Else re-raise.
if nval > 0:
raise zde
## end if
## end try
## loop
# Should be good to return
return nval, nfac | python | def geom_find_rotsymm(g, atwts, ax, improp, \
nmax=_DEF.SYMM_MATCH_NMAX, \
tol=_DEF.SYMM_MATCH_TOL):
""" Identify highest-order symmetry for a geometry on a given axis.
Regular and improper axes possible.
.. todo:: Complete geom_find_rotsymm docstring
"""
# Imports
import numpy as np
# Vectorize the geometry
g = make_nd_vec(g, nd=None, t=np.float64, norm=False)
# Ensure a 3-D axis vector
ax = make_nd_vec(ax, nd=3, t=np.float64, norm=True)
# Loop downward either until a good axis is found or nval < 1
# Should never traverse below n == 1 for regular rotation check;
# could for improper, though.
nval = nmax + 1
nfac = 1.0
while nfac > tol and nval > 0:
nval = nval - 1
try:
nfac = geom_symm_match(g, atwts, ax, \
2*np.pi/nval, improp)
except ZeroDivisionError as zde:
# If it's because nval == zero, ignore. Else re-raise.
if nval > 0:
raise zde
## end if
## end try
## loop
# Should be good to return
return nval, nfac | [
"def",
"geom_find_rotsymm",
"(",
"g",
",",
"atwts",
",",
"ax",
",",
"improp",
",",
"nmax",
"=",
"_DEF",
".",
"SYMM_MATCH_NMAX",
",",
"tol",
"=",
"_DEF",
".",
"SYMM_MATCH_TOL",
")",
":",
"import",
"numpy",
"as",
"np",
"g",
"=",
"make_nd_vec",
"(",
"g",
",",
"nd",
"=",
"None",
",",
"t",
"=",
"np",
".",
"float64",
",",
"norm",
"=",
"False",
")",
"ax",
"=",
"make_nd_vec",
"(",
"ax",
",",
"nd",
"=",
"3",
",",
"t",
"=",
"np",
".",
"float64",
",",
"norm",
"=",
"True",
")",
"nval",
"=",
"nmax",
"+",
"1",
"nfac",
"=",
"1.0",
"while",
"nfac",
">",
"tol",
"and",
"nval",
">",
"0",
":",
"nval",
"=",
"nval",
"-",
"1",
"try",
":",
"nfac",
"=",
"geom_symm_match",
"(",
"g",
",",
"atwts",
",",
"ax",
",",
"2",
"*",
"np",
".",
"pi",
"/",
"nval",
",",
"improp",
")",
"except",
"ZeroDivisionError",
"as",
"zde",
":",
"if",
"nval",
">",
"0",
":",
"raise",
"zde",
"return",
"nval",
",",
"nfac"
] | Identify highest-order symmetry for a geometry on a given axis.
Regular and improper axes possible.
.. todo:: Complete geom_find_rotsymm docstring | [
"Identify",
"highest",
"-",
"order",
"symmetry",
"for",
"a",
"geometry",
"on",
"a",
"given",
"axis",
"."
] | 0b1b21662df6abc971407a9386db21a8796fbfe5 | https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/utils/symm.py#L305-L345 | train |
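Usage sketch on a CO2-like linear triatomic along z, centered at the origin; probing an axis perpendicular to the molecular axis should report a C2 match. The atomic weights and bond length are placeholders, and the exact order returned depends on geom_symm_match and the default tolerance.

from opan.utils.symm import geom_find_rotsymm

g = [0.0, 0.0, -1.16, 0.0, 0.0, 0.0, 0.0, 0.0, 1.16]  # O, C, O along z
atwts = [15.999, 12.011, 15.999]
order, mismatch = geom_find_rotsymm(g, atwts, (1.0, 0.0, 0.0), improp=False)
print(order)  # expected 2: the highest C_n about x within tolerance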
bskinn/opan | opan/utils/symm.py | g_subset | def g_subset(g, atwts, atwt,
digits=_DEF.SYMM_ATWT_ROUND_DIGITS):
""" Extract a subset of a geometry matching a desired atom.
.. todo:: Complete g_subset docstring
"""
# Imports
import numpy as np
# Ensure g and atwts are n-D vectors
g = make_nd_vec(g, nd=None, t=np.float64, norm=False)
atwts = make_nd_vec(atwts, nd=None, t=np.float64, norm=False)
# Ensure dims match (should already be checked at object creation...)
if not (len(g) == 3*len(atwts)):
raise ValueError("Dim mismatch [len(g) != 3*len(ats)].")
## end if
# Pull into coordinate groups
co = np.split(g, g.shape[0] // 3)
# Filter by the indicated atomic weight
cf = [c for (c,a) in zip(co, atwts) if \
np.round(a, digits) == np.round(atwt, digits)]
# Expand back to single vector, if possible
if not cf == []:
g_sub = np.concatenate(cf, axis=0)
g_sub = g_sub.reshape((g_sub.shape[0],1))
else:
g_sub = []
## end if
# Return the subset
return g_sub | python | def g_subset(g, atwts, atwt,
digits=_DEF.SYMM_ATWT_ROUND_DIGITS):
""" Extract a subset of a geometry matching a desired atom.
.. todo:: Complete g_subset docstring
"""
# Imports
import numpy as np
# Ensure g and atwts are n-D vectors
g = make_nd_vec(g, nd=None, t=np.float64, norm=False)
atwts = make_nd_vec(atwts, nd=None, t=np.float64, norm=False)
# Ensure dims match (should already be checked at object creation...)
if not (len(g) == 3*len(atwts)):
raise ValueError("Dim mismatch [len(g) != 3*len(ats)].")
## end if
# Pull into coordinate groups
co = np.split(g, g.shape[0] // 3)
# Filter by the indicated atomic weight
cf = [c for (c,a) in zip(co, atwts) if \
np.round(a, digits) == np.round(atwt, digits)]
# Expand back to single vector, if possible
if not cf == []:
g_sub = np.concatenate(cf, axis=0)
g_sub = g_sub.reshape((g_sub.shape[0],1))
else:
g_sub = []
## end if
# Return the subset
return g_sub | [
"def",
"g_subset",
"(",
"g",
",",
"atwts",
",",
"atwt",
",",
"digits",
"=",
"_DEF",
".",
"SYMM_ATWT_ROUND_DIGITS",
")",
":",
"import",
"numpy",
"as",
"np",
"g",
"=",
"make_nd_vec",
"(",
"g",
",",
"nd",
"=",
"None",
",",
"t",
"=",
"np",
".",
"float64",
",",
"norm",
"=",
"False",
")",
"atwts",
"=",
"make_nd_vec",
"(",
"atwts",
",",
"nd",
"=",
"None",
",",
"t",
"=",
"np",
".",
"float64",
",",
"norm",
"=",
"False",
")",
"if",
"not",
"(",
"len",
"(",
"g",
")",
"==",
"3",
"*",
"len",
"(",
"atwts",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"Dim mismatch [len(g) != 3*len(ats)].\"",
")",
"co",
"=",
"np",
".",
"split",
"(",
"g",
",",
"g",
".",
"shape",
"[",
"0",
"]",
"//",
"3",
")",
"cf",
"=",
"[",
"c",
"for",
"(",
"c",
",",
"a",
")",
"in",
"zip",
"(",
"co",
",",
"atwts",
")",
"if",
"np",
".",
"round",
"(",
"a",
",",
"digits",
")",
"==",
"np",
".",
"round",
"(",
"atwt",
",",
"digits",
")",
"]",
"if",
"not",
"cf",
"==",
"[",
"]",
":",
"g_sub",
"=",
"np",
".",
"concatenate",
"(",
"cf",
",",
"axis",
"=",
"0",
")",
"g_sub",
"=",
"g_sub",
".",
"reshape",
"(",
"(",
"g_sub",
".",
"shape",
"[",
"0",
"]",
",",
"1",
")",
")",
"else",
":",
"g_sub",
"=",
"[",
"]",
"return",
"g_sub"
] | Extract a subset of a geometry matching a desired atom.
.. todo:: Complete g_subset docstring | [
"Extract",
"a",
"subset",
"of",
"a",
"geometry",
"matching",
"a",
"desired",
"atom",
"."
] | 0b1b21662df6abc971407a9386db21a8796fbfe5 | https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/utils/symm.py#L698-L734 | train |
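A minimal standalone sketch of the filtering idea behind g_subset, using plain numpy instead of the opan helpers (the geometry, weights, and rounding tolerance below are made-up illustration values):

import numpy as np

# toy water-like geometry: three atoms as a flat 3N vector (O, H, H)
g = np.array([0.0, 0.0, 0.0,  1.1, 0.0, 0.0,  0.0, 1.1, 0.0])
atwts = np.array([15.999, 1.008, 1.008])

# split into per-atom coordinate triples and keep only the hydrogens
coords = np.split(g, g.shape[0] // 3)
kept = [c for c, a in zip(coords, atwts)
        if np.round(a, 4) == np.round(1.008, 4)]
g_sub = np.concatenate(kept).reshape(-1, 1)   # column vector, as in g_subset
print(g_sub.T)                                # [[1.1 0.  0.  0.  1.1 0. ]]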
bskinn/opan | opan/utils/symm.py | mtx_refl | def mtx_refl(nv, reps=1):
""" Generate block-diagonal reflection matrix about nv.
reps must be >=1 and indicates the number of times the reflection
matrix should be repeated along the block diagonal. Typically this
will be the number of atoms in a geometry.
.. todo:: Complete mtx_refl docstring
"""
# Imports
import numpy as np
from scipy import linalg as spla
from ..const import PRM
# Ensure |nv| is large enough for confident directionality
if spla.norm(nv) < PRM.ZERO_VEC_TOL:
raise ValueError("Norm of 'nv' is too small.")
## end if
# Ensure nv is a normalized np.float64 3-vector
nv = make_nd_vec(nv, nd=3, t=np.float64, norm=True)
# Ensure reps is a positive scalar integer
if not np.isscalar(reps):
raise ValueError("'reps' must be scalar.")
## end if
if not np.issubdtype(type(reps), int):
raise ValueError("'reps' must be an integer.")
## end if
if not reps > 0:
raise ValueError("'reps' must be a positive integer.")
## end if
# Initialize the single-point reflection transform matrix
base_mtx = np.zeros(shape=(3,3), dtype=np.float64)
# Construct the single-point transform matrix
for i in range(3):
for j in range(i,3):
if i==j:
base_mtx[i,j] = 1 - 2*nv[i]**2
else:
base_mtx[i,j] = base_mtx[j,i] = -2*nv[i]*nv[j]
## end if
## next j
## next i
# Construct the block-diagonal replicated reflection matrix
refl_mtx= spla.block_diag(*[base_mtx for i in range(reps)])
# Return the result
return refl_mtx | python | def mtx_refl(nv, reps=1):
""" Generate block-diagonal reflection matrix about nv.
reps must be >=1 and indicates the number of times the reflection
matrix should be repeated along the block diagonal. Typically this
will be the number of atoms in a geometry.
.. todo:: Complete mtx_refl docstring
"""
# Imports
import numpy as np
from scipy import linalg as spla
from ..const import PRM
# Ensure |nv| is large enough for confident directionality
if spla.norm(nv) < PRM.ZERO_VEC_TOL:
raise ValueError("Norm of 'nv' is too small.")
## end if
# Ensure nv is a normalized np.float64 3-vector
nv = make_nd_vec(nv, nd=3, t=np.float64, norm=True)
# Ensure reps is a positive scalar integer
if not np.isscalar(reps):
raise ValueError("'reps' must be scalar.")
## end if
if not np.issubdtype(type(reps), int):
raise ValueError("'reps' must be an integer.")
## end if
if not reps > 0:
raise ValueError("'reps' must be a positive integer.")
## end if
# Initialize the single-point reflection transform matrix
base_mtx = np.zeros(shape=(3,3), dtype=np.float64)
# Construct the single-point transform matrix
for i in range(3):
for j in range(i,3):
if i==j:
base_mtx[i,j] = 1 - 2*nv[i]**2
else:
base_mtx[i,j] = base_mtx[j,i] = -2*nv[i]*nv[j]
## end if
## next j
## next i
# Construct the block-diagonal replicated reflection matrix
refl_mtx= spla.block_diag(*[base_mtx for i in range(reps)])
# Return the result
return refl_mtx | [
"def",
"mtx_refl",
"(",
"nv",
",",
"reps",
"=",
"1",
")",
":",
"import",
"numpy",
"as",
"np",
"from",
"scipy",
"import",
"linalg",
"as",
"spla",
"from",
".",
".",
"const",
"import",
"PRM",
"if",
"spla",
".",
"norm",
"(",
"nv",
")",
"<",
"PRM",
".",
"ZERO_VEC_TOL",
":",
"raise",
"ValueError",
"(",
"\"Norm of 'nv' is too small.\"",
")",
"nv",
"=",
"make_nd_vec",
"(",
"nv",
",",
"nd",
"=",
"3",
",",
"t",
"=",
"np",
".",
"float64",
",",
"norm",
"=",
"True",
")",
"if",
"not",
"np",
".",
"isscalar",
"(",
"reps",
")",
":",
"raise",
"ValueError",
"(",
"\"'reps' must be scalar.\"",
")",
"if",
"not",
"np",
".",
"issubdtype",
"(",
"type",
"(",
"reps",
")",
",",
"int",
")",
":",
"raise",
"ValueError",
"(",
"\"'reps' must be an integer.\"",
")",
"if",
"not",
"reps",
">",
"0",
":",
"raise",
"ValueError",
"(",
"\"'reps' must be a positive integer.\"",
")",
"base_mtx",
"=",
"np",
".",
"zeros",
"(",
"shape",
"=",
"(",
"3",
",",
"3",
")",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
"for",
"i",
"in",
"range",
"(",
"3",
")",
":",
"for",
"j",
"in",
"range",
"(",
"i",
",",
"3",
")",
":",
"if",
"i",
"==",
"j",
":",
"base_mtx",
"[",
"i",
",",
"j",
"]",
"=",
"1",
"-",
"2",
"*",
"nv",
"[",
"i",
"]",
"**",
"2",
"else",
":",
"base_mtx",
"[",
"i",
",",
"j",
"]",
"=",
"base_mtx",
"[",
"j",
",",
"i",
"]",
"=",
"-",
"2",
"*",
"nv",
"[",
"i",
"]",
"*",
"nv",
"[",
"j",
"]",
"refl_mtx",
"=",
"spla",
".",
"block_diag",
"(",
"*",
"[",
"base_mtx",
"for",
"i",
"in",
"range",
"(",
"reps",
")",
"]",
")",
"return",
"refl_mtx"
] | Generate block-diagonal reflection matrix about nv.
reps must be >=1 and indicates the number of times the reflection
matrix should be repeated along the block diagonal. Typically this
will be the number of atoms in a geometry.
.. todo:: Complete mtx_refl docstring | [
"Generate",
"block",
"-",
"diagonal",
"reflection",
"matrix",
"about",
"nv",
"."
] | 0b1b21662df6abc971407a9386db21a8796fbfe5 | https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/utils/symm.py#L779-L832 | train |
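As a quick sanity check (not part of the record), the 3x3 block that mtx_refl builds is the Householder reflection I - 2*n*n^T, so it should reverse the unit normal and fix in-plane vectors; block_diag then replicates it once per atom:

import numpy as np
from scipy import linalg as spla

n = np.array([0.0, 0.0, 1.0])                # unit normal to the mirror plane
base = np.eye(3) - 2.0 * np.outer(n, n)      # same entries as base_mtx above
assert np.allclose(base @ n, -n)             # normal is reversed
assert np.allclose(base @ [1.0, 0.0, 0.0], [1.0, 0.0, 0.0])  # in-plane vector fixed

refl2 = spla.block_diag(base, base)          # reps=2, i.e. a two-atom geometry
print(refl2.shape)                           # (6, 6)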
bskinn/opan | opan/utils/symm.py | mtx_rot | def mtx_rot(ax, theta, reps=1):
""" Generate block-diagonal rotation matrix about ax.
Rotation follows the right-hand rule about ax.
.. todo:: Complete mtx_rot docstring
"""
# Imports
import numpy as np
from scipy import linalg as spla
from ..const import PRM
# Ensure |ax| is large enough for confident directionality
if spla.norm(ax) < PRM.ZERO_VEC_TOL:
raise ValueError("Norm of 'ax' is too small.")
## end if
# Ensure ax is a normalized np.float64 3-vector
ax = make_nd_vec(ax, nd=3, t=np.float64, norm=True)
# Ensure reps is a positive scalar integer
if not np.isscalar(reps):
raise ValueError("'reps' must be scalar.")
## end if
if not np.issubdtype(type(reps), int):
raise ValueError("'reps' must be an integer.")
## end if
if not reps > 0:
raise ValueError("'reps' must be a positive integer.")
## end if
# Ensure theta is scalar
if not np.isscalar(theta):
raise ValueError("'theta' must be scalar.")
## end if
# Assemble the modified Levi-Civita matrix
mod_lc = np.array([ [0, -ax[2], ax[1]],
[ax[2], 0, -ax[0]],
[-ax[1], ax[0], 0] ], dtype=np.float64)
# Compute the outer product of the axis vector
ax_oprod = np.dot(ax.reshape((3,1)), ax.reshape((1,3)))
# Construct the base matrix
# Will need to refer to external math to explain this.
base_mtx = np.add(
np.add( (1.0 - np.cos(theta)) * ax_oprod,
np.cos(theta) * np.eye(3)
),
np.sin(theta) * mod_lc
)
# Construct the block-diagonal replicated rotation matrix
rot_mtx= spla.block_diag(*[base_mtx for i in range(reps)])
# Return the result
return rot_mtx | python | def mtx_rot(ax, theta, reps=1):
""" Generate block-diagonal rotation matrix about ax.
Rotation follows the right-hand rule about ax.
.. todo:: Complete mtx_rot docstring
"""
# Imports
import numpy as np
from scipy import linalg as spla
from ..const import PRM
# Ensure |ax| is large enough for confident directionality
if spla.norm(ax) < PRM.ZERO_VEC_TOL:
raise ValueError("Norm of 'ax' is too small.")
## end if
# Ensure ax is a normalized np.float64 3-vector
ax = make_nd_vec(ax, nd=3, t=np.float64, norm=True)
# Ensure reps is a positive scalar integer
if not np.isscalar(reps):
raise ValueError("'reps' must be scalar.")
## end if
if not np.issubdtype(type(reps), int):
raise ValueError("'reps' must be an integer.")
## end if
if not reps > 0:
raise ValueError("'reps' must be a positive integer.")
## end if
# Ensure theta is scalar
if not np.isscalar(theta):
raise ValueError("'theta' must be scalar.")
## end if
# Assemble the modified Levi-Civita matrix
mod_lc = np.array([ [0, -ax[2], ax[1]],
[ax[2], 0, -ax[0]],
[-ax[1], ax[0], 0] ], dtype=np.float64)
# Compute the outer product of the axis vector
ax_oprod = np.dot(ax.reshape((3,1)), ax.reshape((1,3)))
# Construct the base matrix
# Will need to refer to external math to explain this.
base_mtx = np.add(
np.add( (1.0 - np.cos(theta)) * ax_oprod,
np.cos(theta) * np.eye(3)
),
np.sin(theta) * mod_lc
)
# Construct the block-diagonal replicated rotation matrix
rot_mtx= spla.block_diag(*[base_mtx for i in range(reps)])
# Return the result
return rot_mtx | [
"def",
"mtx_rot",
"(",
"ax",
",",
"theta",
",",
"reps",
"=",
"1",
")",
":",
"import",
"numpy",
"as",
"np",
"from",
"scipy",
"import",
"linalg",
"as",
"spla",
"from",
".",
".",
"const",
"import",
"PRM",
"if",
"spla",
".",
"norm",
"(",
"ax",
")",
"<",
"PRM",
".",
"ZERO_VEC_TOL",
":",
"raise",
"ValueError",
"(",
"\"Norm of 'ax' is too small.\"",
")",
"ax",
"=",
"make_nd_vec",
"(",
"ax",
",",
"nd",
"=",
"3",
",",
"t",
"=",
"np",
".",
"float64",
",",
"norm",
"=",
"True",
")",
"if",
"not",
"np",
".",
"isscalar",
"(",
"reps",
")",
":",
"raise",
"ValueError",
"(",
"\"'reps' must be scalar.\"",
")",
"if",
"not",
"np",
".",
"issubdtype",
"(",
"type",
"(",
"reps",
")",
",",
"int",
")",
":",
"raise",
"ValueError",
"(",
"\"'reps' must be an integer.\"",
")",
"if",
"not",
"reps",
">",
"0",
":",
"raise",
"ValueError",
"(",
"\"'reps' must be a positive integer.\"",
")",
"if",
"not",
"np",
".",
"isscalar",
"(",
"theta",
")",
":",
"raise",
"ValueError",
"(",
"\"'theta' must be scalar.\"",
")",
"mod_lc",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"0",
",",
"-",
"ax",
"[",
"2",
"]",
",",
"ax",
"[",
"1",
"]",
"]",
",",
"[",
"ax",
"[",
"2",
"]",
",",
"0",
",",
"-",
"ax",
"[",
"0",
"]",
"]",
",",
"[",
"-",
"ax",
"[",
"1",
"]",
",",
"ax",
"[",
"0",
"]",
",",
"0",
"]",
"]",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
"ax_oprod",
"=",
"np",
".",
"dot",
"(",
"ax",
".",
"reshape",
"(",
"(",
"3",
",",
"1",
")",
")",
",",
"ax",
".",
"reshape",
"(",
"(",
"1",
",",
"3",
")",
")",
")",
"base_mtx",
"=",
"np",
".",
"add",
"(",
"np",
".",
"add",
"(",
"(",
"1.0",
"-",
"np",
".",
"cos",
"(",
"theta",
")",
")",
"*",
"ax_oprod",
",",
"np",
".",
"cos",
"(",
"theta",
")",
"*",
"np",
".",
"eye",
"(",
"3",
")",
")",
",",
"np",
".",
"sin",
"(",
"theta",
")",
"*",
"mod_lc",
")",
"rot_mtx",
"=",
"spla",
".",
"block_diag",
"(",
"*",
"[",
"base_mtx",
"for",
"i",
"in",
"range",
"(",
"reps",
")",
"]",
")",
"return",
"rot_mtx"
] | Generate block-diagonal rotation matrix about ax.
Rotation follows the right-hand rule about ax.
.. todo:: Complete mtx_rot docstring | [
"Generate",
"block",
"-",
"diagonal",
"rotation",
"matrix",
"about",
"ax",
"."
] | 0b1b21662df6abc971407a9386db21a8796fbfe5 | https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/utils/symm.py#L837-L896 | train |
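A standalone check of the Rodrigues-style construction above (illustration only): with R = cos(t)*I + (1 - cos(t))*a a^T + sin(t)*[a]_x, a 90-degree rotation about +z should carry x-hat onto y-hat, confirming the right-handed convention:

import numpy as np

a = np.array([0.0, 0.0, 1.0])
t = np.pi / 2
lc = np.array([[0.0, -a[2], a[1]],           # same modified Levi-Civita
               [a[2], 0.0, -a[0]],           # matrix as mod_lc above
               [-a[1], a[0], 0.0]])
R = np.cos(t) * np.eye(3) + (1 - np.cos(t)) * np.outer(a, a) + np.sin(t) * lc
assert np.allclose(R @ [1.0, 0.0, 0.0], [0.0, 1.0, 0.0])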
daskos/mentor | mentor/binpack.py | ff | def ff(items, targets):
"""First-Fit
This is perhaps the simplest packing heuristic;
it simply packs each item into the first bin that has room.
Complexity O(n^2)
"""
bins = [(target, []) for target in targets]
skip = []
for item in items:
for target, content in bins:
if item <= (target - sum(content)):
content.append(item)
break
else:
skip.append(item)
return bins, skip | python | def ff(items, targets):
"""First-Fit
This is perhaps the simplest packing heuristic;
it simply packs each item into the first bin that has room.
Complexity O(n^2)
"""
bins = [(target, []) for target in targets]
skip = []
for item in items:
for target, content in bins:
if item <= (target - sum(content)):
content.append(item)
break
else:
skip.append(item)
return bins, skip | [
"def",
"ff",
"(",
"items",
",",
"targets",
")",
":",
"bins",
"=",
"[",
"(",
"target",
",",
"[",
"]",
")",
"for",
"target",
"in",
"targets",
"]",
"skip",
"=",
"[",
"]",
"for",
"item",
"in",
"items",
":",
"for",
"target",
",",
"content",
"in",
"bins",
":",
"if",
"item",
"<=",
"(",
"target",
"-",
"sum",
"(",
"content",
")",
")",
":",
"content",
".",
"append",
"(",
"item",
")",
"break",
"else",
":",
"skip",
".",
"append",
"(",
"item",
")",
"return",
"bins",
",",
"skip"
] | First-Fit
This is perhaps the simplest packing heuristic;
it simply packs each item into the first bin that has room.
Complexity O(n^2) | [
"First",
"-",
"Fit"
] | b5fd64e3a3192f5664fa5c03e8517cacb4e0590f | https://github.com/daskos/mentor/blob/b5fd64e3a3192f5664fa5c03e8517cacb4e0590f/mentor/binpack.py#L22-L40 | train |
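A small usage sketch, assuming ff as defined in this record is in scope (the item sizes and bin capacities are arbitrary):

bins, skip = ff(items=[5, 4, 3, 7], targets=[10, 6])
print(bins)   # [(10, [5, 4]), (6, [3])] -- each item lands in the first bin with room
print(skip)   # [7] -- no bin can take the 7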
daskos/mentor | mentor/binpack.py | ffd | def ffd(items, targets, **kwargs):
"""First-Fit Decreasing
This is perhaps the simplest packing heuristic;
it simply packs each item into the first bin that has room.
This algorithm differs from First-Fit only
in having a 'sort'; that is, the items are pre-sorted
(largest to smallest).
Complexity O(n^2)
"""
sizes = zip(items, weight(items, **kwargs))
sizes = sorted(sizes, key=operator.itemgetter(1), reverse=True)
items = map(operator.itemgetter(0), sizes)
return ff(items, targets) | python | def ffd(items, targets, **kwargs):
"""First-Fit Decreasing
This is perhaps the simplest packing heuristic;
it simply packs each item into the first bin that has room.
This algorithm differs from First-Fit only
in having a 'sort'; that is, the items are pre-sorted
(largest to smallest).
Complexity O(n^2)
"""
sizes = zip(items, weight(items, **kwargs))
sizes = sorted(sizes, key=operator.itemgetter(1), reverse=True)
items = map(operator.itemgetter(0), sizes)
return ff(items, targets) | [
"def",
"ffd",
"(",
"items",
",",
"targets",
",",
"**",
"kwargs",
")",
":",
"sizes",
"=",
"zip",
"(",
"items",
",",
"weight",
"(",
"items",
",",
"**",
"kwargs",
")",
")",
"sizes",
"=",
"sorted",
"(",
"sizes",
",",
"key",
"=",
"operator",
".",
"itemgetter",
"(",
"1",
")",
",",
"reverse",
"=",
"True",
")",
"items",
"=",
"map",
"(",
"operator",
".",
"itemgetter",
"(",
"0",
")",
",",
"sizes",
")",
"return",
"ff",
"(",
"items",
",",
"targets",
")"
] | First-Fit Decreasing
This is perhaps the simplest packing heuristic;
it simply packs each item into the first bin that has room.
This algorithm differs from First-Fit only
in having a 'sort'; that is, the items are pre-sorted
(largest to smallest).
Complexity O(n^2) | [
"First",
"-",
"Fit",
"Decreasing"
] | b5fd64e3a3192f5664fa5c03e8517cacb4e0590f | https://github.com/daskos/mentor/blob/b5fd64e3a3192f5664fa5c03e8517cacb4e0590f/mentor/binpack.py#L43-L58 | train |
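To see why the pre-sort helps, compare plain First-Fit with a decreasing order on the same input (sorting inline here to sidestep the module's weight() helper, whose definition is not shown in this record):

items, targets = [3, 6, 7, 4], [10, 10]
print(ff(items, targets))
# ([(10, [3, 6]), (10, [7])], [4]) -- the 4 gets skipped
print(ff(sorted(items, reverse=True), targets))
# ([(10, [7, 3]), (10, [6, 4])], []) -- everything fits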
daskos/mentor | mentor/binpack.py | mr | def mr(items, targets, **kwargs):
"""Max-Rest
Complexity O(n^2)
"""
bins = [(target, []) for target in targets]
skip = []
for item in items:
capacities = [target - sum(content) for target, content in bins]
weighted = weight(capacities, **kwargs)
(target, content), capacity, _ = max(zip(bins, capacities, weighted),
key=operator.itemgetter(2))
if item <= capacity:
content.append(item)
else:
skip.append(item)
return bins, skip | python | def mr(items, targets, **kwargs):
"""Max-Rest
Complexity O(n^2)
"""
bins = [(target, []) for target in targets]
skip = []
for item in items:
capacities = [target - sum(content) for target, content in bins]
weighted = weight(capacities, **kwargs)
(target, content), capacity, _ = max(zip(bins, capacities, weighted),
key=operator.itemgetter(2))
if item <= capacity:
content.append(item)
else:
skip.append(item)
return bins, skip | [
"def",
"mr",
"(",
"items",
",",
"targets",
",",
"**",
"kwargs",
")",
":",
"bins",
"=",
"[",
"(",
"target",
",",
"[",
"]",
")",
"for",
"target",
"in",
"targets",
"]",
"skip",
"=",
"[",
"]",
"for",
"item",
"in",
"items",
":",
"capacities",
"=",
"[",
"target",
"-",
"sum",
"(",
"content",
")",
"for",
"target",
",",
"content",
"in",
"bins",
"]",
"weighted",
"=",
"weight",
"(",
"capacities",
",",
"**",
"kwargs",
")",
"(",
"target",
",",
"content",
")",
",",
"capacity",
",",
"_",
"=",
"max",
"(",
"zip",
"(",
"bins",
",",
"capacities",
",",
"weighted",
")",
",",
"key",
"=",
"operator",
".",
"itemgetter",
"(",
"2",
")",
")",
"if",
"item",
"<=",
"capacity",
":",
"content",
".",
"append",
"(",
"item",
")",
"else",
":",
"skip",
".",
"append",
"(",
"item",
")",
"return",
"bins",
",",
"skip"
] | Max-Rest
Complexity O(n^2) | [
"Max",
"-",
"Rest"
] | b5fd64e3a3192f5664fa5c03e8517cacb4e0590f | https://github.com/daskos/mentor/blob/b5fd64e3a3192f5664fa5c03e8517cacb4e0590f/mentor/binpack.py#L61-L79 | train |
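A sketch of the Max-Rest rule, under the assumption that weight() (defined elsewhere in this module) is monotone on scalar capacities, so the weighted maximum coincides with the bin having the most room left:

bins, skip = mr(items=[5, 5, 2], targets=[10, 6])
print(bins)   # [(10, [5, 2]), (6, [5])] -- each item goes to the emptiest bin
print(skip)   # []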
daskos/mentor | mentor/binpack.py | bf | def bf(items, targets, **kwargs):
"""Best-Fit
Complexity O(n^2)
"""
bins = [(target, []) for target in targets]
skip = []
for item in items:
containers = []
capacities = []
for target, content in bins:
capacity = target - sum(content)
if item <= capacity:
containers.append(content)
capacities.append(capacity - item)
if len(capacities):
weighted = zip(containers, weight(capacities, **kwargs))
content, _ = min(weighted, key=operator.itemgetter(1))
content.append(item)
else:
skip.append(item)
return bins, skip | python | def bf(items, targets, **kwargs):
"""Best-Fit
Complexity O(n^2)
"""
bins = [(target, []) for target in targets]
skip = []
for item in items:
containers = []
capacities = []
for target, content in bins:
capacity = target - sum(content)
if item <= capacity:
containers.append(content)
capacities.append(capacity - item)
if len(capacities):
weighted = zip(containers, weight(capacities, **kwargs))
content, _ = min(weighted, key=operator.itemgetter(1))
content.append(item)
else:
skip.append(item)
return bins, skip | [
"def",
"bf",
"(",
"items",
",",
"targets",
",",
"**",
"kwargs",
")",
":",
"bins",
"=",
"[",
"(",
"target",
",",
"[",
"]",
")",
"for",
"target",
"in",
"targets",
"]",
"skip",
"=",
"[",
"]",
"for",
"item",
"in",
"items",
":",
"containers",
"=",
"[",
"]",
"capacities",
"=",
"[",
"]",
"for",
"target",
",",
"content",
"in",
"bins",
":",
"capacity",
"=",
"target",
"-",
"sum",
"(",
"content",
")",
"if",
"item",
"<=",
"capacity",
":",
"containers",
".",
"append",
"(",
"content",
")",
"capacities",
".",
"append",
"(",
"capacity",
"-",
"item",
")",
"if",
"len",
"(",
"capacities",
")",
":",
"weighted",
"=",
"zip",
"(",
"containers",
",",
"weight",
"(",
"capacities",
",",
"**",
"kwargs",
")",
")",
"content",
",",
"_",
"=",
"min",
"(",
"weighted",
",",
"key",
"=",
"operator",
".",
"itemgetter",
"(",
"1",
")",
")",
"content",
".",
"append",
"(",
"item",
")",
"else",
":",
"skip",
".",
"append",
"(",
"item",
")",
"return",
"bins",
",",
"skip"
] | Best-Fit
Complexity O(n^2) | [
"Best",
"-",
"Fit"
] | b5fd64e3a3192f5664fa5c03e8517cacb4e0590f | https://github.com/daskos/mentor/blob/b5fd64e3a3192f5664fa5c03e8517cacb4e0590f/mentor/binpack.py#L90-L113 | train |
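Best-Fit under the same monotone-weight() assumption as above: each item goes to the feasible bin that would be left with the least spare capacity:

bins, skip = bf(items=[5, 4], targets=[10, 6])
print(bins)   # [(10, [4]), (6, [5])] -- the 5 takes the tighter bin, leaving room for the 4
print(skip)   # []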
daskos/mentor | mentor/binpack.py | bfd | def bfd(items, targets, **kwargs):
"""Best-Fit Decreasing
Complexity O(n^2)
"""
sizes = zip(items, weight(items, **kwargs))
sizes = sorted(sizes, key=operator.itemgetter(1), reverse=True)
items = map(operator.itemgetter(0), sizes)
return bf(items, targets, **kwargs) | python | def bfd(items, targets, **kwargs):
"""Best-Fit Decreasing
Complexity O(n^2)
"""
sizes = zip(items, weight(items, **kwargs))
sizes = sorted(sizes, key=operator.itemgetter(1), reverse=True)
items = map(operator.itemgetter(0), sizes)
return bf(items, targets, **kwargs) | [
"def",
"bfd",
"(",
"items",
",",
"targets",
",",
"**",
"kwargs",
")",
":",
"sizes",
"=",
"zip",
"(",
"items",
",",
"weight",
"(",
"items",
",",
"**",
"kwargs",
")",
")",
"sizes",
"=",
"sorted",
"(",
"sizes",
",",
"key",
"=",
"operator",
".",
"itemgetter",
"(",
"1",
")",
",",
"reverse",
"=",
"True",
")",
"items",
"=",
"map",
"(",
"operator",
".",
"itemgetter",
"(",
"0",
")",
",",
"sizes",
")",
"return",
"bf",
"(",
"items",
",",
"targets",
",",
"**",
"kwargs",
")"
] | Best-Fit Decreasing
Complexity O(n^2) | [
"Best",
"-",
"Fit",
"Decreasing"
] | b5fd64e3a3192f5664fa5c03e8517cacb4e0590f | https://github.com/daskos/mentor/blob/b5fd64e3a3192f5664fa5c03e8517cacb4e0590f/mentor/binpack.py#L116-L124 | train |
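With the same assumption, bfd is just a one-time descending sort followed by Best-Fit, i.e. roughly equivalent to:

bins, skip = bf(sorted([3, 6, 7, 4], reverse=True), [10, 10])
print(bins)   # [(10, [7, 3]), (10, [6, 4])]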
openvax/isovar | isovar/variant_sequence_in_reading_frame.py | trim_sequences | def trim_sequences(variant_sequence, reference_context):
"""
A VariantSequence and ReferenceContext may contain a different number of
nucleotides before the variant locus. Furthermore, the VariantSequence is
always expressed in terms of the positive strand against which it aligned,
but reference transcripts may have sequences from the negative strand of the
genome. Take the reverse complement of the VariantSequence if the
ReferenceContext is from negative strand transcripts and trim either
sequence to ensure that the prefixes are of the same length.
Parameters
----------
variant_sequence : VariantSequence
reference_context : ReferenceContext
Returns a tuple with the following fields:
1) cDNA prefix of variant sequence, trimmed to be same length as the
reference prefix. If the reference context was on the negative
strand then this is the trimmed sequence *after* the variant from
the genomic DNA sequence.
2) cDNA sequence of the variant nucleotides, in reverse complement if
the reference context is on the negative strand.
3) cDNA sequence of the nucleotides after the variant nucleotides. If
the reference context is on the negative strand then this sequence
is the reverse complement of the original prefix sequence.
4) Reference sequence before the variant locus, trimmed to be the
same length as the variant prefix.
5) Reference sequence after the variant locus, untrimmed.
6) Number of nucleotides trimmed from the reference sequence, used
later for adjusting the offset to the first complete codon.
"""
cdna_prefix = variant_sequence.prefix
cdna_alt = variant_sequence.alt
cdna_suffix = variant_sequence.suffix
# if the transcript is on the reverse strand then we have to
# take the sequence PREFIX|VARIANT|SUFFIX
# and take the complement of XIFFUS|TNAIRAV|XIFERP
if reference_context.strand == "-":
# notice that we are setting the *prefix* to be reverse complement
# of the *suffix* and vice versa
cdna_prefix, cdna_alt, cdna_suffix = (
reverse_complement_dna(cdna_suffix),
reverse_complement_dna(cdna_alt),
reverse_complement_dna(cdna_prefix)
)
reference_sequence_before_variant = reference_context.sequence_before_variant_locus
reference_sequence_after_variant = reference_context.sequence_after_variant_locus
# trim the reference prefix and the RNA-derived prefix sequences to the same length
if len(reference_sequence_before_variant) > len(cdna_prefix):
n_trimmed_from_reference = len(reference_sequence_before_variant) - len(cdna_prefix)
n_trimmed_from_variant = 0
elif len(reference_sequence_before_variant) < len(cdna_prefix):
n_trimmed_from_variant = len(cdna_prefix) - len(reference_sequence_before_variant)
n_trimmed_from_reference = 0
else:
n_trimmed_from_variant = 0
n_trimmed_from_reference = 0
reference_sequence_before_variant = reference_sequence_before_variant[
n_trimmed_from_reference:]
cdna_prefix = cdna_prefix[n_trimmed_from_variant:]
return (
cdna_prefix,
cdna_alt,
cdna_suffix,
reference_sequence_before_variant,
reference_sequence_after_variant,
n_trimmed_from_reference
) | python | def trim_sequences(variant_sequence, reference_context):
"""
A VariantSequence and ReferenceContext may contain a different number of
nucleotides before the variant locus. Furthermore, the VariantSequence is
always expressed in terms of the positive strand against which it aligned,
but reference transcripts may have sequences from the negative strand of the
genome. Take the reverse complement of the VariantSequence if the
ReferenceContext is from negative strand transcripts and trim either
sequence to ensure that the prefixes are of the same length.
Parameters
----------
variant_sequence : VariantSequence
reference_context : ReferenceContext
Returns a tuple with the following fields:
1) cDNA prefix of variant sequence, trimmed to be same length as the
reference prefix. If the reference context was on the negative
strand then this is the trimmed sequence *after* the variant from
the genomic DNA sequence.
2) cDNA sequence of the variant nucleotides, in reverse complement if
the reference context is on the negative strand.
3) cDNA sequence of the nucleotides after the variant nucleotides. If
the reference context is on the negative strand then this sequence
is the reverse complement of the original prefix sequence.
4) Reference sequence before the variant locus, trimmed to be the
same length as the variant prefix.
5) Reference sequence after the variant locus, untrimmed.
6) Number of nucleotides trimmed from the reference sequence, used
later for adjusting the offset to the first complete codon.
"""
cdna_prefix = variant_sequence.prefix
cdna_alt = variant_sequence.alt
cdna_suffix = variant_sequence.suffix
# if the transcript is on the reverse strand then we have to
# take the sequence PREFIX|VARIANT|SUFFIX
# and take the complement of XIFFUS|TNAIRAV|XIFERP
if reference_context.strand == "-":
# notice that we are setting the *prefix* to be reverse complement
# of the *suffix* and vice versa
cdna_prefix, cdna_alt, cdna_suffix = (
reverse_complement_dna(cdna_suffix),
reverse_complement_dna(cdna_alt),
reverse_complement_dna(cdna_prefix)
)
reference_sequence_before_variant = reference_context.sequence_before_variant_locus
reference_sequence_after_variant = reference_context.sequence_after_variant_locus
# trim the reference prefix and the RNA-derived prefix sequences to the same length
if len(reference_sequence_before_variant) > len(cdna_prefix):
n_trimmed_from_reference = len(reference_sequence_before_variant) - len(cdna_prefix)
n_trimmed_from_variant = 0
elif len(reference_sequence_before_variant) < len(cdna_prefix):
n_trimmed_from_variant = len(cdna_prefix) - len(reference_sequence_before_variant)
n_trimmed_from_reference = 0
else:
n_trimmed_from_variant = 0
n_trimmed_from_reference = 0
reference_sequence_before_variant = reference_sequence_before_variant[
n_trimmed_from_reference:]
cdna_prefix = cdna_prefix[n_trimmed_from_variant:]
return (
cdna_prefix,
cdna_alt,
cdna_suffix,
reference_sequence_before_variant,
reference_sequence_after_variant,
n_trimmed_from_reference
) | [
"def",
"trim_sequences",
"(",
"variant_sequence",
",",
"reference_context",
")",
":",
"cdna_prefix",
"=",
"variant_sequence",
".",
"prefix",
"cdna_alt",
"=",
"variant_sequence",
".",
"alt",
"cdna_suffix",
"=",
"variant_sequence",
".",
"suffix",
"if",
"reference_context",
".",
"strand",
"==",
"\"-\"",
":",
"cdna_prefix",
",",
"cdna_alt",
",",
"cdna_suffix",
"=",
"(",
"reverse_complement_dna",
"(",
"cdna_suffix",
")",
",",
"reverse_complement_dna",
"(",
"cdna_alt",
")",
",",
"reverse_complement_dna",
"(",
"cdna_prefix",
")",
")",
"reference_sequence_before_variant",
"=",
"reference_context",
".",
"sequence_before_variant_locus",
"reference_sequence_after_variant",
"=",
"reference_context",
".",
"sequence_after_variant_locus",
"if",
"len",
"(",
"reference_sequence_before_variant",
")",
">",
"len",
"(",
"cdna_prefix",
")",
":",
"n_trimmed_from_reference",
"=",
"len",
"(",
"reference_sequence_before_variant",
")",
"-",
"len",
"(",
"cdna_prefix",
")",
"n_trimmed_from_variant",
"=",
"0",
"elif",
"len",
"(",
"reference_sequence_before_variant",
")",
"<",
"len",
"(",
"cdna_prefix",
")",
":",
"n_trimmed_from_variant",
"=",
"len",
"(",
"cdna_prefix",
")",
"-",
"len",
"(",
"reference_sequence_before_variant",
")",
"n_trimmed_from_reference",
"=",
"0",
"else",
":",
"n_trimmed_from_variant",
"=",
"0",
"n_trimmed_from_reference",
"=",
"0",
"reference_sequence_before_variant",
"=",
"reference_sequence_before_variant",
"[",
"n_trimmed_from_reference",
":",
"]",
"cdna_prefix",
"=",
"cdna_prefix",
"[",
"n_trimmed_from_variant",
":",
"]",
"return",
"(",
"cdna_prefix",
",",
"cdna_alt",
",",
"cdna_suffix",
",",
"reference_sequence_before_variant",
",",
"reference_sequence_after_variant",
",",
"n_trimmed_from_reference",
")"
] | A VariantSequence and ReferenceContext may contain a different number of
nucleotides before the variant locus. Furthermore, the VariantSequence is
always expressed in terms of the positive strand against which it aligned,
but reference transcripts may have sequences from the negative strand of the
genome. Take the reverse complement of the VariantSequence if the
ReferenceContext is from negative strand transcripts and trim either
sequence to ensure that the prefixes are of the same length.
Parameters
----------
variant_sequence : VariantSequence
reference_context : ReferenceContext
Returns a tuple with the following fields:
1) cDNA prefix of variant sequence, trimmed to be same length as the
reference prefix. If the reference context was on the negative
strand then this is the trimmed sequence *after* the variant from
the genomic DNA sequence.
2) cDNA sequence of the variant nucleotides, in reverse complement if
the reference context is on the negative strand.
3) cDNA sequence of the nucleotides after the variant nucleotides. If
the reference context is on the negative strand then this sequence
is the reverse complement of the original prefix sequence.
4) Reference sequence before the variant locus, trimmed to be the
same length as the variant prefix.
5) Reference sequence after the variant locus, untrimmed.
6) Number of nucleotides trimmed from the reference sequence, used
later for adjusting the offset to the first complete codon. | [
"A",
"VariantSequence",
"and",
"ReferenceContext",
"may",
"contain",
"a",
"different",
"number",
"of",
"nucleotides",
"before",
"the",
"variant",
"locus",
".",
"Furthermore",
"the",
"VariantSequence",
"is",
"always",
"expressed",
"in",
"terms",
"of",
"the",
"positive",
"strand",
"against",
"which",
"it",
"aligned",
"but",
"reference",
"transcripts",
"may",
"have",
"sequences",
"from",
"the",
"negative",
"strand",
"of",
"the",
"genome",
".",
"Take",
"the",
"reverse",
"complement",
"of",
"the",
"VariantSequence",
"if",
"the",
"ReferenceContext",
"is",
"from",
"negative",
"strand",
"transcripts",
"and",
"trim",
"either",
"sequence",
"to",
"ensure",
"that",
"the",
"prefixes",
"are",
"of",
"the",
"same",
"length",
"."
] | b39b684920e3f6b344851d6598a1a1c67bce913b | https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/variant_sequence_in_reading_frame.py#L140-L213 | train |
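The prefix-equalization arithmetic in isolation, on toy strings rather than real isovar objects (the sequences below are made up):

ref_prefix, cdna_prefix = "ACGTACGT", "TACGT"
n_ref = max(0, len(ref_prefix) - len(cdna_prefix))   # 3 trimmed from the reference
n_var = max(0, len(cdna_prefix) - len(ref_prefix))   # 0 trimmed from the read
print(ref_prefix[n_ref:], cdna_prefix[n_var:])       # TACGT TACGT -- equal-length prefixes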
openvax/isovar | isovar/variant_sequence_in_reading_frame.py | count_mismatches_before_variant | def count_mismatches_before_variant(reference_prefix, cdna_prefix):
"""
Computes the number of mismatching nucleotides between two cDNA sequences before a variant
locus.
Parameters
----------
reference_prefix : str
cDNA sequence of a reference transcript before a variant locus
cdna_prefix : str
cDNA sequence detected from RNAseq before a variant locus
"""
if len(reference_prefix) != len(cdna_prefix):
raise ValueError(
"Expected reference prefix '%s' to be same length as %s" % (
reference_prefix, cdna_prefix))
return sum(xi != yi for (xi, yi) in zip(reference_prefix, cdna_prefix)) | python | def count_mismatches_before_variant(reference_prefix, cdna_prefix):
"""
Computes the number of mismatching nucleotides between two cDNA sequences before a variant
locus.
Parameters
----------
reference_prefix : str
cDNA sequence of a reference transcript before a variant locus
cdna_prefix : str
cDNA sequence detected from RNAseq before a variant locus
"""
if len(reference_prefix) != len(cdna_prefix):
raise ValueError(
"Expected reference prefix '%s' to be same length as %s" % (
reference_prefix, cdna_prefix))
return sum(xi != yi for (xi, yi) in zip(reference_prefix, cdna_prefix)) | [
"def",
"count_mismatches_before_variant",
"(",
"reference_prefix",
",",
"cdna_prefix",
")",
":",
"if",
"len",
"(",
"reference_prefix",
")",
"!=",
"len",
"(",
"cdna_prefix",
")",
":",
"raise",
"ValueError",
"(",
"\"Expected reference prefix '%s' to be same length as %s\"",
"%",
"(",
"reference_prefix",
",",
"cdna_prefix",
")",
")",
"return",
"sum",
"(",
"xi",
"!=",
"yi",
"for",
"(",
"xi",
",",
"yi",
")",
"in",
"zip",
"(",
"reference_prefix",
",",
"cdna_prefix",
")",
")"
] | Computes the number of mismatching nucleotides between two cDNA sequences before a variant
locus.
Parameters
----------
reference_prefix : str
cDNA sequence of a reference transcript before a variant locus
cdna_prefix : str
cDNA sequence detected from RNAseq before a variant locus | [
"Computes",
"the",
"number",
"of",
"mismatching",
"nucleotides",
"between",
"two",
"cDNA",
"sequences",
"before",
"a",
"variant",
"locus",
"."
] | b39b684920e3f6b344851d6598a1a1c67bce913b | https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/variant_sequence_in_reading_frame.py#L216-L233 | train |
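Assuming the function above is in scope, the count is simply positionwise inequality over equal-length prefixes:

print(count_mismatches_before_variant("ACGT", "ACCT"))   # 1
# count_mismatches_before_variant("ACGT", "ACC") would raise ValueError (length mismatch)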
openvax/isovar | isovar/variant_sequence_in_reading_frame.py | count_mismatches_after_variant | def count_mismatches_after_variant(reference_suffix, cdna_suffix):
"""
Computes the number of mismatching nucleotides between two cDNA sequences after a variant locus.
Parameters
----------
reference_suffix : str
cDNA sequence of a reference transcript after a variant locus
cdna_suffix : str
cDNA sequence detected from RNAseq after a variant locus
"""
len_diff = len(cdna_suffix) - len(reference_suffix)
# if the reference is shorter than the read, the read runs into the intron - these count as
# mismatches
return sum(xi != yi for (xi, yi) in zip(reference_suffix, cdna_suffix)) + max(0, len_diff) | python | def count_mismatches_after_variant(reference_suffix, cdna_suffix):
"""
Computes the number of mismatching nucleotides between two cDNA sequences after a variant locus.
Parameters
----------
reference_suffix : str
cDNA sequence of a reference transcript after a variant locus
cdna_suffix : str
cDNA sequence detected from RNAseq after a variant locus
"""
len_diff = len(cdna_suffix) - len(reference_suffix)
# if the reference is shorter than the read, the read runs into the intron - these count as
# mismatches
return sum(xi != yi for (xi, yi) in zip(reference_suffix, cdna_suffix)) + max(0, len_diff) | [
"def",
"count_mismatches_after_variant",
"(",
"reference_suffix",
",",
"cdna_suffix",
")",
":",
"len_diff",
"=",
"len",
"(",
"cdna_suffix",
")",
"-",
"len",
"(",
"reference_suffix",
")",
"return",
"sum",
"(",
"xi",
"!=",
"yi",
"for",
"(",
"xi",
",",
"yi",
")",
"in",
"zip",
"(",
"reference_suffix",
",",
"cdna_suffix",
")",
")",
"+",
"max",
"(",
"0",
",",
"len_diff",
")"
] | Computes the number of mismatching nucleotides between two cDNA sequences after a variant locus.
Parameters
----------
reference_suffix : str
cDNA sequence of a reference transcript after a variant locus
cdna_suffix : str
cDNA sequence detected from RNAseq after a variant locus | [
"Computes",
"the",
"number",
"of",
"mismatching",
"nucleotides",
"between",
"two",
"cDNA",
"sequences",
"after",
"a",
"variant",
"locus",
"."
] | b39b684920e3f6b344851d6598a1a1c67bce913b | https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/variant_sequence_in_reading_frame.py#L236-L253 | train |
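Unlike the prefix version, suffixes may differ in length; read bases overhanging the reference (e.g. a read running into an intron) each count as a mismatch:

print(count_mismatches_after_variant("ACG", "ACGTT"))   # 2 -- overhang only
print(count_mismatches_after_variant("ACG", "AGGTT"))   # 3 -- one substitution plus the overhang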
openvax/isovar | isovar/variant_sequence_in_reading_frame.py | compute_offset_to_first_complete_codon | def compute_offset_to_first_complete_codon(
offset_to_first_complete_reference_codon,
n_trimmed_from_reference_sequence):
"""
Once we've aligned the variant sequence to the ReferenceContext, we need
to transfer reading frame from the reference transcripts to the variant
sequences.
Parameters
----------
offset_to_first_complete_reference_codon : int
n_trimmed_from_reference_sequence : int
Returns an offset into the variant sequence that starts from a complete
codon.
"""
if n_trimmed_from_reference_sequence <= offset_to_first_complete_reference_codon:
return (
offset_to_first_complete_reference_codon -
n_trimmed_from_reference_sequence)
else:
n_nucleotides_trimmed_after_first_codon = (
n_trimmed_from_reference_sequence -
offset_to_first_complete_reference_codon)
frame = n_nucleotides_trimmed_after_first_codon % 3
return (3 - frame) % 3 | python | def compute_offset_to_first_complete_codon(
offset_to_first_complete_reference_codon,
n_trimmed_from_reference_sequence):
"""
Once we've aligned the variant sequence to the ReferenceContext, we need
to transfer reading frame from the reference transcripts to the variant
sequences.
Parameters
----------
offset_to_first_complete_reference_codon : int
n_trimmed_from_reference_sequence : int
Returns an offset into the variant sequence that starts from a complete
codon.
"""
if n_trimmed_from_reference_sequence <= offset_to_first_complete_reference_codon:
return (
offset_to_first_complete_reference_codon -
n_trimmed_from_reference_sequence)
else:
n_nucleotides_trimmed_after_first_codon = (
n_trimmed_from_reference_sequence -
offset_to_first_complete_reference_codon)
frame = n_nucleotides_trimmed_after_first_codon % 3
return (3 - frame) % 3 | [
"def",
"compute_offset_to_first_complete_codon",
"(",
"offset_to_first_complete_reference_codon",
",",
"n_trimmed_from_reference_sequence",
")",
":",
"if",
"n_trimmed_from_reference_sequence",
"<=",
"offset_to_first_complete_reference_codon",
":",
"return",
"(",
"offset_to_first_complete_reference_codon",
"-",
"n_trimmed_from_reference_sequence",
")",
"else",
":",
"n_nucleotides_trimmed_after_first_codon",
"=",
"(",
"n_trimmed_from_reference_sequence",
"-",
"offset_to_first_complete_reference_codon",
")",
"frame",
"=",
"n_nucleotides_trimmed_after_first_codon",
"%",
"3",
"return",
"(",
"3",
"-",
"frame",
")",
"%",
"3"
] | Once we've aligned the variant sequence to the ReferenceContext, we need
to transfer reading frame from the reference transcripts to the variant
sequences.
Parameters
----------
offset_to_first_complete_reference_codon : int
n_trimmed_from_reference_sequence : int
Returns an offset into the variant sequence that starts from a complete
codon. | [
"Once",
"we",
"ve",
"aligned",
"the",
"variant",
"sequence",
"to",
"the",
"ReferenceContext",
"we",
"need",
"to",
"transfer",
"reading",
"frame",
"from",
"the",
"reference",
"transcripts",
"to",
"the",
"variant",
"sequences",
"."
] | b39b684920e3f6b344851d6598a1a1c67bce913b | https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/variant_sequence_in_reading_frame.py#L256-L282 | train |
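The reading-frame bookkeeping, assuming the function above is in scope: if trimming removed fewer bases than the reference codon offset, the offset just shifts; otherwise the frame is re-derived mod 3:

print(compute_offset_to_first_complete_codon(5, 2))   # 3 -- simple shift
print(compute_offset_to_first_complete_codon(2, 6))   # 2 -- (3 - ((6 - 2) % 3)) % 3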
openvax/isovar | isovar/variant_sequence_in_reading_frame.py | match_variant_sequence_to_reference_context | def match_variant_sequence_to_reference_context(
variant_sequence,
reference_context,
min_transcript_prefix_length,
max_transcript_mismatches,
include_mismatches_after_variant=False,
max_trimming_attempts=2):
"""
Iteratively trim low-coverage subsequences of a variant sequence
until it either matches the given reference context or there
are too few nucleotides left in the variant sequence.
Parameters
----------
variant_sequence : VariantSequence
Assembled sequence from RNA reads, will need to be reverse
complemented if matching against a reference transcript on the
negative strand.
reference_context : ReferenceContext
Sequence of reference transcript before the variant and associated
metadata.
min_transcript_prefix_length : int
Minimum number of nucleotides we try to match against a reference
transcript.
max_transcript_mismatches : int
Maximum number of nucleotide differences between reference transcript
sequence and the variant sequence.
include_mismatches_after_variant : bool
Set to true if the number of mismatches after the variant locus should
count toward the total max_transcript_mismatches, which by default
only counts mismatches before the variant locus.
max_trimming_attempts : int
How many times do we try trimming the VariantSequence to higher
levels of coverage before giving up?
Returns VariantSequenceInReadingFrame or None
"""
variant_sequence_in_reading_frame = None
# if we can't get the variant sequence to match this reference
# context then keep trimming it by coverage until it matches, gets too short, or we exhaust the trimming attempts
for i in range(max_trimming_attempts + 1):
# check the reverse-complemented prefix if the reference context is
# on the negative strand since variant sequence is aligned to
# genomic DNA (positive strand)
variant_sequence_too_short = (
(reference_context.strand == "+" and
len(variant_sequence.prefix) < min_transcript_prefix_length) or
(reference_context.strand == "-" and
len(variant_sequence.suffix) < min_transcript_prefix_length)
)
if variant_sequence_too_short:
logger.info(
"Variant sequence %s shorter than min allowed %d (iter=%d)",
variant_sequence,
min_transcript_prefix_length,
i + 1)
return None
variant_sequence_in_reading_frame = \
VariantSequenceInReadingFrame.from_variant_sequence_and_reference_context(
variant_sequence=variant_sequence,
reference_context=reference_context)
if variant_sequence_in_reading_frame is None:
return None
n_mismatch_before_variant = (
variant_sequence_in_reading_frame.number_mismatches_before_variant)
n_mismatch_after_variant = (
variant_sequence_in_reading_frame.number_mismatches_after_variant)
logger.info("Iter #%d/%d: %s" % (
i + 1,
max_trimming_attempts + 1,
variant_sequence_in_reading_frame))
total_mismatches = n_mismatch_before_variant
if include_mismatches_after_variant:
total_mismatches += n_mismatch_after_variant
if total_mismatches <= max_transcript_mismatches:
# if we got a variant sequence + reading frame with sufficiently
# few mismatches then call it a day
return variant_sequence_in_reading_frame
logger.info(
("Too many mismatches (%d) between variant sequence %s and "
"reference context %s (attempt=%d/%d)"),
n_mismatch_before_variant,
variant_sequence,
reference_context,
i + 1,
max_trimming_attempts + 1)
# if portions of the sequence are supported by only 1 read
# then try trimming to 2 to see if the better supported
# subsequence can be better matched against the reference
current_min_coverage = variant_sequence.min_coverage()
logger.info(
"Trimming to subsequence covered by at least %d reads",
current_min_coverage + 1)
variant_sequence = variant_sequence.trim_by_coverage(
current_min_coverage + 1)
return None | python | def match_variant_sequence_to_reference_context(
variant_sequence,
reference_context,
min_transcript_prefix_length,
max_transcript_mismatches,
include_mismatches_after_variant=False,
max_trimming_attempts=2):
"""
Iteratively trim low-coverage subsequences of a variant sequence
until it either matches the given reference context or there
are too few nucleotides left in the variant sequence.
Parameters
----------
variant_sequence : VariantSequence
Assembled sequence from RNA reads, will need to be reverse
complemented if matching against a reference transcript on the
negative strand.
reference_context : ReferenceContext
Sequence of reference transcript before the variant and associated
metadata.
min_transcript_prefix_length : int
Minimum number of nucleotides we try to match against a reference
transcript.
max_transcript_mismatches : int
Maximum number of nucleotide differences between reference transcript
sequence and the variant sequence.
include_mismatches_after_variant : bool
Set to true if the number of mismatches after the variant locus should
count toward the total max_transcript_mismatches, which by default
only counts mismatches before the variant locus.
max_trimming_attempts : int
How many times do we try trimming the VariantSequence to higher
levels of coverage before giving up?
Returns VariantSequenceInReadingFrame or None
"""
variant_sequence_in_reading_frame = None
# if we can't get the variant sequence to match this reference
# context then keep trimming it by coverage until it matches, gets too short, or we exhaust the trimming attempts
for i in range(max_trimming_attempts + 1):
# check the reverse-complemented prefix if the reference context is
# on the negative strand since variant sequence is aligned to
# genomic DNA (positive strand)
variant_sequence_too_short = (
(reference_context.strand == "+" and
len(variant_sequence.prefix) < min_transcript_prefix_length) or
(reference_context.strand == "-" and
len(variant_sequence.suffix) < min_transcript_prefix_length)
)
if variant_sequence_too_short:
logger.info(
"Variant sequence %s shorter than min allowed %d (iter=%d)",
variant_sequence,
min_transcript_prefix_length,
i + 1)
return None
variant_sequence_in_reading_frame = \
VariantSequenceInReadingFrame.from_variant_sequence_and_reference_context(
variant_sequence=variant_sequence,
reference_context=reference_context)
if variant_sequence_in_reading_frame is None:
return None
n_mismatch_before_variant = (
variant_sequence_in_reading_frame.number_mismatches_before_variant)
n_mismatch_after_variant = (
variant_sequence_in_reading_frame.number_mismatches_after_variant)
logger.info("Iter #%d/%d: %s" % (
i + 1,
max_trimming_attempts + 1,
variant_sequence_in_reading_frame))
total_mismatches = n_mismatch_before_variant
if include_mismatches_after_variant:
total_mismatches += n_mismatch_after_variant
if total_mismatches <= max_transcript_mismatches:
# if we got a variant sequence + reading frame with sufficiently
# few mismatches then call it a day
return variant_sequence_in_reading_frame
logger.info(
("Too many mismatches (%d) between variant sequence %s and "
"reference context %s (attempt=%d/%d)"),
n_mismatch_before_variant,
variant_sequence,
reference_context,
i + 1,
max_trimming_attempts + 1)
# if portions of the sequence are supported by only 1 read
# then try trimming to 2 to see if the better supported
# subsequence can be better matched against the reference
current_min_coverage = variant_sequence.min_coverage()
logger.info(
"Trimming to subsequence covered by at least %d reads",
current_min_coverage + 1)
variant_sequence = variant_sequence.trim_by_coverage(
current_min_coverage + 1)
return None | [
"def",
"match_variant_sequence_to_reference_context",
"(",
"variant_sequence",
",",
"reference_context",
",",
"min_transcript_prefix_length",
",",
"max_transcript_mismatches",
",",
"include_mismatches_after_variant",
"=",
"False",
",",
"max_trimming_attempts",
"=",
"2",
")",
":",
"variant_sequence_in_reading_frame",
"=",
"None",
"for",
"i",
"in",
"range",
"(",
"max_trimming_attempts",
"+",
"1",
")",
":",
"variant_sequence_too_short",
"=",
"(",
"(",
"reference_context",
".",
"strand",
"==",
"\"+\"",
"and",
"len",
"(",
"variant_sequence",
".",
"prefix",
")",
"<",
"min_transcript_prefix_length",
")",
"or",
"(",
"reference_context",
".",
"strand",
"==",
"\"-\"",
"and",
"len",
"(",
"variant_sequence",
".",
"suffix",
")",
"<",
"min_transcript_prefix_length",
")",
")",
"if",
"variant_sequence_too_short",
":",
"logger",
".",
"info",
"(",
"\"Variant sequence %s shorter than min allowed %d (iter=%d)\"",
",",
"variant_sequence",
",",
"min_transcript_prefix_length",
",",
"i",
"+",
"1",
")",
"return",
"None",
"variant_sequence_in_reading_frame",
"=",
"VariantSequenceInReadingFrame",
".",
"from_variant_sequence_and_reference_context",
"(",
"variant_sequence",
"=",
"variant_sequence",
",",
"reference_context",
"=",
"reference_context",
")",
"if",
"variant_sequence_in_reading_frame",
"is",
"None",
":",
"return",
"None",
"n_mismatch_before_variant",
"=",
"(",
"variant_sequence_in_reading_frame",
".",
"number_mismatches_before_variant",
")",
"n_mismatch_after_variant",
"=",
"(",
"variant_sequence_in_reading_frame",
".",
"number_mismatches_after_variant",
")",
"logger",
".",
"info",
"(",
"\"Iter #%d/%d: %s\"",
"%",
"(",
"i",
"+",
"1",
",",
"max_trimming_attempts",
"+",
"1",
",",
"variant_sequence_in_reading_frame",
")",
")",
"total_mismatches",
"=",
"n_mismatch_before_variant",
"if",
"include_mismatches_after_variant",
":",
"total_mismatches",
"+=",
"n_mismatch_after_variant",
"if",
"total_mismatches",
"<=",
"max_transcript_mismatches",
":",
"return",
"variant_sequence_in_reading_frame",
"logger",
".",
"info",
"(",
"(",
"\"Too many mismatches (%d) between variant sequence %s and \"",
"\"reference context %s (attempt=%d/%d)\"",
")",
",",
"n_mismatch_before_variant",
",",
"variant_sequence",
",",
"reference_context",
",",
"i",
"+",
"1",
",",
"max_trimming_attempts",
"+",
"1",
")",
"current_min_coverage",
"=",
"variant_sequence",
".",
"min_coverage",
"(",
")",
"logger",
".",
"info",
"(",
"\"Trimming to subsequence covered by at least %d reads\"",
",",
"current_min_coverage",
"+",
"1",
")",
"variant_sequence",
"=",
"variant_sequence",
".",
"trim_by_coverage",
"(",
"current_min_coverage",
"+",
"1",
")",
"return",
"None"
] | Iteratively trim low-coverage subsequences of a variant sequence
until it either matches the given reference context or there
are too few nucleotides left in the variant sequence.
Parameters
----------
variant_sequence : VariantSequence
Assembled sequence from RNA reads, will need to be reverse
complemented if matching against a reference transcript on the
negative strand.
reference_context : ReferenceContext
Sequence of reference transcript before the variant and associated
metadata.
min_transcript_prefix_length : int
Minimum number of nucleotides we try to match against a reference
transcript.
max_transcript_mismatches : int
Maximum number of nucleotide differences between reference transcript
sequence and the variant sequence.
include_mismatches_after_variant : bool
Set to true if the number of mismatches after the variant locus should
count toward the total max_transcript_mismatches, which by default
only counts mismatches before the variant locus.
max_trimming_attempts : int
How many times do we try trimming the VariantSequence to higher
levels of coverage before giving up?
Returns VariantSequenceInReadingFrame or None | [
"Iteratively",
"trim",
"low",
"-",
"coverage",
"subsequences",
"of",
"a",
"variant",
"sequence",
"until",
"it",
"either",
"matches",
"the",
"given",
"reference",
"context",
"or",
"there",
"are",
"too",
"few",
"nucleotides",
"left",
"in",
"the",
"variant",
"sequence",
"."
] | b39b684920e3f6b344851d6598a1a1c67bce913b | https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/variant_sequence_in_reading_frame.py#L285-L392 | train |
openvax/isovar | isovar/genetic_code.py | GeneticCode._check_codons | def _check_codons(self):
"""
If codon table is missing stop codons, then add them.
"""
for stop_codon in self.stop_codons:
if stop_codon in self.codon_table:
if self.codon_table[stop_codon] != "*":
raise ValueError(
("Codon '%s' not found in stop_codons, but codon table "
"indicates that it should be") % (stop_codon,))
else:
self.codon_table[stop_codon] = "*"
for start_codon in self.start_codons:
if start_codon not in self.codon_table:
raise ValueError(
"Start codon '%s' missing from codon table" % (
start_codon,))
for codon, amino_acid in self.codon_table.items():
if amino_acid == "*" and codon not in self.stop_codons:
raise ValueError(
"Non-stop codon '%s' can't translate to '*'" % (
codon,))
if len(self.codon_table) != 64:
raise ValueError(
"Expected 64 codons but found %d in codon table" % (
len(self.codon_table,))) | python | def _check_codons(self):
"""
If codon table is missing stop codons, then add them.
"""
for stop_codon in self.stop_codons:
if stop_codon in self.codon_table:
if self.codon_table[stop_codon] != "*":
raise ValueError(
("Codon '%s' not found in stop_codons, but codon table "
"indicates that it should be") % (stop_codon,))
else:
self.codon_table[stop_codon] = "*"
for start_codon in self.start_codons:
if start_codon not in self.codon_table:
raise ValueError(
"Start codon '%s' missing from codon table" % (
start_codon,))
for codon, amino_acid in self.codon_table.items():
if amino_acid == "*" and codon not in self.stop_codons:
raise ValueError(
"Non-stop codon '%s' can't translate to '*'" % (
codon,))
if len(self.codon_table) != 64:
raise ValueError(
"Expected 64 codons but found %d in codon table" % (
len(self.codon_table,))) | [
"def",
"_check_codons",
"(",
"self",
")",
":",
"for",
"stop_codon",
"in",
"self",
".",
"stop_codons",
":",
"if",
"stop_codon",
"in",
"self",
".",
"codon_table",
":",
"if",
"self",
".",
"codon_table",
"[",
"stop_codon",
"]",
"!=",
"\"*\"",
":",
"raise",
"ValueError",
"(",
"(",
"\"Codon '%s' not found in stop_codons, but codon table \"",
"\"indicates that it should be\"",
")",
"%",
"(",
"stop_codon",
",",
")",
")",
"else",
":",
"self",
".",
"codon_table",
"[",
"stop_codon",
"]",
"=",
"\"*\"",
"for",
"start_codon",
"in",
"self",
".",
"start_codons",
":",
"if",
"start_codon",
"not",
"in",
"self",
".",
"codon_table",
":",
"raise",
"ValueError",
"(",
"\"Start codon '%s' missing from codon table\"",
"%",
"(",
"start_codon",
",",
")",
")",
"for",
"codon",
",",
"amino_acid",
"in",
"self",
".",
"codon_table",
".",
"items",
"(",
")",
":",
"if",
"amino_acid",
"==",
"\"*\"",
"and",
"codon",
"not",
"in",
"self",
".",
"stop_codons",
":",
"raise",
"ValueError",
"(",
"\"Non-stop codon '%s' can't translate to '*'\"",
"%",
"(",
"codon",
",",
")",
")",
"if",
"len",
"(",
"self",
".",
"codon_table",
")",
"!=",
"64",
":",
"raise",
"ValueError",
"(",
"\"Expected 64 codons but found %d in codon table\"",
"%",
"(",
"len",
"(",
"self",
".",
"codon_table",
",",
")",
")",
")"
] | If codon table is missing stop codons, then add them. | [
"If",
"codon",
"table",
"is",
"missing",
"stop",
"codons",
"then",
"add",
"them",
"."
] | b39b684920e3f6b344851d6598a1a1c67bce913b | https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/genetic_code.py#L26-L54 | train |
openvax/isovar | isovar/genetic_code.py | GeneticCode.copy | def copy(
self,
name,
start_codons=None,
stop_codons=None,
codon_table=None,
codon_table_changes=None):
"""
Make copy of this GeneticCode object with optional replacement
values for all fields.
"""
new_start_codons = (
self.start_codons.copy()
if start_codons is None
else start_codons)
new_stop_codons = (
self.stop_codons.copy()
if stop_codons is None
else stop_codons)
new_codon_table = (
self.codon_table.copy()
if codon_table is None
else codon_table)
if codon_table_changes is not None:
new_codon_table.update(codon_table_changes)
return GeneticCode(
name=name,
start_codons=new_start_codons,
stop_codons=new_stop_codons,
codon_table=new_codon_table) | python | def copy(
self,
name,
start_codons=None,
stop_codons=None,
codon_table=None,
codon_table_changes=None):
"""
Make copy of this GeneticCode object with optional replacement
values for all fields.
"""
new_start_codons = (
self.start_codons.copy()
if start_codons is None
else start_codons)
new_stop_codons = (
self.stop_codons.copy()
if stop_codons is None
else stop_codons)
new_codon_table = (
self.codon_table.copy()
if codon_table is None
else codon_table)
if codon_table_changes is not None:
new_codon_table.update(codon_table_changes)
return GeneticCode(
name=name,
start_codons=new_start_codons,
stop_codons=new_stop_codons,
codon_table=new_codon_table) | [
"def",
"copy",
"(",
"self",
",",
"name",
",",
"start_codons",
"=",
"None",
",",
"stop_codons",
"=",
"None",
",",
"codon_table",
"=",
"None",
",",
"codon_table_changes",
"=",
"None",
")",
":",
"new_start_codons",
"=",
"(",
"self",
".",
"start_codons",
".",
"copy",
"(",
")",
"if",
"start_codons",
"is",
"None",
"else",
"start_codons",
")",
"new_stop_codons",
"=",
"(",
"self",
".",
"stop_codons",
".",
"copy",
"(",
")",
"if",
"stop_codons",
"is",
"None",
"else",
"stop_codons",
")",
"new_codon_table",
"=",
"(",
"self",
".",
"codon_table",
".",
"copy",
"(",
")",
"if",
"codon_table",
"is",
"None",
"else",
"codon_table",
")",
"if",
"codon_table_changes",
"is",
"not",
"None",
":",
"new_codon_table",
".",
"update",
"(",
"codon_table_changes",
")",
"return",
"GeneticCode",
"(",
"name",
"=",
"name",
",",
"start_codons",
"=",
"new_start_codons",
",",
"stop_codons",
"=",
"new_stop_codons",
",",
"codon_table",
"=",
"new_codon_table",
")"
] | Make copy of this GeneticCode object with optional replacement
values for all fields. | [
"Make",
"copy",
"of",
"this",
"GeneticCode",
"object",
"with",
"optional",
"replacement",
"values",
"for",
"all",
"fields",
"."
] | b39b684920e3f6b344851d6598a1a1c67bce913b | https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/genetic_code.py#L100-L133 | train |
centralniak/py-raildriver | raildriver/events.py | Listener.start | def start(self):
"""
Start listening to changes
"""
self.running = True
self.thread = threading.Thread(target=self._main_loop)
self.thread.start() | python | def start(self):
"""
Start listening to changes
"""
self.running = True
self.thread = threading.Thread(target=self._main_loop)
self.thread.start() | [
"def",
"start",
"(",
"self",
")",
":",
"self",
".",
"running",
"=",
"True",
"self",
".",
"thread",
"=",
"threading",
".",
"Thread",
"(",
"target",
"=",
"self",
".",
"_main_loop",
")",
"self",
".",
"thread",
".",
"start",
"(",
")"
] | Start listening to changes | [
"Start",
"listening",
"to",
"changes"
] | c7f5f551e0436451b9507fc63a62e49a229282b9 | https://github.com/centralniak/py-raildriver/blob/c7f5f551e0436451b9507fc63a62e49a229282b9/raildriver/events.py#L86-L92 | train |
centralniak/py-raildriver | raildriver/events.py | Listener.subscribe | def subscribe(self, field_names):
"""
Subscribe to given fields.
Special fields cannot be subscribed to and will be checked on every iteration. These include:
* loco name
* coordinates
* fuel level
* gradient
* current heading
* is in tunnel
* time
You can of course still receive notifications when those change.
It is important to understand that when the loco changes the set of possible controllers will likely change
too. Any missing field changes will stop triggering notifications.
:param field_names: list
:raises ValueError if field is not present on current loco
"""
available_controls = dict(self.raildriver.get_controller_list()).values()
for field in field_names:
if field not in available_controls:
raise ValueError('Cannot subscribe to a missing controller {}'.format(field))
self.subscribed_fields = field_names | python | def subscribe(self, field_names):
"""
Subscribe to given fields.
Special fields cannot be subscribed to and will be checked on every iteration. These include:
* loco name
* coordinates
* fuel level
* gradient
* current heading
* is in tunnel
* time
You can of course still receive notifications when those change.
It is important to understand that when the loco changes the set of possible controllers will likely change
too. Any missing field changes will stop triggering notifications.
:param field_names: list
:raises ValueError if field is not present on current loco
"""
available_controls = dict(self.raildriver.get_controller_list()).values()
for field in field_names:
if field not in available_controls:
raise ValueError('Cannot subscribe to a missing controller {}'.format(field))
self.subscribed_fields = field_names | [
"def",
"subscribe",
"(",
"self",
",",
"field_names",
")",
":",
"available_controls",
"=",
"dict",
"(",
"self",
".",
"raildriver",
".",
"get_controller_list",
"(",
")",
")",
".",
"values",
"(",
")",
"for",
"field",
"in",
"field_names",
":",
"if",
"field",
"not",
"in",
"available_controls",
":",
"raise",
"ValueError",
"(",
"'Cannot subscribe to a missing controller {}'",
".",
"format",
"(",
"field",
")",
")",
"self",
".",
"subscribed_fields",
"=",
"field_names"
] | Subscribe to given fields.
Special fields cannot be subscribed to and will be checked on every iteration. These include:
* loco name
* coordinates
* fuel level
* gradient
* current heading
* is in tunnel
* time
You can of course still receive notifications when those change.
It is important to understand that when the loco changes the set of possible controllers will likely change
too. Any missing field changes will stop triggering notifications.
:param field_names: list
:raises ValueError if field is not present on current loco | [
"Subscribe",
"to",
"given",
"fields",
"."
] | c7f5f551e0436451b9507fc63a62e49a229282b9 | https://github.com/centralniak/py-raildriver/blob/c7f5f551e0436451b9507fc63a62e49a229282b9/raildriver/events.py#L101-L127 | train |
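A hedged sketch tying subscribe() to start(); the RailDriver entry point and the Listener constructor are assumed from the rest of the library (neither is shown in these records), and the controller name is hypothetical:

import raildriver
from raildriver.events import Listener

rd = raildriver.RailDriver()            # assumed entry point
listener = Listener(rd)                 # constructor signature assumed
listener.subscribe(['SpeedometerMPH'])  # hypothetical controller name
listener.start()                        # spawns the _main_loop thread shown above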
pyviz/imagen | imagen/patterngenerator.py | PatternGenerator.set_matrix_dimensions | def set_matrix_dimensions(self, bounds, xdensity, ydensity):
"""
Change the dimensions of the matrix into which the pattern
will be drawn. Users of this class should call this method
rather than changing the bounds, xdensity, and ydensity
parameters directly. Subclasses can override this method to
update any internal data structures that may depend on the
matrix dimensions.
"""
self.bounds = bounds
self.xdensity = xdensity
self.ydensity = ydensity
scs = SheetCoordinateSystem(bounds, xdensity, ydensity)
for of in self.output_fns:
if isinstance(of, TransferFn):
of.initialize(SCS=scs, shape=scs.shape) | python | def set_matrix_dimensions(self, bounds, xdensity, ydensity):
"""
Change the dimensions of the matrix into which the pattern
will be drawn. Users of this class should call this method
rather than changing the bounds, xdensity, and ydensity
parameters directly. Subclasses can override this method to
update any internal data structures that may depend on the
matrix dimensions.
"""
self.bounds = bounds
self.xdensity = xdensity
self.ydensity = ydensity
scs = SheetCoordinateSystem(bounds, xdensity, ydensity)
for of in self.output_fns:
if isinstance(of, TransferFn):
of.initialize(SCS=scs, shape=scs.shape) | [
"def",
"set_matrix_dimensions",
"(",
"self",
",",
"bounds",
",",
"xdensity",
",",
"ydensity",
")",
":",
"self",
".",
"bounds",
"=",
"bounds",
"self",
".",
"xdensity",
"=",
"xdensity",
"self",
".",
"ydensity",
"=",
"ydensity",
"scs",
"=",
"SheetCoordinateSystem",
"(",
"bounds",
",",
"xdensity",
",",
"ydensity",
")",
"for",
"of",
"in",
"self",
".",
"output_fns",
":",
"if",
"isinstance",
"(",
"of",
",",
"TransferFn",
")",
":",
"of",
".",
"initialize",
"(",
"SCS",
"=",
"scs",
",",
"shape",
"=",
"scs",
".",
"shape",
")"
] | Change the dimensions of the matrix into which the pattern
will be drawn. Users of this class should call this method
rather than changing the bounds, xdensity, and ydensity
parameters directly. Subclasses can override this method to
update any internal data structures that may depend on the
matrix dimensions. | [
"Change",
"the",
"dimensions",
"of",
"the",
"matrix",
"into",
"which",
"the",
"pattern",
"will",
"be",
"drawn",
".",
"Users",
"of",
"this",
"class",
"should",
"call",
"this",
"method",
"rather",
"than",
"changing",
"the",
"bounds",
"xdensity",
"and",
"ydensity",
"parameters",
"directly",
".",
"Subclasses",
"can",
"override",
"this",
"method",
"to",
"update",
"any",
"internal",
"data",
"structures",
"that",
"may",
"depend",
"on",
"the",
"matrix",
"dimensions",
"."
] | 53c5685c880f54b42795964d8db50b02e8590e88 | https://github.com/pyviz/imagen/blob/53c5685c880f54b42795964d8db50b02e8590e88/imagen/patterngenerator.py#L273-L288 | train |
pyviz/imagen | imagen/patterngenerator.py | PatternGenerator.state_push | def state_push(self):
"Save the state of the output functions, to be restored with state_pop."
for of in self.output_fns:
if hasattr(of,'state_push'):
of.state_push()
super(PatternGenerator, self).state_push() | python | def state_push(self):
"Save the state of the output functions, to be restored with state_pop."
for of in self.output_fns:
if hasattr(of,'state_push'):
of.state_push()
super(PatternGenerator, self).state_push() | [
"def",
"state_push",
"(",
"self",
")",
":",
"\"Save the state of the output functions, to be restored with state_pop.\"",
"for",
"of",
"in",
"self",
".",
"output_fns",
":",
"if",
"hasattr",
"(",
"of",
",",
"'state_push'",
")",
":",
"of",
".",
"state_push",
"(",
")",
"super",
"(",
"PatternGenerator",
",",
"self",
")",
".",
"state_push",
"(",
")"
] | Save the state of the output functions, to be restored with state_pop. | [
"Save",
"the",
"state",
"of",
"the",
"output",
"functions",
"to",
"be",
"restored",
"with",
"state_pop",
"."
] | 53c5685c880f54b42795964d8db50b02e8590e88 | https://github.com/pyviz/imagen/blob/53c5685c880f54b42795964d8db50b02e8590e88/imagen/patterngenerator.py#L290-L295 | train |
pyviz/imagen | imagen/patterngenerator.py | PatternGenerator.state_pop | def state_pop(self):
"Restore the state of the output functions saved by state_push."
for of in self.output_fns:
if hasattr(of,'state_pop'):
of.state_pop()
super(PatternGenerator, self).state_pop() | python | def state_pop(self):
"Restore the state of the output functions saved by state_push."
for of in self.output_fns:
if hasattr(of,'state_pop'):
of.state_pop()
super(PatternGenerator, self).state_pop() | [
"def",
"state_pop",
"(",
"self",
")",
":",
"\"Restore the state of the output functions saved by state_push.\"",
"for",
"of",
"in",
"self",
".",
"output_fns",
":",
"if",
"hasattr",
"(",
"of",
",",
"'state_pop'",
")",
":",
"of",
".",
"state_pop",
"(",
")",
"super",
"(",
"PatternGenerator",
",",
"self",
")",
".",
"state_pop",
"(",
")"
] | Restore the state of the output functions saved by state_push. | [
"Restore",
"the",
"state",
"of",
"the",
"output",
"functions",
"saved",
"by",
"state_push",
"."
] | 53c5685c880f54b42795964d8db50b02e8590e88 | https://github.com/pyviz/imagen/blob/53c5685c880f54b42795964d8db50b02e8590e88/imagen/patterngenerator.py#L298-L303 | train |
pyviz/imagen | imagen/patterngenerator.py | PatternGenerator.pil | def pil(self, **params_to_override):
"""Returns a PIL image for this pattern, overriding parameters if provided."""
from PIL.Image import fromarray
nchans = self.num_channels()
if nchans in [0, 1]:
mode, arr = None, self(**params_to_override)
arr = (255.0 / arr.max() * (arr - arr.min())).astype(np.uint8)
elif nchans in [3,4]:
mode = 'RGB' if nchans==3 else 'RGBA'
arr = np.dstack(list(self.channels(**params_to_override).values())[1:])
arr = (255.0*arr).astype(np.uint8)
else:
raise ValueError("Unsupported number of channels")
return fromarray(arr, mode) | python | def pil(self, **params_to_override):
"""Returns a PIL image for this pattern, overriding parameters if provided."""
from PIL.Image import fromarray
nchans = self.num_channels()
if nchans in [0, 1]:
mode, arr = None, self(**params_to_override)
arr = (255.0 / arr.max() * (arr - arr.min())).astype(np.uint8)
elif nchans in [3,4]:
mode = 'RGB' if nchans==3 else 'RGBA'
arr = np.dstack(list(self.channels(**params_to_override).values())[1:])
arr = (255.0*arr).astype(np.uint8)
else:
raise ValueError("Unsupported number of channels")
return fromarray(arr, mode) | [
"def",
"pil",
"(",
"self",
",",
"**",
"params_to_override",
")",
":",
"from",
"PIL",
".",
"Image",
"import",
"fromarray",
"nchans",
"=",
"self",
".",
"num_channels",
"(",
")",
"if",
"nchans",
"in",
"[",
"0",
",",
"1",
"]",
":",
"mode",
",",
"arr",
"=",
"None",
",",
"self",
"(",
"**",
"params_to_override",
")",
"arr",
"=",
"(",
"255.0",
"/",
"arr",
".",
"max",
"(",
")",
"*",
"(",
"arr",
"-",
"arr",
".",
"min",
"(",
")",
")",
")",
".",
"astype",
"(",
"np",
".",
"uint8",
")",
"elif",
"nchans",
"in",
"[",
"3",
",",
"4",
"]",
":",
"mode",
"=",
"'RGB'",
"if",
"nchans",
"==",
"3",
"else",
"'RGBA'",
"arr",
"=",
"np",
".",
"dstack",
"(",
"self",
".",
"channels",
"(",
"**",
"params_to_override",
")",
".",
"values",
"(",
")",
"[",
"1",
":",
"]",
")",
"arr",
"=",
"(",
"255.0",
"*",
"arr",
")",
".",
"astype",
"(",
"np",
".",
"uint8",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unsupported number of channels\"",
")",
"return",
"fromarray",
"(",
"arr",
",",
"mode",
")"
] | Returns a PIL image for this pattern, overriding parameters if provided. | [
"Returns",
"a",
"PIL",
"image",
"for",
"this",
"pattern",
"overriding",
"parameters",
"if",
"provided",
"."
] | 53c5685c880f54b42795964d8db50b02e8590e88 | https://github.com/pyviz/imagen/blob/53c5685c880f54b42795964d8db50b02e8590e88/imagen/patterngenerator.py#L394-L411 | train |
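A short usage sketch for pil(), not from the source; imagen.Gaussian is assumed to be one of the package's PatternGenerator subclasses:

import imagen

pattern = imagen.Gaussian()           # any single-channel PatternGenerator
img = pattern.pil(orientation=0.5)    # parameters can be overridden per call
img.save('gaussian.png')              # img is a standard PIL.Image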
pyviz/imagen | imagen/patterngenerator.py | Composite.state_push | def state_push(self):
"""
Push the state of all generators
"""
super(Composite,self).state_push()
for gen in self.generators:
gen.state_push() | python | def state_push(self):
"""
Push the state of all generators
"""
super(Composite,self).state_push()
for gen in self.generators:
gen.state_push() | [
"def",
"state_push",
"(",
"self",
")",
":",
"super",
"(",
"Composite",
",",
"self",
")",
".",
"state_push",
"(",
")",
"for",
"gen",
"in",
"self",
".",
"generators",
":",
"gen",
".",
"state_push",
"(",
")"
] | Push the state of all generators | [
"Push",
"the",
"state",
"of",
"all",
"generators"
] | 53c5685c880f54b42795964d8db50b02e8590e88 | https://github.com/pyviz/imagen/blob/53c5685c880f54b42795964d8db50b02e8590e88/imagen/patterngenerator.py#L516-L522 | train |
pyviz/imagen | imagen/patterngenerator.py | Composite.state_pop | def state_pop(self):
"""
Pop the state of all generators
"""
super(Composite,self).state_pop()
for gen in self.generators:
gen.state_pop() | python | def state_pop(self):
"""
Pop the state of all generators
"""
super(Composite,self).state_pop()
for gen in self.generators:
gen.state_pop() | [
"def",
"state_pop",
"(",
"self",
")",
":",
"super",
"(",
"Composite",
",",
"self",
")",
".",
"state_pop",
"(",
")",
"for",
"gen",
"in",
"self",
".",
"generators",
":",
"gen",
".",
"state_pop",
"(",
")"
] | Pop the state of all generators | [
"Pop",
"the",
"state",
"of",
"all",
"generators"
] | 53c5685c880f54b42795964d8db50b02e8590e88 | https://github.com/pyviz/imagen/blob/53c5685c880f54b42795964d8db50b02e8590e88/imagen/patterngenerator.py#L524-L530 | train |
pyviz/imagen | imagen/patterngenerator.py | Composite.function | def function(self,p):
"""Constructs combined pattern out of the individual ones."""
generators = self._advance_pattern_generators(p)
assert hasattr(p.operator,'reduce'),repr(p.operator)+" does not support 'reduce'."
# CEBALERT: mask gets applied by all PGs including the Composite itself
# (leads to redundant calculations in current lissom_oo_or usage, but
# will lead to problems/limitations in the future).
patterns = [pg(xdensity=p.xdensity,ydensity=p.ydensity,
bounds=p.bounds,mask=p.mask,
x=p.x+p.size*(pg.x*np.cos(p.orientation)- pg.y*np.sin(p.orientation)),
y=p.y+p.size*(pg.x*np.sin(p.orientation)+ pg.y*np.cos(p.orientation)),
orientation=pg.orientation+p.orientation,
size=pg.size*p.size)
for pg in generators]
image_array = p.operator.reduce(patterns)
return image_array | python | def function(self,p):
"""Constructs combined pattern out of the individual ones."""
generators = self._advance_pattern_generators(p)
assert hasattr(p.operator,'reduce'),repr(p.operator)+" does not support 'reduce'."
# CEBALERT: mask gets applied by all PGs including the Composite itself
# (leads to redundant calculations in current lissom_oo_or usage, but
# will lead to problems/limitations in the future).
patterns = [pg(xdensity=p.xdensity,ydensity=p.ydensity,
bounds=p.bounds,mask=p.mask,
x=p.x+p.size*(pg.x*np.cos(p.orientation)- pg.y*np.sin(p.orientation)),
y=p.y+p.size*(pg.x*np.sin(p.orientation)+ pg.y*np.cos(p.orientation)),
orientation=pg.orientation+p.orientation,
size=pg.size*p.size)
for pg in generators]
image_array = p.operator.reduce(patterns)
return image_array | [
"def",
"function",
"(",
"self",
",",
"p",
")",
":",
"generators",
"=",
"self",
".",
"_advance_pattern_generators",
"(",
"p",
")",
"assert",
"hasattr",
"(",
"p",
".",
"operator",
",",
"'reduce'",
")",
",",
"repr",
"(",
"p",
".",
"operator",
")",
"+",
"\" does not support 'reduce'.\"",
"patterns",
"=",
"[",
"pg",
"(",
"xdensity",
"=",
"p",
".",
"xdensity",
",",
"ydensity",
"=",
"p",
".",
"ydensity",
",",
"bounds",
"=",
"p",
".",
"bounds",
",",
"mask",
"=",
"p",
".",
"mask",
",",
"x",
"=",
"p",
".",
"x",
"+",
"p",
".",
"size",
"*",
"(",
"pg",
".",
"x",
"*",
"np",
".",
"cos",
"(",
"p",
".",
"orientation",
")",
"-",
"pg",
".",
"y",
"*",
"np",
".",
"sin",
"(",
"p",
".",
"orientation",
")",
")",
",",
"y",
"=",
"p",
".",
"y",
"+",
"p",
".",
"size",
"*",
"(",
"pg",
".",
"x",
"*",
"np",
".",
"sin",
"(",
"p",
".",
"orientation",
")",
"+",
"pg",
".",
"y",
"*",
"np",
".",
"cos",
"(",
"p",
".",
"orientation",
")",
")",
",",
"orientation",
"=",
"pg",
".",
"orientation",
"+",
"p",
".",
"orientation",
",",
"size",
"=",
"pg",
".",
"size",
"*",
"p",
".",
"size",
")",
"for",
"pg",
"in",
"generators",
"]",
"image_array",
"=",
"p",
".",
"operator",
".",
"reduce",
"(",
"patterns",
")",
"return",
"image_array"
] | Constructs combined pattern out of the individual ones. | [
"Constructs",
"combined",
"pattern",
"out",
"of",
"the",
"individual",
"ones",
"."
] | 53c5685c880f54b42795964d8db50b02e8590e88 | https://github.com/pyviz/imagen/blob/53c5685c880f54b42795964d8db50b02e8590e88/imagen/patterngenerator.py#L535-L552 | train |
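A sketch of composing two sub-patterns, assuming generators/operator are constructor parameters matching the attributes used above; np.maximum provides the .reduce required by the assertion in function():

import numpy as np
import imagen

composite = imagen.Composite(
    generators=[imagen.Gaussian(x=-0.2), imagen.Gaussian(x=0.2)],
    operator=np.maximum)              # any ufunc with .reduce works
arr = composite()                     # renders the combined array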
portfoliome/postpy | postpy/ddl.py | compile_column | def compile_column(name: str, data_type: str, nullable: bool) -> str:
"""Create column definition statement."""
null_str = 'NULL' if nullable else 'NOT NULL'
return '{name} {data_type} {null},'.format(name=name,
data_type=data_type,
null=null_str) | python | def compile_column(name: str, data_type: str, nullable: bool) -> str:
"""Create column definition statement."""
null_str = 'NULL' if nullable else 'NOT NULL'
return '{name} {data_type} {null},'.format(name=name,
data_type=data_type,
null=null_str) | [
"def",
"compile_column",
"(",
"name",
":",
"str",
",",
"data_type",
":",
"str",
",",
"nullable",
":",
"bool",
")",
"->",
"str",
":",
"null_str",
"=",
"'NULL'",
"if",
"nullable",
"else",
"'NOT NULL'",
"return",
"'{name} {data_type} {null},'",
".",
"format",
"(",
"name",
"=",
"name",
",",
"data_type",
"=",
"data_type",
",",
"null",
"=",
"null_str",
")"
] | Create column definition statement. | [
"Create",
"column",
"definition",
"statement",
"."
] | fe26199131b15295fc5f669a0ad2a7f47bf490ee | https://github.com/portfoliome/postpy/blob/fe26199131b15295fc5f669a0ad2a7f47bf490ee/postpy/ddl.py#L39-L46 | train |
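A quick example of compile_column; the expected output follows directly from the format string above:

from postpy.ddl import compile_column

compile_column('price', 'NUMERIC(12,2)', nullable=False)
# -> 'price NUMERIC(12,2) NOT NULL,'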
portfoliome/postpy | postpy/ddl.py | MaterializedView.create | def create(self, no_data=False):
"""Declare materalized view."""
if self.query:
ddl_statement = self.compile_create_as()
else:
ddl_statement = self.compile_create()
if no_data:
ddl_statement += '\nWITH NO DATA'
return ddl_statement, self.query_values | python | def create(self, no_data=False):
"""Declare materalized view."""
if self.query:
ddl_statement = self.compile_create_as()
else:
ddl_statement = self.compile_create()
if no_data:
ddl_statement += '\nWITH NO DATA'
return ddl_statement, self.query_values | [
"def",
"create",
"(",
"self",
",",
"no_data",
"=",
"False",
")",
":",
"if",
"self",
".",
"query",
":",
"ddl_statement",
"=",
"self",
".",
"compile_create_as",
"(",
")",
"else",
":",
"ddl_statement",
"=",
"self",
".",
"compile_create",
"(",
")",
"if",
"no_data",
":",
"ddl_statement",
"+=",
"'\\nWITH NO DATA'",
"return",
"ddl_statement",
",",
"self",
".",
"query_values"
] | Declare materialized view. | [
"Declare",
"materalized",
"view",
"."
] | fe26199131b15295fc5f669a0ad2a7f47bf490ee | https://github.com/portfoliome/postpy/blob/fe26199131b15295fc5f669a0ad2a7f47bf490ee/postpy/ddl.py#L97-L108 | train |
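A hedged sketch of create(); the MaterializedView constructor is not shown in these records, so mv is assumed to be built elsewhere, and cursor is a hypothetical psycopg2 cursor:

ddl_statement, params = mv.create(no_data=True)  # (SQL string, query parameters)
cursor.execute(ddl_statement, params)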
openvax/isovar | isovar/effect_prediction.py | predicted_effects_for_variant | def predicted_effects_for_variant(
variant,
transcript_id_whitelist=None,
only_coding_changes=True):
"""
For a given variant, return its set of predicted effects. Optionally
filter to transcripts where this variant results in a non-synonymous
change to the protein sequence.
Parameters
----------
variant : varcode.Variant
transcript_id_whitelist : set
Filter effect predictions to only include these transcripts
Returns a varcode.EffectCollection object (or a plain list of effects when only_coding_changes=True)
"""
effects = []
for transcript in variant.transcripts:
if only_coding_changes and not transcript.complete:
logger.info(
"Skipping transcript %s for variant %s because it's incomplete",
transcript.name,
variant)
continue
if transcript_id_whitelist and transcript.id not in transcript_id_whitelist:
logger.info(
"Skipping transcript %s for variant %s because it's not one of %d allowed",
transcript.name,
variant,
len(transcript_id_whitelist))
continue
effects.append(variant.effect_on_transcript(transcript))
effects = EffectCollection(effects)
n_total_effects = len(effects)
logger.info("Predicted total %d effects for variant %s" % (
n_total_effects,
variant))
if not only_coding_changes:
return effects
else:
nonsynonymous_coding_effects = effects.drop_silent_and_noncoding()
logger.info(
"Keeping %d/%d effects which affect protein coding sequence for %s: %s",
len(nonsynonymous_coding_effects),
n_total_effects,
variant,
nonsynonymous_coding_effects)
usable_effects = [
effect
for effect in nonsynonymous_coding_effects
if effect.mutant_protein_sequence is not None
]
logger.info(
"Keeping %d effects with predictable AA sequences for %s: %s",
len(usable_effects),
variant,
usable_effects)
return usable_effects | python | def predicted_effects_for_variant(
variant,
transcript_id_whitelist=None,
only_coding_changes=True):
"""
For a given variant, return its set of predicted effects. Optionally
filter to transcripts where this variant results in a non-synonymous
change to the protein sequence.
Parameters
----------
variant : varcode.Variant
transcript_id_whitelist : set
Filter effect predictions to only include these transcripts
Returns a varcode.EffectCollection object (or a plain list of effects when only_coding_changes=True)
"""
effects = []
for transcript in variant.transcripts:
if only_coding_changes and not transcript.complete:
logger.info(
"Skipping transcript %s for variant %s because it's incomplete",
transcript.name,
variant)
continue
if transcript_id_whitelist and transcript.id not in transcript_id_whitelist:
logger.info(
"Skipping transcript %s for variant %s because it's not one of %d allowed",
transcript.name,
variant,
len(transcript_id_whitelist))
continue
effects.append(variant.effect_on_transcript(transcript))
effects = EffectCollection(effects)
n_total_effects = len(effects)
logger.info("Predicted total %d effects for variant %s" % (
n_total_effects,
variant))
if not only_coding_changes:
return effects
else:
nonsynonymous_coding_effects = effects.drop_silent_and_noncoding()
logger.info(
"Keeping %d/%d effects which affect protein coding sequence for %s: %s",
len(nonsynonymous_coding_effects),
n_total_effects,
variant,
nonsynonymous_coding_effects)
usable_effects = [
effect
for effect in nonsynonymous_coding_effects
if effect.mutant_protein_sequence is not None
]
logger.info(
"Keeping %d effects with predictable AA sequences for %s: %s",
len(usable_effects),
variant,
usable_effects)
return usable_effects | [
"def",
"predicted_effects_for_variant",
"(",
"variant",
",",
"transcript_id_whitelist",
"=",
"None",
",",
"only_coding_changes",
"=",
"True",
")",
":",
"effects",
"=",
"[",
"]",
"for",
"transcript",
"in",
"variant",
".",
"transcripts",
":",
"if",
"only_coding_changes",
"and",
"not",
"transcript",
".",
"complete",
":",
"logger",
".",
"info",
"(",
"\"Skipping transcript %s for variant %s because it's incomplete\"",
",",
"transcript",
".",
"name",
",",
"variant",
")",
"continue",
"if",
"transcript_id_whitelist",
"and",
"transcript",
".",
"id",
"not",
"in",
"transcript_id_whitelist",
":",
"logger",
".",
"info",
"(",
"\"Skipping transcript %s for variant %s because it's not one of %d allowed\"",
",",
"transcript",
".",
"name",
",",
"variant",
",",
"len",
"(",
"transcript_id_whitelist",
")",
")",
"continue",
"effects",
".",
"append",
"(",
"variant",
".",
"effect_on_transcript",
"(",
"transcript",
")",
")",
"effects",
"=",
"EffectCollection",
"(",
"effects",
")",
"n_total_effects",
"=",
"len",
"(",
"effects",
")",
"logger",
".",
"info",
"(",
"\"Predicted total %d effects for variant %s\"",
"%",
"(",
"n_total_effects",
",",
"variant",
")",
")",
"if",
"not",
"only_coding_changes",
":",
"return",
"effects",
"else",
":",
"nonsynonymous_coding_effects",
"=",
"effects",
".",
"drop_silent_and_noncoding",
"(",
")",
"logger",
".",
"info",
"(",
"\"Keeping %d/%d effects which affect protein coding sequence for %s: %s\"",
",",
"len",
"(",
"nonsynonymous_coding_effects",
")",
",",
"n_total_effects",
",",
"variant",
",",
"nonsynonymous_coding_effects",
")",
"usable_effects",
"=",
"[",
"effect",
"for",
"effect",
"in",
"nonsynonymous_coding_effects",
"if",
"effect",
".",
"mutant_protein_sequence",
"is",
"not",
"None",
"]",
"logger",
".",
"info",
"(",
"\"Keeping %d effects with predictable AA sequences for %s: %s\"",
",",
"len",
"(",
"usable_effects",
")",
",",
"variant",
",",
"usable_effects",
")",
"return",
"usable_effects"
] | For a given variant, return its set of predicted effects. Optionally
filter to transcripts where this variant results in a non-synonymous
change to the protein sequence.
Parameters
----------
variant : varcode.Variant
transcript_id_whitelist : set
Filter effect predictions to only include these transcripts
Returns a varcode.EffectCollection object (or a plain list of effects when only_coding_changes=True) | [
"For",
"a",
"given",
"variant",
"return",
"its",
"set",
"of",
"predicted",
"effects",
".",
"Optionally",
"filter",
"to",
"transcripts",
"where",
"this",
"variant",
"results",
"in",
"a",
"non",
"-",
"synonymous",
"change",
"to",
"the",
"protein",
"sequence",
"."
] | b39b684920e3f6b344851d6598a1a1c67bce913b | https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/effect_prediction.py#L24-L88 | train |
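A usage sketch, not from the source; the variant coordinates are illustrative, and pyensembl/varcode are assumed to be installed since isovar builds on them:

from pyensembl import ensembl_grch38
from varcode import Variant
from isovar.effect_prediction import predicted_effects_for_variant

variant = Variant('17', 7578406, 'C', 'A', ensembl=ensembl_grch38)  # illustrative
for effect in predicted_effects_for_variant(variant):
    print(effect.transcript.name, effect.mutant_protein_sequence[:10])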
openvax/isovar | isovar/effect_prediction.py | reference_transcripts_for_variant | def reference_transcripts_for_variant(
variant,
transcript_id_whitelist=None,
only_coding_changes=True):
"""
For a given variant, find all the transcripts which overlap the
variant and for which it has a predictable effect on the amino acid
sequence of the protein.
"""
predicted_effects = predicted_effects_for_variant(
variant=variant,
transcript_id_whitelist=transcript_id_whitelist,
only_coding_changes=only_coding_changes)
return [effect.transcript for effect in predicted_effects] | python | def reference_transcripts_for_variant(
variant,
transcript_id_whitelist=None,
only_coding_changes=True):
"""
For a given variant, find all the transcripts which overlap the
variant and for which it has a predictable effect on the amino acid
sequence of the protein.
"""
predicted_effects = predicted_effects_for_variant(
variant=variant,
transcript_id_whitelist=transcript_id_whitelist,
only_coding_changes=only_coding_changes)
return [effect.transcript for effect in predicted_effects] | [
"def",
"reference_transcripts_for_variant",
"(",
"variant",
",",
"transcript_id_whitelist",
"=",
"None",
",",
"only_coding_changes",
"=",
"True",
")",
":",
"predicted_effects",
"=",
"predicted_effects_for_variant",
"(",
"variant",
"=",
"variant",
",",
"transcript_id_whitelist",
"=",
"transcript_id_whitelist",
",",
"only_coding_changes",
"=",
"only_coding_changes",
")",
"return",
"[",
"effect",
".",
"transcript",
"for",
"effect",
"in",
"predicted_effects",
"]"
] | For a given variant, find all the transcripts which overlap the
variant and for which it has a predictable effect on the amino acid
sequence of the protein. | [
"For",
"a",
"given",
"variant",
"find",
"all",
"the",
"transcripts",
"which",
"overlap",
"the",
"variant",
"and",
"for",
"which",
"it",
"has",
"a",
"predictable",
"effect",
"on",
"the",
"amino",
"acid",
"sequence",
"of",
"the",
"protein",
"."
] | b39b684920e3f6b344851d6598a1a1c67bce913b | https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/effect_prediction.py#L91-L104 | train |
openvax/isovar | isovar/locus_reads.py | pileup_reads_at_position | def pileup_reads_at_position(samfile, chromosome, base0_position):
"""
Returns a pileup column at the specified position. Unclear if a function
like this is hiding somewhere in pysam API.
"""
# TODO: I want to pass truncate=True, stepper="all"
# but for some reason I get this error:
# pileup() got an unexpected keyword argument 'truncate'
# ...even though these options are listed in the docs for pysam 0.9.0
#
for column in samfile.pileup(
chromosome,
start=base0_position,
end=base0_position + 1):
if column.pos != base0_position:
# if this column isn't centered on the base before the
# variant then keep going
continue
return column.pileups
# if we get to this point then we never saw a pileup at the
# desired position
return [] | python | def pileup_reads_at_position(samfile, chromosome, base0_position):
"""
Returns a pileup column at the specified position. Unclear if a function
like this is hiding somewhere in pysam API.
"""
# TODO: I want to pass truncate=True, stepper="all"
# but for some reason I get this error:
# pileup() got an unexpected keyword argument 'truncate'
# ...even though these options are listed in the docs for pysam 0.9.0
#
for column in samfile.pileup(
chromosome,
start=base0_position,
end=base0_position + 1):
if column.pos != base0_position:
# if this column isn't centered on the base before the
# variant then keep going
continue
return column.pileups
# if we get to this point then we never saw a pileup at the
# desired position
return [] | [
"def",
"pileup_reads_at_position",
"(",
"samfile",
",",
"chromosome",
",",
"base0_position",
")",
":",
"for",
"column",
"in",
"samfile",
".",
"pileup",
"(",
"chromosome",
",",
"start",
"=",
"base0_position",
",",
"end",
"=",
"base0_position",
"+",
"1",
")",
":",
"if",
"column",
".",
"pos",
"!=",
"base0_position",
":",
"continue",
"return",
"column",
".",
"pileups",
"return",
"[",
"]"
] | Returns a pileup column at the specified position. Unclear if a function
like this is hiding somewhere in pysam API. | [
"Returns",
"a",
"pileup",
"column",
"at",
"the",
"specified",
"position",
".",
"Unclear",
"if",
"a",
"function",
"like",
"this",
"is",
"hiding",
"somewhere",
"in",
"pysam",
"API",
"."
] | b39b684920e3f6b344851d6598a1a1c67bce913b | https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/locus_reads.py#L215-L240 | train |
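A minimal sketch; the BAM path and coordinate are hypothetical, and pysam.AlignmentFile is the standard pysam entry point this module expects:

import pysam
from isovar.locus_reads import pileup_reads_at_position

with pysam.AlignmentFile('sample.bam', 'rb') as samfile:
    for element in pileup_reads_at_position(samfile, '1', base0_position=999):
        print(element.alignment.query_name)  # each element is a pysam PileupRead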
openvax/isovar | isovar/locus_reads.py | locus_read_generator | def locus_read_generator(
samfile,
chromosome,
base1_position_before_variant,
base1_position_after_variant,
use_duplicate_reads=USE_DUPLICATE_READS,
use_secondary_alignments=USE_SECONDARY_ALIGNMENTS,
min_mapping_quality=MIN_READ_MAPPING_QUALITY):
"""
Generator that yields a sequence of ReadAtLocus records for reads which
contain the positions before and after a variant. The actual work to figure
out if what's between those positions matches a variant happens later in
the `variant_reads` module.
Parameters
----------
samfile : pysam.AlignmentFile
chromosome : str
base1_position_before_variant : int
Genomic position of reference nucleotide before a variant
base1_position_after_variant : int
Genomic position of reference nucleotide after a variant
use_duplicate_reads : bool
By default, we're ignoring any duplicate reads
use_secondary_alignments : bool
By default we use secondary alignments; set this to False to use
only primary alignments of reads.
min_mapping_quality : int
Drop reads below this mapping quality
Yields ReadAtLocus objects
"""
logger.debug(
"Gathering reads at locus %s: %d-%d",
chromosome,
base1_position_before_variant,
base1_position_after_variant)
base0_position_before_variant = base1_position_before_variant - 1
base0_position_after_variant = base1_position_after_variant - 1
count = 0
# We get a pileup at the base before the variant and then check to make sure
# that reads also overlap the reference position after the variant.
#
# TODO: scan over a wider interval of pileups and collect reads that don't
# overlap the bases before/after a variant due to splicing
for pileup_element in pileup_reads_at_position(
samfile=samfile,
chromosome=chromosome,
base0_position=base0_position_before_variant):
read = LocusRead.from_pysam_pileup_element(
pileup_element,
base0_position_before_variant=base0_position_before_variant,
base0_position_after_variant=base0_position_after_variant,
use_secondary_alignments=use_secondary_alignments,
use_duplicate_reads=use_duplicate_reads,
min_mapping_quality=min_mapping_quality)
if read is not None:
count += 1
yield read
logger.info(
"Found %d reads overlapping locus %s: %d-%d",
count,
chromosome,
base1_position_before_variant,
base1_position_after_variant) | python | def locus_read_generator(
samfile,
chromosome,
base1_position_before_variant,
base1_position_after_variant,
use_duplicate_reads=USE_DUPLICATE_READS,
use_secondary_alignments=USE_SECONDARY_ALIGNMENTS,
min_mapping_quality=MIN_READ_MAPPING_QUALITY):
"""
Generator that yields a sequence of ReadAtLocus records for reads which
contain the positions before and after a variant. The actual work to figure
out if what's between those positions matches a variant happens later in
the `variant_reads` module.
Parameters
----------
samfile : pysam.AlignmentFile
chromosome : str
base1_position_before_variant : int
Genomic position of reference nucleotide before a variant
base1_position_after_variant : int
Genomic position of reference nucleotide after a variant
use_duplicate_reads : bool
By default, we're ignoring any duplicate reads
use_secondary_alignments : bool
By default we use secondary alignments; set this to False to use
only primary alignments of reads.
min_mapping_quality : int
Drop reads below this mapping quality
Yields ReadAtLocus objects
"""
logger.debug(
"Gathering reads at locus %s: %d-%d",
chromosome,
base1_position_before_variant,
base1_position_after_variant)
base0_position_before_variant = base1_position_before_variant - 1
base0_position_after_variant = base1_position_after_variant - 1
count = 0
# We get a pileup at the base before the variant and then check to make sure
# that reads also overlap the reference position after the variant.
#
# TODO: scan over a wider interval of pileups and collect reads that don't
# overlap the bases before/after a variant due to splicing
for pileup_element in pileup_reads_at_position(
samfile=samfile,
chromosome=chromosome,
base0_position=base0_position_before_variant):
read = LocusRead.from_pysam_pileup_element(
pileup_element,
base0_position_before_variant=base0_position_before_variant,
base0_position_after_variant=base0_position_after_variant,
use_secondary_alignments=use_secondary_alignments,
use_duplicate_reads=use_duplicate_reads,
min_mapping_quality=min_mapping_quality)
if read is not None:
count += 1
yield read
logger.info(
"Found %d reads overlapping locus %s: %d-%d",
count,
chromosome,
base1_position_before_variant,
base1_position_after_variant) | [
"def",
"locus_read_generator",
"(",
"samfile",
",",
"chromosome",
",",
"base1_position_before_variant",
",",
"base1_position_after_variant",
",",
"use_duplicate_reads",
"=",
"USE_DUPLICATE_READS",
",",
"use_secondary_alignments",
"=",
"USE_SECONDARY_ALIGNMENTS",
",",
"min_mapping_quality",
"=",
"MIN_READ_MAPPING_QUALITY",
")",
":",
"logger",
".",
"debug",
"(",
"\"Gathering reads at locus %s: %d-%d\"",
",",
"chromosome",
",",
"base1_position_before_variant",
",",
"base1_position_after_variant",
")",
"base0_position_before_variant",
"=",
"base1_position_before_variant",
"-",
"1",
"base0_position_after_variant",
"=",
"base1_position_after_variant",
"-",
"1",
"count",
"=",
"0",
"for",
"pileup_element",
"in",
"pileup_reads_at_position",
"(",
"samfile",
"=",
"samfile",
",",
"chromosome",
"=",
"chromosome",
",",
"base0_position",
"=",
"base0_position_before_variant",
")",
":",
"read",
"=",
"LocusRead",
".",
"from_pysam_pileup_element",
"(",
"pileup_element",
",",
"base0_position_before_variant",
"=",
"base0_position_before_variant",
",",
"base0_position_after_variant",
"=",
"base0_position_after_variant",
",",
"use_secondary_alignments",
"=",
"use_secondary_alignments",
",",
"use_duplicate_reads",
"=",
"use_duplicate_reads",
",",
"min_mapping_quality",
"=",
"min_mapping_quality",
")",
"if",
"read",
"is",
"not",
"None",
":",
"count",
"+=",
"1",
"yield",
"read",
"logger",
".",
"info",
"(",
"\"Found %d reads overlapping locus %s: %d-%d\"",
",",
"count",
",",
"chromosome",
",",
"base1_position_before_variant",
",",
"base1_position_after_variant",
")"
] | Generator that yields a sequence of ReadAtLocus records for reads which
contain the positions before and after a variant. The actual work to figure
out if what's between those positions matches a variant happens later in
the `variant_reads` module.
Parameters
----------
samfile : pysam.AlignmentFile
chromosome : str
base1_position_before_variant : int
Genomic position of reference nucleotide before a variant
base1_position_after_variant : int
Genomic position of reference nucleotide after a variant
use_duplicate_reads : bool
By default, we're ignoring any duplicate reads
use_secondary_alignments : bool
By default we use secondary alignments; set this to False to use
only primary alignments of reads.
min_mapping_quality : int
Drop reads below this mapping quality
Yields ReadAtLocus objects | [
"Generator",
"that",
"yields",
"a",
"sequence",
"of",
"ReadAtLocus",
"records",
"for",
"reads",
"which",
"contain",
"the",
"positions",
"before",
"and",
"after",
"a",
"variant",
".",
"The",
"actual",
"work",
"to",
"figure",
"out",
"if",
"what",
"s",
"between",
"those",
"positions",
"matches",
"a",
"variant",
"happens",
"later",
"in",
"the",
"variant_reads",
"module",
"."
] | b39b684920e3f6b344851d6598a1a1c67bce913b | https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/locus_reads.py#L243-L317 | train |
openvax/isovar | isovar/locus_reads.py | locus_reads_dataframe | def locus_reads_dataframe(*args, **kwargs):
"""
Traverse a BAM file to find all the reads overlapping a specified locus.
Parameters are the same as those for locus_read_generator.
"""
df_builder = DataFrameBuilder(
LocusRead,
variant_columns=False,
converters={
"reference_positions": list_to_string,
"quality_scores": list_to_string,
})
for locus_read in locus_read_generator(*args, **kwargs):
df_builder.add(variant=None, element=locus_read)
return df_builder.to_dataframe() | python | def locus_reads_dataframe(*args, **kwargs):
"""
Traverse a BAM file to find all the reads overlapping a specified locus.
Parameters are the same as those for locus_read_generator.
"""
df_builder = DataFrameBuilder(
LocusRead,
variant_columns=False,
converters={
"reference_positions": list_to_string,
"quality_scores": list_to_string,
})
for locus_read in locus_read_generator(*args, **kwargs):
df_builder.add(variant=None, element=locus_read)
return df_builder.to_dataframe() | [
"def",
"locus_reads_dataframe",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"df_builder",
"=",
"DataFrameBuilder",
"(",
"LocusRead",
",",
"variant_columns",
"=",
"False",
",",
"converters",
"=",
"{",
"\"reference_positions\"",
":",
"list_to_string",
",",
"\"quality_scores\"",
":",
"list_to_string",
",",
"}",
")",
"for",
"locus_read",
"in",
"locus_read_generator",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"df_builder",
".",
"add",
"(",
"variant",
"=",
"None",
",",
"element",
"=",
"locus_read",
")",
"return",
"df_builder",
".",
"to_dataframe",
"(",
")"
] | Traverse a BAM file to find all the reads overlapping a specified locus.
Parameters are the same as those for locus_read_generator. | [
"Traverse",
"a",
"BAM",
"file",
"to",
"find",
"all",
"the",
"reads",
"overlapping",
"a",
"specified",
"locus",
"."
] | b39b684920e3f6b344851d6598a1a1c67bce913b | https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/locus_reads.py#L320-L335 | train |
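A sketch using the same keyword arguments as locus_read_generator; the path and 1-based coordinates are illustrative:

import pysam
from isovar.locus_reads import locus_reads_dataframe

samfile = pysam.AlignmentFile('tumor_rna.bam', 'rb')
df = locus_reads_dataframe(
    samfile=samfile,
    chromosome='1',
    base1_position_before_variant=109,
    base1_position_after_variant=111)
print(df.head())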
portfoliome/postpy | postpy/dml_copy.py | copy_from_csv_sql | def copy_from_csv_sql(qualified_name: str, delimiter=',', encoding='utf8',
null_str='', header=True, escape_str='\\', quote_char='"',
force_not_null=None, force_null=None):
"""Generate copy from csv statement."""
options = []
options.append("DELIMITER '%s'" % delimiter)
options.append("NULL '%s'" % null_str)
if header:
options.append('HEADER')
options.append("QUOTE '%s'" % quote_char)
options.append("ESCAPE '%s'" % escape_str)
if force_not_null:
options.append(_format_force_not_null(column_names=force_not_null))
if force_null:
options.append(_format_force_null(column_names=force_null))
postgres_encoding = get_postgres_encoding(encoding)
options.append("ENCODING '%s'" % postgres_encoding)
copy_sql = _format_copy_csv_sql(qualified_name, copy_options=options)
return copy_sql | python | def copy_from_csv_sql(qualified_name: str, delimiter=',', encoding='utf8',
null_str='', header=True, escape_str='\\', quote_char='"',
force_not_null=None, force_null=None):
"""Generate copy from csv statement."""
options = []
options.append("DELIMITER '%s'" % delimiter)
options.append("NULL '%s'" % null_str)
if header:
options.append('HEADER')
options.append("QUOTE '%s'" % quote_char)
options.append("ESCAPE '%s'" % escape_str)
if force_not_null:
options.append(_format_force_not_null(column_names=force_not_null))
if force_null:
options.append(_format_force_null(column_names=force_null))
postgres_encoding = get_postgres_encoding(encoding)
options.append("ENCODING '%s'" % postgres_encoding)
copy_sql = _format_copy_csv_sql(qualified_name, copy_options=options)
return copy_sql | [
"def",
"copy_from_csv_sql",
"(",
"qualified_name",
":",
"str",
",",
"delimiter",
"=",
"','",
",",
"encoding",
"=",
"'utf8'",
",",
"null_str",
"=",
"''",
",",
"header",
"=",
"True",
",",
"escape_str",
"=",
"'\\\\'",
",",
"quote_char",
"=",
"'\"'",
",",
"force_not_null",
"=",
"None",
",",
"force_null",
"=",
"None",
")",
":",
"options",
"=",
"[",
"]",
"options",
".",
"append",
"(",
"\"DELIMITER '%s'\"",
"%",
"delimiter",
")",
"options",
".",
"append",
"(",
"\"NULL '%s'\"",
"%",
"null_str",
")",
"if",
"header",
":",
"options",
".",
"append",
"(",
"'HEADER'",
")",
"options",
".",
"append",
"(",
"\"QUOTE '%s'\"",
"%",
"quote_char",
")",
"options",
".",
"append",
"(",
"\"ESCAPE '%s'\"",
"%",
"escape_str",
")",
"if",
"force_not_null",
":",
"options",
".",
"append",
"(",
"_format_force_not_null",
"(",
"column_names",
"=",
"force_not_null",
")",
")",
"if",
"force_null",
":",
"options",
".",
"append",
"(",
"_format_force_null",
"(",
"column_names",
"=",
"force_null",
")",
")",
"postgres_encoding",
"=",
"get_postgres_encoding",
"(",
"encoding",
")",
"options",
".",
"append",
"(",
"\"ENCODING '%s'\"",
"%",
"postgres_encoding",
")",
"copy_sql",
"=",
"_format_copy_csv_sql",
"(",
"qualified_name",
",",
"copy_options",
"=",
"options",
")",
"return",
"copy_sql"
] | Generate copy from csv statement. | [
"Generate",
"copy",
"from",
"csv",
"statement",
"."
] | fe26199131b15295fc5f669a0ad2a7f47bf490ee | https://github.com/portfoliome/postpy/blob/fe26199131b15295fc5f669a0ad2a7f47bf490ee/postpy/dml_copy.py#L83-L109 | train |
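A hedged sketch of copy_from_csv_sql; the exact statement shape depends on _format_copy_csv_sql (not shown), and it is assumed to read FROM STDIN so psycopg2's copy_expert can stream the file:

from postpy.dml_copy import copy_from_csv_sql

sql = copy_from_csv_sql('staging.prices', delimiter='|',
                        force_not_null=['ticker'])  # hypothetical table/columns
with open('prices.csv') as f:
    cursor.copy_expert(sql, f)  # cursor is a hypothetical psycopg2 cursor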
openvax/isovar | isovar/protein_sequences.py | sort_protein_sequences | def sort_protein_sequences(protein_sequences):
"""
Sort protein sequences in decreasing order of priority
"""
return list(
sorted(
protein_sequences,
key=ProteinSequence.ascending_sort_key,
reverse=True)) | python | def sort_protein_sequences(protein_sequences):
"""
Sort protein sequences in decreasing order of priority
"""
return list(
sorted(
protein_sequences,
key=ProteinSequence.ascending_sort_key,
reverse=True)) | [
"def",
"sort_protein_sequences",
"(",
"protein_sequences",
")",
":",
"return",
"list",
"(",
"sorted",
"(",
"protein_sequences",
",",
"key",
"=",
"ProteinSequence",
".",
"ascending_sort_key",
",",
"reverse",
"=",
"True",
")",
")"
] | Sort protein sequences in decreasing order of priority | [
"Sort",
"protein",
"sequences",
"in",
"decreasing",
"order",
"of",
"priority"
] | b39b684920e3f6b344851d6598a1a1c67bce913b | https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/protein_sequences.py#L182-L190 | train |
openvax/isovar | isovar/protein_sequences.py | reads_generator_to_protein_sequences_generator | def reads_generator_to_protein_sequences_generator(
variant_and_overlapping_reads_generator,
transcript_id_whitelist=None,
protein_sequence_length=PROTEIN_SEQUENCE_LENGTH,
min_alt_rna_reads=MIN_ALT_RNA_READS,
min_variant_sequence_coverage=MIN_VARIANT_SEQUENCE_COVERAGE,
min_transcript_prefix_length=MIN_TRANSCRIPT_PREFIX_LENGTH,
max_transcript_mismatches=MAX_REFERENCE_TRANSCRIPT_MISMATCHES,
include_mismatches_after_variant=INCLUDE_MISMATCHES_AFTER_VARIANT,
max_protein_sequences_per_variant=MAX_PROTEIN_SEQUENCES_PER_VARIANT,
variant_sequence_assembly=VARIANT_SEQUENCE_ASSEMBLY):
""""
Translates each coding variant in a collection to one or more
Translation objects, which are then aggregated into equivalent
ProteinSequence objects.
Parameters
----------
variant_and_overlapping_reads_generator : generator
Yields sequence of varcode.Variant objects paired with sequences
of AlleleRead objects that support that variant.
transcript_id_whitelist : set, optional
If given, expected to be a set of transcript IDs which we should use
for determining the reading frame around a variant. If omitted, then
try to use all overlapping reference transcripts.
protein_sequence_length : int
Try to translate protein sequences of this length, though sometimes
we'll have to return something shorter (depending on the RNAseq data,
and presence of stop codons).
min_alt_rna_reads : int
Drop variant sequences at loci with fewer than this number of reads
supporting the alt allele.
min_variant_sequence_coverage : int
Trim variant sequences to positions supported by at least this number
of RNA reads.
min_transcript_prefix_length : int
Minimum number of bases we need to try matching between the reference
context and variant sequence.
max_transcript_mismatches : int
Don't try to determine the reading frame for a transcript if more
than this number of bases differ.
include_mismatches_after_variant : bool
Include mismatches after the variant locus in the count compared
against max_transcript_mismatches.
max_protein_sequences_per_variant : int
Number of protein sequences to return for each variant
variant_sequence_assembly : bool
If True, then assemble variant cDNA sequences based on overlap of
RNA reads. If False, then variant cDNA sequences must be fully spanned
and contained within RNA reads.
Yields pairs of a Variant and a list of ProteinSequence objects
"""
for (variant, overlapping_reads) in variant_and_overlapping_reads_generator:
overlapping_transcript_ids = [
t.id
for t in variant.transcripts
if t.is_protein_coding
]
_, ref, alt = trim_variant(variant)
overlapping_reads = list(overlapping_reads)
reads_grouped_by_allele = group_reads_by_allele(overlapping_reads)
ref_reads = reads_grouped_by_allele.get(ref, [])
alt_reads = reads_grouped_by_allele.get(alt, [])
translations = translate_variant_reads(
variant=variant,
variant_reads=alt_reads,
transcript_id_whitelist=transcript_id_whitelist,
protein_sequence_length=protein_sequence_length,
min_alt_rna_reads=min_alt_rna_reads,
min_variant_sequence_coverage=min_variant_sequence_coverage,
min_transcript_prefix_length=min_transcript_prefix_length,
max_transcript_mismatches=max_transcript_mismatches,
include_mismatches_after_variant=include_mismatches_after_variant,
variant_sequence_assembly=variant_sequence_assembly)
protein_sequences = []
for (key, equivalent_translations) in groupby(
translations, key_fn=Translation.as_translation_key).items():
# get the variant read names, transcript IDs and gene names for
# protein sequence we're about to construct
alt_reads_supporting_protein_sequence, group_transcript_ids, group_gene_names = \
ProteinSequence._summarize_translations(equivalent_translations)
logger.info(
"%s: %s alt reads supporting protein sequence (gene names = %s)",
key,
len(alt_reads_supporting_protein_sequence),
group_gene_names)
protein_sequence = ProteinSequence.from_translation_key(
translation_key=key,
translations=equivalent_translations,
overlapping_reads=overlapping_reads,
alt_reads=alt_reads,
ref_reads=ref_reads,
alt_reads_supporting_protein_sequence=alt_reads_supporting_protein_sequence,
transcripts_supporting_protein_sequence=group_transcript_ids,
transcripts_overlapping_variant=overlapping_transcript_ids,
gene=list(group_gene_names))
logger.info("%s: protein sequence = %s" % (key, protein_sequence.amino_acids))
protein_sequences.append(protein_sequence)
# sort protein sequences before returning the top results
protein_sequences = sort_protein_sequences(protein_sequences)
yield variant, protein_sequences[:max_protein_sequences_per_variant] | python | def reads_generator_to_protein_sequences_generator(
variant_and_overlapping_reads_generator,
transcript_id_whitelist=None,
protein_sequence_length=PROTEIN_SEQUENCE_LENGTH,
min_alt_rna_reads=MIN_ALT_RNA_READS,
min_variant_sequence_coverage=MIN_VARIANT_SEQUENCE_COVERAGE,
min_transcript_prefix_length=MIN_TRANSCRIPT_PREFIX_LENGTH,
max_transcript_mismatches=MAX_REFERENCE_TRANSCRIPT_MISMATCHES,
include_mismatches_after_variant=INCLUDE_MISMATCHES_AFTER_VARIANT,
max_protein_sequences_per_variant=MAX_PROTEIN_SEQUENCES_PER_VARIANT,
variant_sequence_assembly=VARIANT_SEQUENCE_ASSEMBLY):
""""
Translates each coding variant in a collection to one or more
Translation objects, which are then aggregated into equivalent
ProteinSequence objects.
Parameters
----------
variant_and_overlapping_reads_generator : generator
Yields sequence of varcode.Variant objects paired with sequences
of AlleleRead objects that support that variant.
transcript_id_whitelist : set, optional
If given, expected to be a set of transcript IDs which we should use
for determining the reading frame around a variant. If omitted, then
try to use all overlapping reference transcripts.
protein_sequence_length : int
Try to translate protein sequences of this length, though sometimes
we'll have to return something shorter (depending on the RNAseq data,
and presence of stop codons).
min_alt_rna_reads : int
Drop variant sequences at loci with fewer than this number of reads
supporting the alt allele.
min_variant_sequence_coverage : int
Trim variant sequences to positions supported by at least this number
of RNA reads.
min_transcript_prefix_length : int
Minimum number of bases we need to try matching between the reference
context and variant sequence.
max_transcript_mismatches : int
Don't try to determine the reading frame for a transcript if more
than this number of bases differ.
include_mismatches_after_variant : bool
Include mismatches after the variant locus in the count compared
against max_transcript_mismatches.
max_protein_sequences_per_variant : int
Number of protein sequences to return for each variant
variant_sequence_assembly : bool
If True, then assemble variant cDNA sequences based on overlap of
RNA reads. If False, then variant cDNA sequences must be fully spanned
and contained within RNA reads.
Yields pairs of a Variant and a list of ProteinSequence objects
"""
for (variant, overlapping_reads) in variant_and_overlapping_reads_generator:
overlapping_transcript_ids = [
t.id
for t in variant.transcripts
if t.is_protein_coding
]
_, ref, alt = trim_variant(variant)
overlapping_reads = list(overlapping_reads)
reads_grouped_by_allele = group_reads_by_allele(overlapping_reads)
ref_reads = reads_grouped_by_allele.get(ref, [])
alt_reads = reads_grouped_by_allele.get(alt, [])
translations = translate_variant_reads(
variant=variant,
variant_reads=alt_reads,
transcript_id_whitelist=transcript_id_whitelist,
protein_sequence_length=protein_sequence_length,
min_alt_rna_reads=min_alt_rna_reads,
min_variant_sequence_coverage=min_variant_sequence_coverage,
min_transcript_prefix_length=min_transcript_prefix_length,
max_transcript_mismatches=max_transcript_mismatches,
include_mismatches_after_variant=include_mismatches_after_variant,
variant_sequence_assembly=variant_sequence_assembly)
protein_sequences = []
for (key, equivalent_translations) in groupby(
translations, key_fn=Translation.as_translation_key).items():
# get the variant read names, transcript IDs and gene names for
# protein sequence we're about to construct
alt_reads_supporting_protein_sequence, group_transcript_ids, group_gene_names = \
ProteinSequence._summarize_translations(equivalent_translations)
logger.info(
"%s: %s alt reads supporting protein sequence (gene names = %s)",
key,
len(alt_reads_supporting_protein_sequence),
group_gene_names)
protein_sequence = ProteinSequence.from_translation_key(
translation_key=key,
translations=equivalent_translations,
overlapping_reads=overlapping_reads,
alt_reads=alt_reads,
ref_reads=ref_reads,
alt_reads_supporting_protein_sequence=alt_reads_supporting_protein_sequence,
transcripts_supporting_protein_sequence=group_transcript_ids,
transcripts_overlapping_variant=overlapping_transcript_ids,
gene=list(group_gene_names))
logger.info("%s: protein sequence = %s" % (key, protein_sequence.amino_acids))
protein_sequences.append(protein_sequence)
# sort protein sequences before returning the top results
protein_sequences = sort_protein_sequences(protein_sequences)
yield variant, protein_sequences[:max_protein_sequences_per_variant] | [
"def",
"reads_generator_to_protein_sequences_generator",
"(",
"variant_and_overlapping_reads_generator",
",",
"transcript_id_whitelist",
"=",
"None",
",",
"protein_sequence_length",
"=",
"PROTEIN_SEQUENCE_LENGTH",
",",
"min_alt_rna_reads",
"=",
"MIN_ALT_RNA_READS",
",",
"min_variant_sequence_coverage",
"=",
"MIN_VARIANT_SEQUENCE_COVERAGE",
",",
"min_transcript_prefix_length",
"=",
"MIN_TRANSCRIPT_PREFIX_LENGTH",
",",
"max_transcript_mismatches",
"=",
"MAX_REFERENCE_TRANSCRIPT_MISMATCHES",
",",
"include_mismatches_after_variant",
"=",
"INCLUDE_MISMATCHES_AFTER_VARIANT",
",",
"max_protein_sequences_per_variant",
"=",
"MAX_PROTEIN_SEQUENCES_PER_VARIANT",
",",
"variant_sequence_assembly",
"=",
"VARIANT_SEQUENCE_ASSEMBLY",
")",
":",
"for",
"(",
"variant",
",",
"overlapping_reads",
")",
"in",
"variant_and_overlapping_reads_generator",
":",
"overlapping_transcript_ids",
"=",
"[",
"t",
".",
"id",
"for",
"t",
"in",
"variant",
".",
"transcripts",
"if",
"t",
".",
"is_protein_coding",
"]",
"_",
",",
"ref",
",",
"alt",
"=",
"trim_variant",
"(",
"variant",
")",
"overlapping_reads",
"=",
"list",
"(",
"overlapping_reads",
")",
"reads_grouped_by_allele",
"=",
"group_reads_by_allele",
"(",
"overlapping_reads",
")",
"ref_reads",
"=",
"reads_grouped_by_allele",
".",
"get",
"(",
"ref",
",",
"[",
"]",
")",
"alt_reads",
"=",
"reads_grouped_by_allele",
".",
"get",
"(",
"alt",
",",
"[",
"]",
")",
"translations",
"=",
"translate_variant_reads",
"(",
"variant",
"=",
"variant",
",",
"variant_reads",
"=",
"alt_reads",
",",
"transcript_id_whitelist",
"=",
"transcript_id_whitelist",
",",
"protein_sequence_length",
"=",
"protein_sequence_length",
",",
"min_alt_rna_reads",
"=",
"min_alt_rna_reads",
",",
"min_variant_sequence_coverage",
"=",
"min_variant_sequence_coverage",
",",
"min_transcript_prefix_length",
"=",
"min_transcript_prefix_length",
",",
"max_transcript_mismatches",
"=",
"max_transcript_mismatches",
",",
"include_mismatches_after_variant",
"=",
"include_mismatches_after_variant",
",",
"variant_sequence_assembly",
"=",
"variant_sequence_assembly",
")",
"protein_sequences",
"=",
"[",
"]",
"for",
"(",
"key",
",",
"equivalent_translations",
")",
"in",
"groupby",
"(",
"translations",
",",
"key_fn",
"=",
"Translation",
".",
"as_translation_key",
")",
".",
"items",
"(",
")",
":",
"alt_reads_supporting_protein_sequence",
",",
"group_transcript_ids",
",",
"group_gene_names",
"=",
"ProteinSequence",
".",
"_summarize_translations",
"(",
"equivalent_translations",
")",
"logger",
".",
"info",
"(",
"\"%s: %s alt reads supporting protein sequence (gene names = %s)\"",
",",
"key",
",",
"len",
"(",
"alt_reads_supporting_protein_sequence",
")",
",",
"group_gene_names",
")",
"protein_sequence",
"=",
"ProteinSequence",
".",
"from_translation_key",
"(",
"translation_key",
"=",
"key",
",",
"translations",
"=",
"equivalent_translations",
",",
"overlapping_reads",
"=",
"overlapping_reads",
",",
"alt_reads",
"=",
"alt_reads",
",",
"ref_reads",
"=",
"ref_reads",
",",
"alt_reads_supporting_protein_sequence",
"=",
"alt_reads_supporting_protein_sequence",
",",
"transcripts_supporting_protein_sequence",
"=",
"group_transcript_ids",
",",
"transcripts_overlapping_variant",
"=",
"overlapping_transcript_ids",
",",
"gene",
"=",
"list",
"(",
"group_gene_names",
")",
")",
"logger",
".",
"info",
"(",
"\"%s: protein sequence = %s\"",
"%",
"(",
"key",
",",
"protein_sequence",
".",
"amino_acids",
")",
")",
"protein_sequences",
".",
"append",
"(",
"protein_sequence",
")",
"protein_sequences",
"=",
"sort_protein_sequences",
"(",
"protein_sequences",
")",
"yield",
"variant",
",",
"protein_sequences",
"[",
":",
"max_protein_sequences_per_variant",
"]"
] | Translates each coding variant in a collection to one or more
Translation objects, which are then aggregated into equivalent
ProteinSequence objects.
Parameters
----------
variant_and_overlapping_reads_generator : generator
Yields sequence of varcode.Variant objects paired with sequences
of AlleleRead objects that support that variant.
transcript_id_whitelist : set, optional
If given, expected to be a set of transcript IDs which we should use
for determining the reading frame around a variant. If omitted, then
try to use all overlapping reference transcripts.
protein_sequence_length : int
Try to translate protein sequences of this length, though sometimes
we'll have to return something shorter (depending on the RNAseq data,
and presence of stop codons).
min_alt_rna_reads : int
Drop variant sequences at loci with fewer than this number of reads
supporting the alt allele.
min_variant_sequence_coverage : int
Trim variant sequences to positions supported by at least this number
of RNA reads.
min_transcript_prefix_length : int
Minimum number of bases we need to try matching between the reference
context and variant sequence.
max_transcript_mismatches : int
Don't try to determine the reading frame for a transcript if more
than this number of bases differ.
include_mismatches_after_variant : bool
Include mismatches after the variant locus in the count compared
against max_transcript_mismatches.
max_protein_sequences_per_variant : int
Number of protein sequences to return for each variant
variant_sequence_assembly : bool
If True, then assemble variant cDNA sequences based on overlap of
RNA reads. If False, then variant cDNA sequences must be fully spanned
and contained within RNA reads.
Yields pairs of a Variant and a list of ProteinSequence objects | [
"Translates",
"each",
"coding",
"variant",
"in",
"a",
"collection",
"to",
"one",
"or",
"more",
"Translation",
"objects",
"which",
"are",
"then",
"aggregated",
"into",
"equivalent",
"ProteinSequence",
"objects",
"."
] | b39b684920e3f6b344851d6598a1a1c67bce913b | https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/protein_sequences.py#L192-L311 | train |
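A driver-loop sketch; reads_gen must yield (varcode.Variant, AlleleRead iterable) pairs, and how it is built (e.g. from a BAM file) is outside these records:

from isovar.protein_sequences import reads_generator_to_protein_sequences_generator

for variant, protein_sequences in reads_generator_to_protein_sequences_generator(
        reads_gen,                            # hypothetical upstream generator
        protein_sequence_length=25,
        max_protein_sequences_per_variant=1):
    for seq in protein_sequences:
        print(variant, seq.amino_acids)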
openvax/isovar | isovar/protein_sequences.py | ProteinSequence.from_translation_key | def from_translation_key(
cls,
translation_key,
translations,
overlapping_reads,
ref_reads,
alt_reads,
alt_reads_supporting_protein_sequence,
transcripts_overlapping_variant,
transcripts_supporting_protein_sequence,
gene):
"""
Create a ProteinSequence object from a TranslationKey, along with
all the extra fields a ProteinSequence requires.
"""
return cls(
amino_acids=translation_key.amino_acids,
variant_aa_interval_start=translation_key.variant_aa_interval_start,
variant_aa_interval_end=translation_key.variant_aa_interval_end,
ends_with_stop_codon=translation_key.ends_with_stop_codon,
frameshift=translation_key.frameshift,
translations=translations,
overlapping_reads=overlapping_reads,
ref_reads=ref_reads,
alt_reads=alt_reads,
alt_reads_supporting_protein_sequence=(
alt_reads_supporting_protein_sequence),
transcripts_overlapping_variant=transcripts_overlapping_variant,
transcripts_supporting_protein_sequence=(
transcripts_supporting_protein_sequence),
gene=gene) | python | def from_translation_key(
cls,
translation_key,
translations,
overlapping_reads,
ref_reads,
alt_reads,
alt_reads_supporting_protein_sequence,
transcripts_overlapping_variant,
transcripts_supporting_protein_sequence,
gene):
"""
Create a ProteinSequence object from a TranslationKey, along with
all the extra fields a ProteinSequence requires.
"""
return cls(
amino_acids=translation_key.amino_acids,
variant_aa_interval_start=translation_key.variant_aa_interval_start,
variant_aa_interval_end=translation_key.variant_aa_interval_end,
ends_with_stop_codon=translation_key.ends_with_stop_codon,
frameshift=translation_key.frameshift,
translations=translations,
overlapping_reads=overlapping_reads,
ref_reads=ref_reads,
alt_reads=alt_reads,
alt_reads_supporting_protein_sequence=(
alt_reads_supporting_protein_sequence),
transcripts_overlapping_variant=transcripts_overlapping_variant,
transcripts_supporting_protein_sequence=(
transcripts_supporting_protein_sequence),
gene=gene) | [
"def",
"from_translation_key",
"(",
"cls",
",",
"translation_key",
",",
"translations",
",",
"overlapping_reads",
",",
"ref_reads",
",",
"alt_reads",
",",
"alt_reads_supporting_protein_sequence",
",",
"transcripts_overlapping_variant",
",",
"transcripts_supporting_protein_sequence",
",",
"gene",
")",
":",
"return",
"cls",
"(",
"amino_acids",
"=",
"translation_key",
".",
"amino_acids",
",",
"variant_aa_interval_start",
"=",
"translation_key",
".",
"variant_aa_interval_start",
",",
"variant_aa_interval_end",
"=",
"translation_key",
".",
"variant_aa_interval_end",
",",
"ends_with_stop_codon",
"=",
"translation_key",
".",
"ends_with_stop_codon",
",",
"frameshift",
"=",
"translation_key",
".",
"frameshift",
",",
"translations",
"=",
"translations",
",",
"overlapping_reads",
"=",
"overlapping_reads",
",",
"ref_reads",
"=",
"ref_reads",
",",
"alt_reads",
"=",
"alt_reads",
",",
"alt_reads_supporting_protein_sequence",
"=",
"(",
"alt_reads_supporting_protein_sequence",
")",
",",
"transcripts_overlapping_variant",
"=",
"transcripts_overlapping_variant",
",",
"transcripts_supporting_protein_sequence",
"=",
"(",
"transcripts_supporting_protein_sequence",
")",
",",
"gene",
"=",
"gene",
")"
] | Create a ProteinSequence object from a TranslationKey, along with
all the extra fields a ProteinSequence requires. | [
"Create",
"a",
"ProteinSequence",
"object",
"from",
"a",
"TranslationKey",
"along",
"with",
"all",
"the",
"extra",
"fields",
"a",
"ProteinSequence",
"requires",
"."
] | b39b684920e3f6b344851d6598a1a1c67bce913b | https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/protein_sequences.py#L131-L161 | train |
portfoliome/postpy | postpy/base.py | make_delete_table | def make_delete_table(table: Table, delete_prefix='delete_from__') -> Table:
"""Table referencing a delete from using primary key join."""
name = delete_prefix + table.name
primary_key = table.primary_key
key_names = set(primary_key.column_names)
columns = [column for column in table.columns if column.name in key_names]
table = Table(name, columns, primary_key)
return table | python | def make_delete_table(table: Table, delete_prefix='delete_from__') -> Table:
"""Table referencing a delete from using primary key join."""
name = delete_prefix + table.name
primary_key = table.primary_key
key_names = set(primary_key.column_names)
columns = [column for column in table.columns if column.name in key_names]
table = Table(name, columns, primary_key)
return table | [
"def",
"make_delete_table",
"(",
"table",
":",
"Table",
",",
"delete_prefix",
"=",
"'delete_from__'",
")",
"->",
"Table",
":",
"name",
"=",
"delete_prefix",
"+",
"table",
".",
"name",
"primary_key",
"=",
"table",
".",
"primary_key",
"key_names",
"=",
"set",
"(",
"primary_key",
".",
"column_names",
")",
"columns",
"=",
"[",
"column",
"for",
"column",
"in",
"table",
".",
"columns",
"if",
"column",
".",
"name",
"in",
"key_names",
"]",
"table",
"=",
"Table",
"(",
"name",
",",
"columns",
",",
"primary_key",
")",
"return",
"table"
] | Table referencing a delete from using primary key join. | [
"Table",
"referencing",
"a",
"delete",
"from",
"using",
"primary",
"key",
"join",
"."
] | fe26199131b15295fc5f669a0ad2a7f47bf490ee | https://github.com/portfoliome/postpy/blob/fe26199131b15295fc5f669a0ad2a7f47bf490ee/postpy/base.py#L136-L145 | train |
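A minimal usage sketch for make_delete_table. It assumes Column takes (name, data_type, nullable) positionally and PrimaryKey wraps a list of key column names, as the reflect_table record further below suggests; the table itself is hypothetical.

from postpy.base import Column, PrimaryKey, Table, make_delete_table

columns = [Column('id', 'integer', nullable=False),
           Column('name', 'text', nullable=True)]
users = Table('users', columns, PrimaryKey(['id']))

staging = make_delete_table(users)
# staging.name == 'delete_from__users' and it keeps only the 'id' column,
# ready to be bulk-loaded with keys for a DELETE ... USING join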
openvax/isovar | isovar/variant_helpers.py | trim_variant_fields | def trim_variant_fields(location, ref, alt):
"""
Trims common prefixes from the ref and alt sequences
Parameters
----------
location : int
Position (starting from 1) on some chromosome
ref : str
Reference nucleotides
alt : str
Alternate (mutant) nucleotide
Returns adjusted triplet (location, ref, alt)
"""
if len(alt) > 0 and ref.startswith(alt):
# if alt is a prefix of the ref sequence then we actually have a
# deletion like:
# g.10 GTT > GT
# which can be trimmed to
# g.12 'T'>''
ref = ref[len(alt):]
location += len(alt)
alt = ""
if len(ref) > 0 and alt.startswith(ref):
# if ref sequence is a prefix of the alt sequence then we actually have
# an insertion like:
# g.10 GT>GTT
# which can be trimmed to
# g.11 ''>'T'
# Note that we are selecting the position *before* the insertion
# (as an arbitrary convention)
alt = alt[len(ref):]
location += len(ref) - 1
ref = ""
return location, ref, alt | python | def trim_variant_fields(location, ref, alt):
"""
Trims common prefixes from the ref and alt sequences
Parameters
----------
location : int
Position (starting from 1) on some chromosome
ref : str
Reference nucleotides
alt : str
Alternate (mutant) nucleotide
Returns adjusted triplet (location, ref, alt)
"""
if len(alt) > 0 and ref.startswith(alt):
# if alt is a prefix of the ref sequence then we actually have a
# deletion like:
# g.10 GTT > GT
# which can be trimmed to
# g.12 'T'>''
ref = ref[len(alt):]
location += len(alt)
alt = ""
if len(ref) > 0 and alt.startswith(ref):
# if ref sequence is a prefix of the alt sequence then we actually have
# an insertion like:
# g.10 GT>GTT
# which can be trimmed to
# g.11 ''>'T'
# Note that we are selecting the position *before* the insertion
# (as an arbitrary convention)
alt = alt[len(ref):]
location += len(ref) - 1
ref = ""
return location, ref, alt | [
"def",
"trim_variant_fields",
"(",
"location",
",",
"ref",
",",
"alt",
")",
":",
"if",
"len",
"(",
"alt",
")",
">",
"0",
"and",
"ref",
".",
"startswith",
"(",
"alt",
")",
":",
"ref",
"=",
"ref",
"[",
"len",
"(",
"alt",
")",
":",
"]",
"location",
"+=",
"len",
"(",
"alt",
")",
"alt",
"=",
"\"\"",
"if",
"len",
"(",
"ref",
")",
">",
"0",
"and",
"alt",
".",
"startswith",
"(",
"ref",
")",
":",
"alt",
"=",
"alt",
"[",
"len",
"(",
"ref",
")",
":",
"]",
"location",
"+=",
"len",
"(",
"ref",
")",
"-",
"1",
"ref",
"=",
"\"\"",
"return",
"location",
",",
"ref",
",",
"alt"
] | Trims common prefixes from the ref and alt sequences
Parameters
----------
location : int
Position (starting from 1) on some chromosome
ref : str
Reference nucleotides
alt : str
Alternate (mutant) nucleotide
Returns adjusted triplet (location, ref, alt) | [
"Trims",
"common",
"prefixes",
"from",
"the",
"ref",
"and",
"alt",
"sequences"
] | b39b684920e3f6b344851d6598a1a1c67bce913b | https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/variant_helpers.py#L27-L64 | train |
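The trimming branches can be checked directly against the examples in the docstring; a small doctest-style sketch:

from isovar.variant_helpers import trim_variant_fields

# deletion: g.10 GTT>GT trims to g.12 'T'>''
assert trim_variant_fields(10, 'GTT', 'GT') == (12, 'T', '')

# insertion: g.10 GT>GTT trims to g.11 ''>'T' (position *before* the insertion)
assert trim_variant_fields(10, 'GT', 'GTT') == (11, '', 'T')

# a substitution with no shared prefix passes through unchanged
assert trim_variant_fields(10, 'C', 'A') == (10, 'C', 'A')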
openvax/isovar | isovar/variant_helpers.py | base0_interval_for_variant | def base0_interval_for_variant(variant):
"""
Interval of interbase offsets of the affected reference positions for a
particular variant.
Parameters
----------
variant : varcode.Variant
Returns triplet of (base1_location, ref, alt)
"""
base1_location, ref, alt = trim_variant(variant)
return base0_interval_for_variant_fields(
base1_location=base1_location,
ref=ref,
alt=alt) | python | def base0_interval_for_variant(variant):
"""
Interval of interbase offsets of the affected reference positions for a
particular variant.
Parameters
----------
variant : varcode.Variant
Returns triplet of (base1_location, ref, alt)
"""
base1_location, ref, alt = trim_variant(variant)
return base0_interval_for_variant_fields(
base1_location=base1_location,
ref=ref,
alt=alt) | [
"def",
"base0_interval_for_variant",
"(",
"variant",
")",
":",
"base1_location",
",",
"ref",
",",
"alt",
"=",
"trim_variant",
"(",
"variant",
")",
"return",
"base0_interval_for_variant_fields",
"(",
"base1_location",
"=",
"base1_location",
",",
"ref",
"=",
"ref",
",",
"alt",
"=",
"alt",
")"
] | Interval of interbase offsets of the affected reference positions for a
particular variant.
Parameters
----------
variant : varcode.Variant
Returns triplet of (base1_location, ref, alt) | [
"Interval",
"of",
"interbase",
"offsets",
"of",
"the",
"affected",
"reference",
"positions",
"for",
"a",
"particular",
"variant",
"."
] | b39b684920e3f6b344851d6598a1a1c67bce913b | https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/variant_helpers.py#L110-L125 | train |
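base0_interval_for_variant_fields is not included in this dump; the following is a plausible sketch of the interbase conversion it performs, written from the coordinate conventions described in this file (an assumption, not the isovar source):

def base0_interval_for_variant_fields(base1_location, ref, alt):
    # hypothetical reimplementation for illustration only
    if len(ref) == 0:
        # a trimmed insertion sits between bases, so the half-open
        # interbase interval is empty: (L, L)
        return (base1_location, base1_location)
    start = base1_location - 1   # base-1 position L covers interbase (L-1, L)
    return (start, start + len(ref))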
openvax/isovar | isovar/variant_helpers.py | interbase_range_affected_by_variant_on_transcript | def interbase_range_affected_by_variant_on_transcript(variant, transcript):
"""
Convert from a variant's position in global genomic coordinates on the
forward strand to an interval of interbase offsets on a particular
transcript's mRNA.
Parameters
----------
variant : varcode.Variant
transcript : pyensembl.Transcript
Assumes that the transcript overlaps the variant.
Returns (start, end) tuple of offsets into the transcript's cDNA sequence
which indicates which bases in the reference sequence are affected by a
variant.
Example:
The insertion of "TTT" into the middle of an exon would result in an
offset pair such as (100,100) since no reference bases are changed
or deleted by an insertion.
On the other hand, deleting the preceding "CGG" at that same locus could
result in an offset pair such as (97, 100)
"""
if variant.is_insertion:
if transcript.strand == "+":
# base-1 position of an insertion is the genomic nucleotide
# before any inserted mutant nucleotides, so the start offset
# of the actual inserted nucleotides is one past that reference
# position
start_offset = transcript.spliced_offset(variant.start) + 1
else:
# on the negative strand the genomic base-1 position actually
# refers to the transcript base *after* the insertion, so we can
# use that as the interbase coordinate for where the insertion
# occurs
start_offset = transcript.spliced_offset(variant.start)
# an insertion happens *between* two reference bases
# so the start:end offsets coincide
end_offset = start_offset
else:
# reference bases affected by substitution or deletion defined by
# range starting at first affected base
offsets = []
assert len(variant.ref) > 0
for dna_pos in range(variant.start, variant.start + len(variant.ref)):
try:
offsets.append(transcript.spliced_offset(dna_pos))
except ValueError:
logger.info(
"Couldn't find position %d from %s on exons of %s",
dna_pos,
variant,
transcript)
if len(offsets) == 0:
raise ValueError(
"Couldn't find any exonic reference bases affected by %s on %s",
variant,
transcript)
start_offset = min(offsets)
end_offset = max(offsets) + 1
return (start_offset, end_offset) | python | def interbase_range_affected_by_variant_on_transcript(variant, transcript):
"""
Convert from a variant's position in global genomic coordinates on the
forward strand to an interval of interbase offsets on a particular
transcript's mRNA.
Parameters
----------
variant : varcode.Variant
transcript : pyensembl.Transcript
Assumes that the transcript overlaps the variant.
Returns (start, end) tuple of offsets into the transcript's cDNA sequence
which indicates which bases in the reference sequence are affected by a
variant.
Example:
The insertion of "TTT" into the middle of an exon would result in an
offset pair such as (100,100) since no reference bases are changed
or deleted by an insertion.
On the other hand, deleting the preceding "CGG" at that same locus could
result in an offset pair such as (97, 100)
"""
if variant.is_insertion:
if transcript.strand == "+":
# base-1 position of an insertion is the genomic nucleotide
# before any inserted mutant nucleotides, so the start offset
# of the actual inserted nucleotides is one past that reference
# position
start_offset = transcript.spliced_offset(variant.start) + 1
else:
# on the negative strand the genomic base-1 position actually
# refers to the transcript base *after* the insertion, so we can
# use that as the interbase coordinate for where the insertion
# occurs
start_offset = transcript.spliced_offset(variant.start)
# an insertion happens *between* two reference bases
# so the start:end offsets coincide
end_offset = start_offset
else:
# reference bases affected by substitution or deletion defined by
# range starting at first affected base
offsets = []
assert len(variant.ref) > 0
for dna_pos in range(variant.start, variant.start + len(variant.ref)):
try:
offsets.append(transcript.spliced_offset(dna_pos))
except ValueError:
logger.info(
"Couldn't find position %d from %s on exons of %s",
dna_pos,
variant,
transcript)
if len(offsets) == 0:
raise ValueError(
"Couldn't find any exonic reference bases affected by %s on %s",
variant,
transcript)
start_offset = min(offsets)
end_offset = max(offsets) + 1
return (start_offset, end_offset) | [
"def",
"interbase_range_affected_by_variant_on_transcript",
"(",
"variant",
",",
"transcript",
")",
":",
"if",
"variant",
".",
"is_insertion",
":",
"if",
"transcript",
".",
"strand",
"==",
"\"+\"",
":",
"start_offset",
"=",
"transcript",
".",
"spliced_offset",
"(",
"variant",
".",
"start",
")",
"+",
"1",
"else",
":",
"start_offset",
"=",
"transcript",
".",
"spliced_offset",
"(",
"variant",
".",
"start",
")",
"end_offset",
"=",
"start_offset",
"else",
":",
"offsets",
"=",
"[",
"]",
"assert",
"len",
"(",
"variant",
".",
"ref",
")",
">",
"0",
"for",
"dna_pos",
"in",
"range",
"(",
"variant",
".",
"start",
",",
"variant",
".",
"start",
"+",
"len",
"(",
"variant",
".",
"ref",
")",
")",
":",
"try",
":",
"offsets",
".",
"append",
"(",
"transcript",
".",
"spliced_offset",
"(",
"dna_pos",
")",
")",
"except",
"ValueError",
":",
"logger",
".",
"info",
"(",
"\"Couldn't find position %d from %s on exons of %s\"",
",",
"dna_pos",
",",
"variant",
",",
"transcript",
")",
"if",
"len",
"(",
"offsets",
")",
"==",
"0",
":",
"raise",
"ValueError",
"(",
"\"Couldn't find any exonic reference bases affected by %s on %s\"",
",",
"variant",
",",
"transcript",
")",
"start_offset",
"=",
"min",
"(",
"offsets",
")",
"end_offset",
"=",
"max",
"(",
"offsets",
")",
"+",
"1",
"return",
"(",
"start_offset",
",",
"end_offset",
")"
] | Convert from a variant's position in global genomic coordinates on the
forward strand to an interval of interbase offsets on a particular
transcript's mRNA.
Parameters
----------
variant : varcode.Variant
transcript : pyensembl.Transcript
Assumes that the transcript overlaps the variant.
Returns (start, end) tuple of offsets into the transcript's cDNA sequence
which indicates which bases in the reference sequence are affected by a
variant.
Example:
The insertion of "TTT" into the middle of an exon would result in an
offset pair such as (100,100) since no reference bases are changed
or deleted by an insertion.
On the other hand, deleting the preceding "CGG" at that same locus could
result in an offset pair such as (97, 100) | [
"Convert",
"from",
"a",
"variant",
"s",
"position",
"in",
"global",
"genomic",
"coordinates",
"on",
"the",
"forward",
"strand",
"to",
"an",
"interval",
"of",
"interbase",
"offsets",
"on",
"a",
"particular",
"transcript",
"s",
"mRNA",
"."
] | b39b684920e3f6b344851d6598a1a1c67bce913b | https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/variant_helpers.py#L127-L190 | train |
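The function only touches a handful of attributes, so its behavior can be demonstrated with minimal stand-ins for the varcode/pyensembl objects (illustration only; real callers pass varcode.Variant and pyensembl.Transcript):

from isovar.variant_helpers import interbase_range_affected_by_variant_on_transcript

class FakeTranscript:
    strand = '+'
    def spliced_offset(self, genomic_position):
        # pretend a single exon starts at genomic position 1000
        return genomic_position - 1000

class FakeDeletion:
    is_insertion = False
    start = 1005   # base-1 genomic position of the first deleted base
    ref = 'CGG'

# the three deleted bases map to cDNA offsets 5, 6 and 7 -> interval (5, 8)
assert interbase_range_affected_by_variant_on_transcript(
    FakeDeletion(), FakeTranscript()) == (5, 8)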
portfoliome/postpy | postpy/dml.py | insert | def insert(conn, qualified_name: str, column_names, records):
"""Insert a collection of namedtuple records."""
query = create_insert_statement(qualified_name, column_names)
with conn:
with conn.cursor(cursor_factory=NamedTupleCursor) as cursor:
for record in records:
cursor.execute(query, record) | python | def insert(conn, qualified_name: str, column_names, records):
"""Insert a collection of namedtuple records."""
query = create_insert_statement(qualified_name, column_names)
with conn:
with conn.cursor(cursor_factory=NamedTupleCursor) as cursor:
for record in records:
cursor.execute(query, record) | [
"def",
"insert",
"(",
"conn",
",",
"qualified_name",
":",
"str",
",",
"column_names",
",",
"records",
")",
":",
"query",
"=",
"create_insert_statement",
"(",
"qualified_name",
",",
"column_names",
")",
"with",
"conn",
":",
"with",
"conn",
".",
"cursor",
"(",
"cursor_factory",
"=",
"NamedTupleCursor",
")",
"as",
"cursor",
":",
"for",
"record",
"in",
"records",
":",
"cursor",
".",
"execute",
"(",
"query",
",",
"record",
")"
] | Insert a collection of namedtuple records. | [
"Insert",
"a",
"collection",
"of",
"namedtuple",
"records",
"."
] | fe26199131b15295fc5f669a0ad2a7f47bf490ee | https://github.com/portfoliome/postpy/blob/fe26199131b15295fc5f669a0ad2a7f47bf490ee/postpy/dml.py#L30-L38 | train |
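A hedged usage sketch (connection settings and table are hypothetical); records can be namedtuples or plain tuples, since each one is passed straight to cursor.execute:

from collections import namedtuple
import psycopg2
from postpy.dml import insert

User = namedtuple('User', ['id', 'name'])

conn = psycopg2.connect(dbname='mydb')   # hypothetical DSN
insert(conn, 'public.users', ['id', 'name'],
       [User(1, 'alice'), User(2, 'bob')])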
portfoliome/postpy | postpy/dml.py | insert_many | def insert_many(conn, tablename, column_names, records, chunksize=2500):
"""Insert many records by chunking data into insert statements.
Notes
-----
records should be an iterable collection of namedtuples or tuples.
"""
groups = chunks(records, chunksize)
column_str = ','.join(column_names)
insert_template = 'INSERT INTO {table} ({columns}) VALUES {values}'.format(
table=tablename, columns=column_str, values='{0}')
with conn:
with conn.cursor() as cursor:
for recs in groups:
record_group = list(recs)
records_template_str = ','.join(['%s'] * len(record_group))
insert_query = insert_template.format(records_template_str)
cursor.execute(insert_query, record_group) | python | def insert_many(conn, tablename, column_names, records, chunksize=2500):
"""Insert many records by chunking data into insert statements.
Notes
-----
records should be an iterable collection of namedtuples or tuples.
"""
groups = chunks(records, chunksize)
column_str = ','.join(column_names)
insert_template = 'INSERT INTO {table} ({columns}) VALUES {values}'.format(
table=tablename, columns=column_str, values='{0}')
with conn:
with conn.cursor() as cursor:
for recs in groups:
record_group = list(recs)
records_template_str = ','.join(['%s'] * len(record_group))
insert_query = insert_template.format(records_template_str)
cursor.execute(insert_query, record_group) | [
"def",
"insert_many",
"(",
"conn",
",",
"tablename",
",",
"column_names",
",",
"records",
",",
"chunksize",
"=",
"2500",
")",
":",
"groups",
"=",
"chunks",
"(",
"records",
",",
"chunksize",
")",
"column_str",
"=",
"','",
".",
"join",
"(",
"column_names",
")",
"insert_template",
"=",
"'INSERT INTO {table} ({columns}) VALUES {values}'",
".",
"format",
"(",
"table",
"=",
"tablename",
",",
"columns",
"=",
"column_str",
",",
"values",
"=",
"'{0}'",
")",
"with",
"conn",
":",
"with",
"conn",
".",
"cursor",
"(",
")",
"as",
"cursor",
":",
"for",
"recs",
"in",
"groups",
":",
"record_group",
"=",
"list",
"(",
"recs",
")",
"records_template_str",
"=",
"','",
".",
"join",
"(",
"[",
"'%s'",
"]",
"*",
"len",
"(",
"record_group",
")",
")",
"insert_query",
"=",
"insert_template",
".",
"format",
"(",
"records_template_str",
")",
"cursor",
".",
"execute",
"(",
"insert_query",
",",
"record_group",
")"
] | Insert many records by chunking data into insert statements.
Notes
-----
records should be an iterable collection of namedtuples or tuples. | [
"Insert",
"many",
"records",
"by",
"chunking",
"data",
"into",
"insert",
"statements",
"."
] | fe26199131b15295fc5f669a0ad2a7f47bf490ee | https://github.com/portfoliome/postpy/blob/fe26199131b15295fc5f669a0ad2a7f47bf490ee/postpy/dml.py#L41-L60 | train |
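Because the records are consumed in chunks, a generator works as well as a list (assuming the chunks helper slices lazily); reusing the conn from the insert sketch above:

rows = ((i, 'user-%d' % i) for i in range(10000))   # lazy source of tuples
insert_many(conn, 'public.users', ['id', 'name'], rows, chunksize=1000)
# ten multi-row INSERT statements, committed as one transaction block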
portfoliome/postpy | postpy/dml.py | upsert_records | def upsert_records(conn, records, upsert_statement):
"""Upsert records."""
with conn:
with conn.cursor() as cursor:
for record in records:
cursor.execute(upsert_statement, record) | python | def upsert_records(conn, records, upsert_statement):
"""Upsert records."""
with conn:
with conn.cursor() as cursor:
for record in records:
cursor.execute(upsert_statement, record) | [
"def",
"upsert_records",
"(",
"conn",
",",
"records",
",",
"upsert_statement",
")",
":",
"with",
"conn",
":",
"with",
"conn",
".",
"cursor",
"(",
")",
"as",
"cursor",
":",
"for",
"record",
"in",
"records",
":",
"cursor",
".",
"execute",
"(",
"upsert_statement",
",",
"record",
")"
] | Upsert records. | [
"Upsert",
"records",
"."
] | fe26199131b15295fc5f669a0ad2a7f47bf490ee | https://github.com/portfoliome/postpy/blob/fe26199131b15295fc5f669a0ad2a7f47bf490ee/postpy/dml.py#L63-L69 | train |
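The upsert statement itself is supplied by the caller; on PostgreSQL 9.5+ an ON CONFLICT clause is the natural choice (table and columns hypothetical, conn as above):

upsert_sql = ('INSERT INTO public.users (id, name) VALUES (%s, %s) '
              'ON CONFLICT (id) DO UPDATE SET name = EXCLUDED.name')
upsert_records(conn, [(1, 'alice'), (2, 'bob')], upsert_sql)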
portfoliome/postpy | postpy/dml.py | delete_joined_table_sql | def delete_joined_table_sql(qualified_name, removing_qualified_name, primary_key):
"""SQL statement for a joined delete from.
Generate SQL statement for deleting the intersection of rows between
both tables from table referenced by tablename.
"""
condition_template = 't.{}=d.{}'
where_clause = ' AND '.join(condition_template.format(pkey, pkey)
for pkey in primary_key)
delete_statement = (
'DELETE FROM {table} t'
' USING {delete_table} d'
' WHERE {where_clause}').format(table=qualified_name,
delete_table=removing_qualified_name,
where_clause=where_clause)
return delete_statement | python | def delete_joined_table_sql(qualified_name, removing_qualified_name, primary_key):
"""SQL statement for a joined delete from.
Generate SQL statement for deleting the intersection of rows between
both tables from table referenced by tablename.
"""
condition_template = 't.{}=d.{}'
where_clause = ' AND '.join(condition_template.format(pkey, pkey)
for pkey in primary_key)
delete_statement = (
'DELETE FROM {table} t'
' USING {delete_table} d'
' WHERE {where_clause}').format(table=qualified_name,
delete_table=removing_qualified_name,
where_clause=where_clause)
return delete_statement | [
"def",
"delete_joined_table_sql",
"(",
"qualified_name",
",",
"removing_qualified_name",
",",
"primary_key",
")",
":",
"condition_template",
"=",
"'t.{}=d.{}'",
"where_clause",
"=",
"' AND '",
".",
"join",
"(",
"condition_template",
".",
"format",
"(",
"pkey",
",",
"pkey",
")",
"for",
"pkey",
"in",
"primary_key",
")",
"delete_statement",
"=",
"(",
"'DELETE FROM {table} t'",
"' USING {delete_table} d'",
"' WHERE {where_clause}'",
")",
".",
"format",
"(",
"table",
"=",
"qualified_name",
",",
"delete_table",
"=",
"removing_qualified_name",
",",
"where_clause",
"=",
"where_clause",
")",
"return",
"delete_statement"
] | SQL statement for a joined delete from.
Generate SQL statement for deleting the intersection of rows between
both tables from table referenced by tablename. | [
"SQL",
"statement",
"for",
"a",
"joined",
"delete",
"from",
".",
"Generate",
"SQL",
"statement",
"for",
"deleting",
"the",
"intersection",
"of",
"rows",
"between",
"both",
"tables",
"from",
"table",
"referenced",
"by",
"tablename",
"."
] | fe26199131b15295fc5f669a0ad2a7f47bf490ee | https://github.com/portfoliome/postpy/blob/fe26199131b15295fc5f669a0ad2a7f47bf490ee/postpy/dml.py#L165-L180 | train |
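For a composite key the t.{}=d.{} conditions are ANDed together; with a single-column key the generated statement looks like:

from postpy.dml import delete_joined_table_sql

sql = delete_joined_table_sql('public.users', 'public.delete_from__users', ['id'])
# DELETE FROM public.users t USING public.delete_from__users d WHERE t.id=d.id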
portfoliome/postpy | postpy/dml.py | copy_from_csv | def copy_from_csv(conn, file, qualified_name: str, delimiter=',', encoding='utf8',
null_str='', header=True, escape_str='\\', quote_char='"',
force_not_null=None, force_null=None):
"""Copy file-like object to database table.
Notes
-----
Implementation defaults to postgres standard except for encoding.
Postgres falls back on client encoding, while this function defaults to utf-8.
References
----------
https://www.postgresql.org/docs/current/static/sql-copy.html
"""
copy_sql = copy_from_csv_sql(qualified_name, delimiter, encoding,
null_str=null_str, header=header,
escape_str=escape_str, quote_char=quote_char,
force_not_null=force_not_null,
force_null=force_null)
with conn:
with conn.cursor() as cursor:
cursor.copy_expert(copy_sql, file) | python | def copy_from_csv(conn, file, qualified_name: str, delimiter=',', encoding='utf8',
null_str='', header=True, escape_str='\\', quote_char='"',
force_not_null=None, force_null=None):
"""Copy file-like object to database table.
Notes
-----
Implementation defaults to postgres standard except for encoding.
Postgres falls back on client encoding, while this function defaults to utf-8.
References
----------
https://www.postgresql.org/docs/current/static/sql-copy.html
"""
copy_sql = copy_from_csv_sql(qualified_name, delimiter, encoding,
null_str=null_str, header=header,
escape_str=escape_str, quote_char=quote_char,
force_not_null=force_not_null,
force_null=force_null)
with conn:
with conn.cursor() as cursor:
cursor.copy_expert(copy_sql, file) | [
"def",
"copy_from_csv",
"(",
"conn",
",",
"file",
",",
"qualified_name",
":",
"str",
",",
"delimiter",
"=",
"','",
",",
"encoding",
"=",
"'utf8'",
",",
"null_str",
"=",
"''",
",",
"header",
"=",
"True",
",",
"escape_str",
"=",
"'\\\\'",
",",
"quote_char",
"=",
"'\"'",
",",
"force_not_null",
"=",
"None",
",",
"force_null",
"=",
"None",
")",
":",
"copy_sql",
"=",
"copy_from_csv_sql",
"(",
"qualified_name",
",",
"delimiter",
",",
"encoding",
",",
"null_str",
"=",
"null_str",
",",
"header",
"=",
"header",
",",
"escape_str",
"=",
"escape_str",
",",
"quote_char",
"=",
"quote_char",
",",
"force_not_null",
"=",
"force_not_null",
",",
"force_null",
"=",
"force_null",
")",
"with",
"conn",
":",
"with",
"conn",
".",
"cursor",
"(",
")",
"as",
"cursor",
":",
"cursor",
".",
"copy_expert",
"(",
"copy_sql",
",",
"file",
")"
] | Copy file-like object to database table.
Notes
-----
Implementation defaults to postgres standard except for encoding.
Postgres falls back on client encoding, while this function defaults to utf-8.
References
----------
https://www.postgresql.org/docs/current/static/sql-copy.html | [
"Copy",
"file",
"-",
"like",
"object",
"to",
"database",
"table",
"."
] | fe26199131b15295fc5f669a0ad2a7f47bf490ee | https://github.com/portfoliome/postpy/blob/fe26199131b15295fc5f669a0ad2a7f47bf490ee/postpy/dml.py#L237-L261 | train |
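A usage sketch for loading a UTF-8 CSV with a header row (file name and table hypothetical, conn as in the earlier sketches); the file object only needs to be readable by psycopg2's copy_expert:

from postpy.dml import copy_from_csv

with open('users.csv', encoding='utf8') as f:
    copy_from_csv(conn, f, 'public.users', delimiter=',', header=True)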
portfoliome/postpy | postpy/admin.py | get_user_tables | def get_user_tables(conn):
"""Retrieve all user tables."""
query_string = "select schemaname, relname from pg_stat_user_tables;"
with conn.cursor() as cursor:
cursor.execute(query_string)
tables = cursor.fetchall()
return tables | python | def get_user_tables(conn):
"""Retrieve all user tables."""
query_string = "select schemaname, relname from pg_stat_user_tables;"
with conn.cursor() as cursor:
cursor.execute(query_string)
tables = cursor.fetchall()
return tables | [
"def",
"get_user_tables",
"(",
"conn",
")",
":",
"query_string",
"=",
"\"select schemaname, relname from pg_stat_user_tables;\"",
"with",
"conn",
".",
"cursor",
"(",
")",
"as",
"cursor",
":",
"cursor",
".",
"execute",
"(",
"query_string",
")",
"tables",
"=",
"cursor",
".",
"fetchall",
"(",
")",
"return",
"tables"
] | Retrieve all user tables. | [
"Retrieve",
"all",
"user",
"tables",
"."
] | fe26199131b15295fc5f669a0ad2a7f47bf490ee | https://github.com/portfoliome/postpy/blob/fe26199131b15295fc5f669a0ad2a7f47bf490ee/postpy/admin.py#L13-L21 | train |
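The cursor returns plain (schemaname, relname) tuples, so iteration is direct:

for schema, relname in get_user_tables(conn):
    print('%s.%s' % (schema, relname))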
portfoliome/postpy | postpy/admin.py | get_column_metadata | def get_column_metadata(conn, table: str, schema='public'):
"""Returns column data following db.Column parameter specification."""
query = """\
SELECT
attname as name,
format_type(atttypid, atttypmod) AS data_type,
NOT attnotnull AS nullable
FROM pg_catalog.pg_attribute
WHERE attrelid=%s::regclass
AND attnum > 0 AND NOT attisdropped
ORDER BY attnum;"""
qualified_name = compile_qualified_name(table, schema=schema)
for record in select_dict(conn, query, params=(qualified_name,)):
yield record | python | def get_column_metadata(conn, table: str, schema='public'):
"""Returns column data following db.Column parameter specification."""
query = """\
SELECT
attname as name,
format_type(atttypid, atttypmod) AS data_type,
NOT attnotnull AS nullable
FROM pg_catalog.pg_attribute
WHERE attrelid=%s::regclass
AND attnum > 0 AND NOT attisdropped
ORDER BY attnum;"""
qualified_name = compile_qualified_name(table, schema=schema)
for record in select_dict(conn, query, params=(qualified_name,)):
yield record | [
"def",
"get_column_metadata",
"(",
"conn",
",",
"table",
":",
"str",
",",
"schema",
"=",
"'public'",
")",
":",
"query",
"=",
"qualified_name",
"=",
"compile_qualified_name",
"(",
"table",
",",
"schema",
"=",
"schema",
")",
"for",
"record",
"in",
"select_dict",
"(",
"conn",
",",
"query",
",",
"params",
"=",
"(",
"qualified_name",
",",
")",
")",
":",
"yield",
"record"
] | Returns column data following db.Column parameter specification. | [
"Returns",
"column",
"data",
"following",
"db",
".",
"Column",
"parameter",
"specification",
"."
] | fe26199131b15295fc5f669a0ad2a7f47bf490ee | https://github.com/portfoliome/postpy/blob/fe26199131b15295fc5f669a0ad2a7f47bf490ee/postpy/admin.py#L47-L62 | train |
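Each yielded record carries the name, data_type and nullable keys selected above, assuming select_dict yields mappings keyed by column name:

for column in get_column_metadata(conn, 'users', schema='public'):
    print(column['name'], column['data_type'], column['nullable'])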
portfoliome/postpy | postpy/admin.py | reflect_table | def reflect_table(conn, table_name, schema='public'):
"""Reflect basic table attributes."""
column_meta = list(get_column_metadata(conn, table_name, schema=schema))
primary_key_columns = list(get_primary_keys(conn, table_name, schema=schema))
columns = [Column(**column_data) for column_data in column_meta]
primary_key = PrimaryKey(primary_key_columns)
return Table(table_name, columns, primary_key, schema=schema) | python | def reflect_table(conn, table_name, schema='public'):
"""Reflect basic table attributes."""
column_meta = list(get_column_metadata(conn, table_name, schema=schema))
primary_key_columns = list(get_primary_keys(conn, table_name, schema=schema))
columns = [Column(**column_data) for column_data in column_meta]
primary_key = PrimaryKey(primary_key_columns)
return Table(table_name, columns, primary_key, schema=schema) | [
"def",
"reflect_table",
"(",
"conn",
",",
"table_name",
",",
"schema",
"=",
"'public'",
")",
":",
"column_meta",
"=",
"list",
"(",
"get_column_metadata",
"(",
"conn",
",",
"table_name",
",",
"schema",
"=",
"schema",
")",
")",
"primary_key_columns",
"=",
"list",
"(",
"get_primary_keys",
"(",
"conn",
",",
"table_name",
",",
"schema",
"=",
"schema",
")",
")",
"columns",
"=",
"[",
"Column",
"(",
"**",
"column_data",
")",
"for",
"column_data",
"in",
"column_meta",
"]",
"primary_key",
"=",
"PrimaryKey",
"(",
"primary_key_columns",
")",
"return",
"Table",
"(",
"table_name",
",",
"columns",
",",
"primary_key",
",",
"schema",
"=",
"schema",
")"
] | Reflect basic table attributes. | [
"Reflect",
"basic",
"table",
"attributes",
"."
] | fe26199131b15295fc5f669a0ad2a7f47bf490ee | https://github.com/portfoliome/postpy/blob/fe26199131b15295fc5f669a0ad2a7f47bf490ee/postpy/admin.py#L65-L74 | train |
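Round-tripping through reflect_table returns the postpy model objects used elsewhere in this file, e.g. as input to make_delete_table:

users = reflect_table(conn, 'users', schema='public')
print(users.name,
      [c.name for c in users.columns],
      users.primary_key.column_names)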
portfoliome/postpy | postpy/admin.py | reset | def reset(db_name):
"""Reset database."""
conn = psycopg2.connect(database='postgres')
db = Database(db_name)
conn.autocommit = True
with conn.cursor() as cursor:
cursor.execute(db.drop_statement())
cursor.execute(db.create_statement())
conn.close() | python | def reset(db_name):
"""Reset database."""
conn = psycopg2.connect(database='postgres')
db = Database(db_name)
conn.autocommit = True
with conn.cursor() as cursor:
cursor.execute(db.drop_statement())
cursor.execute(db.create_statement())
conn.close() | [
"def",
"reset",
"(",
"db_name",
")",
":",
"conn",
"=",
"psycopg2",
".",
"connect",
"(",
"database",
"=",
"'postgres'",
")",
"db",
"=",
"Database",
"(",
"db_name",
")",
"conn",
".",
"autocommit",
"=",
"True",
"with",
"conn",
".",
"cursor",
"(",
")",
"as",
"cursor",
":",
"cursor",
".",
"execute",
"(",
"db",
".",
"drop_statement",
"(",
")",
")",
"cursor",
".",
"execute",
"(",
"db",
".",
"create_statement",
"(",
")",
")",
"conn",
".",
"close",
"(",
")"
] | Reset database. | [
"Reset",
"database",
"."
] | fe26199131b15295fc5f669a0ad2a7f47bf490ee | https://github.com/portfoliome/postpy/blob/fe26199131b15295fc5f669a0ad2a7f47bf490ee/postpy/admin.py#L77-L87 | train |
portfoliome/postpy | postpy/admin.py | install_extensions | def install_extensions(extensions, **connection_parameters):
"""Install Postgres extension if available.
Notes
-----
- superuser is generally required for installing extensions.
- Currently does not support specific schema.
"""
from postpy.connections import connect
conn = connect(**connection_parameters)
conn.autocommit = True
for extension in extensions:
install_extension(conn, extension) | python | def install_extensions(extensions, **connection_parameters):
"""Install Postgres extension if available.
Notes
-----
- superuser is generally required for installing extensions.
- Currently does not support specific schema.
"""
from postpy.connections import connect
conn = connect(**connection_parameters)
conn.autocommit = True
for extension in extensions:
install_extension(conn, extension) | [
"def",
"install_extensions",
"(",
"extensions",
",",
"**",
"connection_parameters",
")",
":",
"from",
"postpy",
".",
"connections",
"import",
"connect",
"conn",
"=",
"connect",
"(",
"**",
"connection_parameters",
")",
"conn",
".",
"autocommit",
"=",
"True",
"for",
"extension",
"in",
"extensions",
":",
"install_extension",
"(",
"conn",
",",
"extension",
")"
] | Install Postgres extension if available.
Notes
-----
- superuser is generally required for installing extensions.
- Currently does not support specific schema. | [
"Install",
"Postgres",
"extension",
"if",
"available",
"."
] | fe26199131b15295fc5f669a0ad2a7f47bf490ee | https://github.com/portfoliome/postpy/blob/fe26199131b15295fc5f669a0ad2a7f47bf490ee/postpy/admin.py#L90-L105 | train |
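Connection parameters are forwarded to postpy's connect factory (presumably through to psycopg2, an assumption); extension names are whatever CREATE EXTENSION accepts:

install_extensions(['hstore', 'pg_trgm'],
                   host='localhost', dbname='mydb', user='postgres')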
daskos/mentor | mentor/proxies/executor.py | ExecutorDriverProxy.update | def update(self, status):
"""Sends a status update to the framework scheduler.
Retrying as necessary until an acknowledgement has been received or the
executor is terminated (in which case, a TASK_LOST status update will be
sent).
See Scheduler.statusUpdate for more information about status update
acknowledgements.
"""
logging.info('Executor sends status update {} for task {}'.format(
status.state, status.task_id))
return self.driver.sendStatusUpdate(encode(status)) | python | def update(self, status):
"""Sends a status update to the framework scheduler.
Retrying as necessary until an acknowledgement has been received or the
executor is terminated (in which case, a TASK_LOST status update will be
sent).
See Scheduler.statusUpdate for more information about status update
acknowledgements.
"""
logging.info('Executor sends status update {} for task {}'.format(
status.state, status.task_id))
return self.driver.sendStatusUpdate(encode(status)) | [
"def",
"update",
"(",
"self",
",",
"status",
")",
":",
"logging",
".",
"info",
"(",
"'Executor sends status update {} for task {}'",
".",
"format",
"(",
"status",
".",
"state",
",",
"status",
".",
"task_id",
")",
")",
"return",
"self",
".",
"driver",
".",
"sendStatusUpdate",
"(",
"encode",
"(",
"status",
")",
")"
] | Sends a status update to the framework scheduler.
Retrying as necessary until an acknowledgement has been received or the
executor is terminated (in which case, a TASK_LOST status update will be
sent).
See Scheduler.statusUpdate for more information about status update
acknowledgements. | [
"Sends",
"a",
"status",
"update",
"to",
"the",
"framework",
"scheduler",
"."
] | b5fd64e3a3192f5664fa5c03e8517cacb4e0590f | https://github.com/daskos/mentor/blob/b5fd64e3a3192f5664fa5c03e8517cacb4e0590f/mentor/proxies/executor.py#L108-L119 | train |
daskos/mentor | mentor/proxies/executor.py | ExecutorDriverProxy.message | def message(self, data):
"""Sends a message to the framework scheduler.
These messages are best effort; do not expect a framework message to be
retransmitted in any reliable fashion.
"""
logging.info('Driver sends framework message {}'.format(data))
return self.driver.sendFrameworkMessage(data) | python | def message(self, data):
"""Sends a message to the framework scheduler.
These messages are best effort; do not expect a framework message to be
retransmitted in any reliable fashion.
"""
logging.info('Driver sends framework message {}'.format(data))
return self.driver.sendFrameworkMessage(data) | [
"def",
"message",
"(",
"self",
",",
"data",
")",
":",
"logging",
".",
"info",
"(",
"'Driver sends framework message {}'",
".",
"format",
"(",
"data",
")",
")",
"return",
"self",
".",
"driver",
".",
"sendFrameworkMessage",
"(",
"data",
")"
] | Sends a message to the framework scheduler.
These messages are best effort; do not expect a framework message to be
retransmitted in any reliable fashion. | [
"Sends",
"a",
"message",
"to",
"the",
"framework",
"scheduler",
"."
] | b5fd64e3a3192f5664fa5c03e8517cacb4e0590f | https://github.com/daskos/mentor/blob/b5fd64e3a3192f5664fa5c03e8517cacb4e0590f/mentor/proxies/executor.py#L121-L128 | train |
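Since delivery is best effort, callers typically send small self-describing payloads and tolerate loss; e.g. from inside an executor callback (payload hypothetical):

executor_driver.message(b'{"task": "t-1", "progress": 0.42}')  # may be dropped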
centralniak/py-raildriver | raildriver/library.py | RailDriver.get_current_time | def get_current_time(self):
"""
Get current time
:return: datetime.time
"""
hms = [int(self.get_current_controller_value(i)) for i in range(406, 409)]
return datetime.time(*hms) | python | def get_current_time(self):
"""
Get current time
:return: datetime.time
"""
hms = [int(self.get_current_controller_value(i)) for i in range(406, 409)]
return datetime.time(*hms) | [
"def",
"get_current_time",
"(",
"self",
")",
":",
"hms",
"=",
"[",
"int",
"(",
"self",
".",
"get_current_controller_value",
"(",
"i",
")",
")",
"for",
"i",
"in",
"range",
"(",
"406",
",",
"409",
")",
"]",
"return",
"datetime",
".",
"time",
"(",
"*",
"hms",
")"
] | Get current time
:return: datetime.time | [
"Get",
"current",
"time"
] | c7f5f551e0436451b9507fc63a62e49a229282b9 | https://github.com/centralniak/py-raildriver/blob/c7f5f551e0436451b9507fc63a62e49a229282b9/raildriver/library.py#L134-L141 | train |
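Controller indices 406-408 hold the in-game hours, minutes and seconds; a typical call, assuming the default constructor can locate raildriver.dll via the registry:

import raildriver

rd = raildriver.RailDriver()
print(rd.get_current_time())   # e.g. datetime.time(12, 34, 56)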
centralniak/py-raildriver | raildriver/library.py | RailDriver.get_loco_name | def get_loco_name(self):
"""
Returns the Provider, Product and Engine name.
:return list
"""
ret_str = self.dll.GetLocoName().decode()
if not ret_str:
return
return ret_str.split('.:.') | python | def get_loco_name(self):
"""
Returns the Provider, Product and Engine name.
:return list
"""
ret_str = self.dll.GetLocoName().decode()
if not ret_str:
return
return ret_str.split('.:.') | [
"def",
"get_loco_name",
"(",
"self",
")",
":",
"ret_str",
"=",
"self",
".",
"dll",
".",
"GetLocoName",
"(",
")",
".",
"decode",
"(",
")",
"if",
"not",
"ret_str",
":",
"return",
"return",
"ret_str",
".",
"split",
"(",
"'.:.'",
")"
] | Returns the Provider, Product and Engine name.
:return list | [
"Returns",
"the",
"Provider",
"Product",
"and",
"Engine",
"name",
"."
] | c7f5f551e0436451b9507fc63a62e49a229282b9 | https://github.com/centralniak/py-raildriver/blob/c7f5f551e0436451b9507fc63a62e49a229282b9/raildriver/library.py#L143-L152 | train |
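The return value is None when no locomotive is loaded, otherwise a three-element list (the example values are hypothetical):

loco = rd.get_loco_name()   # rd as in the previous sketch
if loco is not None:
    provider, product, engine = loco   # e.g. ['DTG', 'Class 105', 'Class 105 DMBS']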
centralniak/py-raildriver | raildriver/library.py | RailDriver.set_controller_value | def set_controller_value(self, index_or_name, value):
"""
Sets controller value
:param index_or_name integer index or string name
:param value float
"""
if not isinstance(index_or_name, int):
index = self.get_controller_index(index_or_name)
else:
index = index_or_name
self.dll.SetControllerValue(index, ctypes.c_float(value)) | python | def set_controller_value(self, index_or_name, value):
"""
Sets controller value
:param index_or_name integer index or string name
:param value float
"""
if not isinstance(index_or_name, int):
index = self.get_controller_index(index_or_name)
else:
index = index_or_name
self.dll.SetControllerValue(index, ctypes.c_float(value)) | [
"def",
"set_controller_value",
"(",
"self",
",",
"index_or_name",
",",
"value",
")",
":",
"if",
"not",
"isinstance",
"(",
"index_or_name",
",",
"int",
")",
":",
"index",
"=",
"self",
".",
"get_controller_index",
"(",
"index_or_name",
")",
"else",
":",
"index",
"=",
"index_or_name",
"self",
".",
"dll",
".",
"SetControllerValue",
"(",
"index",
",",
"ctypes",
".",
"c_float",
"(",
"value",
")",
")"
] | Sets controller value
:param index_or_name integer index or string name
:param value float | [
"Sets",
"controller",
"value"
] | c7f5f551e0436451b9507fc63a62e49a229282b9 | https://github.com/centralniak/py-raildriver/blob/c7f5f551e0436451b9507fc63a62e49a229282b9/raildriver/library.py#L172-L183 | train |
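Controllers can be addressed either way; names are loco-specific, and 'Regulator' here is only an example:

rd.set_controller_value('Regulator', 1.0)   # resolves the index by name first
rd.set_controller_value(4, 0.0)             # or pass the integer index directly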
daskos/mentor | mentor/proxies/scheduler.py | SchedulerDriverProxy.stop | def stop(self, failover=False):
"""Stops the scheduler driver.
If the 'failover' flag is set to False then it is expected that this
framework will never reconnect to Mesos and all of its executors and
tasks can be terminated. Otherwise, all executors and tasks will
remain running (for some framework specific failover timeout) allowing
the scheduler to reconnect (possibly in the same process, or from a
different process, for example, on a different machine.)
"""
logging.info('Stops Scheduler Driver')
return self.driver.stop(failover) | python | def stop(self, failover=False):
"""Stops the scheduler driver.
If the 'failover' flag is set to False then it is expected that this
framework will never reconnect to Mesos and all of its executors and
tasks can be terminated. Otherwise, all executors and tasks will
remain running (for some framework specific failover timeout) allowing
the scheduler to reconnect (possibly in the same process, or from a
different process, for example, on a different machine.)
"""
logging.info('Stops Scheduler Driver')
return self.driver.stop(failover) | [
"def",
"stop",
"(",
"self",
",",
"failover",
"=",
"False",
")",
":",
"logging",
".",
"info",
"(",
"'Stops Scheduler Driver'",
")",
"return",
"self",
".",
"driver",
".",
"stop",
"(",
"failover",
")"
] | Stops the scheduler driver.
If the 'failover' flag is set to False then it is expected that this
framework will never reconnect to Mesos and all of its executors and
tasks can be terminated. Otherwise, all executors and tasks will
remain running (for some framework specific failover timeout) allowing
the scheduler to reconnect (possibly in the same process, or from a
different process, for example, on a different machine.) | [
"Stops",
"the",
"scheduler",
"driver",
"."
] | b5fd64e3a3192f5664fa5c03e8517cacb4e0590f | https://github.com/daskos/mentor/blob/b5fd64e3a3192f5664fa5c03e8517cacb4e0590f/mentor/proxies/scheduler.py#L86-L97 | train |
daskos/mentor | mentor/proxies/scheduler.py | SchedulerDriverProxy.request | def request(self, requests):
"""Requests resources from Mesos.
(see mesos.proto for a description of Request and how, for example, to
request resources from specific slaves.)
Any resources available are offered to the framework via
Scheduler.resourceOffers callback, asynchronously.
"""
logging.info('Request resources from Mesos')
return self.driver.requestResources(map(encode, requests)) | python | def request(self, requests):
"""Requests resources from Mesos.
(see mesos.proto for a description of Request and how, for example, to
request resources from specific slaves.)
Any resources available are offered to the framework via
Scheduler.resourceOffers callback, asynchronously.
"""
logging.info('Request resources from Mesos')
return self.driver.requestResources(map(encode, requests)) | [
"def",
"request",
"(",
"self",
",",
"requests",
")",
":",
"logging",
".",
"info",
"(",
"'Request resources from Mesos'",
")",
"return",
"self",
".",
"driver",
".",
"requestResources",
"(",
"map",
"(",
"encode",
",",
"requests",
")",
")"
] | Requests resources from Mesos.
(see mesos.proto for a description of Request and how, for example, to
request resources from specific slaves.)
Any resources available are offered to the framework via
Scheduler.resourceOffers callback, asynchronously. | [
"Requests",
"resources",
"from",
"Mesos",
"."
] | b5fd64e3a3192f5664fa5c03e8517cacb4e0590f | https://github.com/daskos/mentor/blob/b5fd64e3a3192f5664fa5c03e8517cacb4e0590f/mentor/proxies/scheduler.py#L121-L131 | train |
daskos/mentor | mentor/proxies/scheduler.py | SchedulerDriverProxy.launch | def launch(self, offer_id, tasks, filters=Filters()):
"""Launches the given set of tasks.
Any resources remaining (i.e., not used by the tasks or their executors)
will be considered declined.
The specified filters are applied on all unused resources (see
mesos.proto for a description of Filters). Available resources are
aggregated when multiple offers are provided. Note that all offers must
belong to the same slave. Invoking this function with an empty
collection of tasks declines the offers in entirety (see
Scheduler.decline).
Note that passing a single offer is also supported.
"""
logging.info('Launches tasks {}'.format(tasks))
return self.driver.launchTasks(encode(offer_id),
map(encode, tasks),
encode(filters)) | python | def launch(self, offer_id, tasks, filters=Filters()):
"""Launches the given set of tasks.
Any resources remaining (i.e., not used by the tasks or their executors)
will be considered declined.
The specified filters are applied on all unused resources (see
mesos.proto for a description of Filters). Available resources are
aggregated when multiple offers are provided. Note that all offers must
belong to the same slave. Invoking this function with an empty
collection of tasks declines the offers in entirety (see
Scheduler.decline).
Note that passing a single offer is also supported.
"""
logging.info('Launches tasks {}'.format(tasks))
return self.driver.launchTasks(encode(offer_id),
map(encode, tasks),
encode(filters)) | [
"def",
"launch",
"(",
"self",
",",
"offer_id",
",",
"tasks",
",",
"filters",
"=",
"Filters",
"(",
")",
")",
":",
"logging",
".",
"info",
"(",
"'Launches tasks {}'",
".",
"format",
"(",
"tasks",
")",
")",
"return",
"self",
".",
"driver",
".",
"launchTasks",
"(",
"encode",
"(",
"offer_id",
")",
",",
"map",
"(",
"encode",
",",
"tasks",
")",
",",
"encode",
"(",
"filters",
")",
")"
] | Launches the given set of tasks.
Any resources remaining (i.e., not used by the tasks or their executors)
will be considered declined.
The specified filters are applied on all unused resources (see
mesos.proto for a description of Filters). Available resources are
aggregated when multiple offers are provided. Note that all offers must
belong to the same slave. Invoking this function with an empty
collection of tasks declines the offers in entirety (see
Scheduler.decline).
Note that passing a single offer is also supported. | [
"Launches",
"the",
"given",
"set",
"of",
"tasks",
"."
] | b5fd64e3a3192f5664fa5c03e8517cacb4e0590f | https://github.com/daskos/mentor/blob/b5fd64e3a3192f5664fa5c03e8517cacb4e0590f/mentor/proxies/scheduler.py#L133-L150 | train |
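A heavily hedged sketch from inside a resourceOffers callback, assuming offer and task are the message wrappers this proxy encodes and that Filters mirrors the mesos protobuf's refuse_seconds field:

driver.launch(offer.id, [task], filters=Filters(refuse_seconds=5.0))
# an empty task list would instead decline the offer outright, per the docstring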
daskos/mentor | mentor/proxies/scheduler.py | SchedulerDriverProxy.kill | def kill(self, task_id):
"""Kills the specified task.
Note that attempting to kill a task is currently not reliable.
If, for example, a scheduler fails over while it was attempting to kill
a task it will need to retry in the future.
Likewise, if unregistered / disconnected, the request will be dropped
(these semantics may be changed in the future).
"""
logging.info('Kills task {}'.format(task_id))
return self.driver.killTask(encode(task_id)) | python | def kill(self, task_id):
"""Kills the specified task.
Note that attempting to kill a task is currently not reliable.
If, for example, a scheduler fails over while it was attempting to kill
a task it will need to retry in the future.
Likewise, if unregistered / disconnected, the request will be dropped
(these semantics may be changed in the future).
"""
logging.info('Kills task {}'.format(task_id))
return self.driver.killTask(encode(task_id)) | [
"def",
"kill",
"(",
"self",
",",
"task_id",
")",
":",
"logging",
".",
"info",
"(",
"'Kills task {}'",
".",
"format",
"(",
"task_id",
")",
")",
"return",
"self",
".",
"driver",
".",
"killTask",
"(",
"encode",
"(",
"task_id",
")",
")"
] | Kills the specified task.
Note that attempting to kill a task is currently not reliable.
If, for example, a scheduler fails over while it was attempting to kill
a task it will need to retry in the future.
Likewise, if unregistered / disconnected, the request will be dropped
(these semantics may be changed in the future). | [
"Kills",
"the",
"specified",
"task",
"."
] | b5fd64e3a3192f5664fa5c03e8517cacb4e0590f | https://github.com/daskos/mentor/blob/b5fd64e3a3192f5664fa5c03e8517cacb4e0590f/mentor/proxies/scheduler.py#L152-L162 | train |
daskos/mentor | mentor/proxies/scheduler.py | SchedulerDriverProxy.reconcile | def reconcile(self, statuses):
"""Allows the framework to query the status for non-terminal tasks.
This causes the master to send back the latest task status for each task
in 'statuses', if possible. Tasks that are no longer known will result
in a TASK_LOST update. If statuses is empty, then the master will send
the latest status for each task currently known.
"""
logging.info('Reconciles task statuses {}'.format(statuses))
return self.driver.reconcileTasks(map(encode, statuses)) | python | def reconcile(self, statuses):
"""Allows the framework to query the status for non-terminal tasks.
This causes the master to send back the latest task status for each task
in 'statuses', if possible. Tasks that are no longer known will result
in a TASK_LOST update. If statuses is empty, then the master will send
the latest status for each task currently known.
"""
logging.info('Reconciles task statuses {}'.format(statuses))
return self.driver.reconcileTasks(map(encode, statuses)) | [
"def",
"reconcile",
"(",
"self",
",",
"statuses",
")",
":",
"logging",
".",
"info",
"(",
"'Reconciles task statuses {}'",
".",
"format",
"(",
"statuses",
")",
")",
"return",
"self",
".",
"driver",
".",
"reconcileTasks",
"(",
"map",
"(",
"encode",
",",
"statuses",
")",
")"
] | Allows the framework to query the status for non-terminal tasks.
This causes the master to send back the latest task status for each task
in 'statuses', if possible. Tasks that are no longer known will result
in a TASK_LOST update. If statuses is empty, then the master will send
the latest status for each task currently known. | [
"Allows",
"the",
"framework",
"to",
"query",
"the",
"status",
"for",
"non",
"-",
"terminal",
"tasks",
"."
] | b5fd64e3a3192f5664fa5c03e8517cacb4e0590f | https://github.com/daskos/mentor/blob/b5fd64e3a3192f5664fa5c03e8517cacb4e0590f/mentor/proxies/scheduler.py#L164-L173 | train |
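Per the docstring, an empty list triggers implicit reconciliation of everything the master knows about:

driver.reconcile([])   # master re-sends the latest status for every known task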
daskos/mentor | mentor/proxies/scheduler.py | SchedulerDriverProxy.accept | def accept(self, offer_ids, operations, filters=Filters()):
"""Accepts the given offers and performs a sequence of operations
on those accepted offers.
See Offer.Operation in mesos.proto for the set of available operations.
Available resources are aggregated when multiple offers are provided.
Note that all offers must belong to the same slave. Any unused resources
will be considered declined. The specified filters are applied on all
unused resources (see mesos.proto for a description of Filters).
"""
logging.info('Accepts offers {}'.format(offer_ids))
return self.driver.acceptOffers(map(encode, offer_ids),
map(encode, operations),
encode(filters)) | python | def accept(self, offer_ids, operations, filters=Filters()):
"""Accepts the given offers and performs a sequence of operations
on those accepted offers.
See Offer.Operation in mesos.proto for the set of available operations.
Available resources are aggregated when multiple offers are provided.
Note that all offers must belong to the same slave. Any unused resources
will be considered declined. The specified filters are applied on all
unused resources (see mesos.proto for a description of Filters).
"""
logging.info('Accepts offers {}'.format(offer_ids))
return self.driver.acceptOffers(map(encode, offer_ids),
map(encode, operations),
encode(filters)) | [
"def",
"accept",
"(",
"self",
",",
"offer_ids",
",",
"operations",
",",
"filters",
"=",
"Filters",
"(",
")",
")",
":",
"logging",
".",
"info",
"(",
"'Accepts offers {}'",
".",
"format",
"(",
"offer_ids",
")",
")",
"return",
"self",
".",
"driver",
".",
"acceptOffers",
"(",
"map",
"(",
"encode",
",",
"offer_ids",
")",
",",
"map",
"(",
"encode",
",",
"operations",
")",
",",
"encode",
"(",
"filters",
")",
")"
] | Accepts the given offers and performs a sequence of operations
on those accepted offers.
See Offer.Operation in mesos.proto for the set of available operations.
Available resources are aggregated when multiple offers are provided.
Note that all offers must belong to the same slave. Any unused resources
will be considered declined. The specified filters are applied on all
unused resources (see mesos.proto for a description of Filters). | [
"Accepts",
"the",
"given",
"offers",
"and",
"performs",
"a",
"sequence",
"of",
"operations",
"on",
"those",
"accepted",
"offers",
"."
] | b5fd64e3a3192f5664fa5c03e8517cacb4e0590f | https://github.com/daskos/mentor/blob/b5fd64e3a3192f5664fa5c03e8517cacb4e0590f/mentor/proxies/scheduler.py#L187-L201 | train |