code | docs
---|---|
def start_optimisation(self, rounds, temp=298.15):
self._generate_initial_model()
self._mmc_loop(rounds, temp=temp)
return | Begin the optimisation run.
Parameters
----------
rounds : int
The number of rounds of optimisation to perform.
temp : float, optional
The temperature (in K) used during the optimisation. |
def _generate_initial_model(self):
initial_parameters = [p.current_value for p in self.current_parameters]
try:
initial_model = self.specification(*initial_parameters)
except TypeError:
raise TypeError(
'Failed to build initial model. Make sure that the input '
'parameters match the number and order of arguments '
'expected by the input specification.')
initial_model.pack_new_sequences(self.sequences)
self.current_energy = self.eval_function(initial_model)
self.best_energy = copy.deepcopy(self.current_energy)
self.best_parameters = copy.deepcopy(self.current_parameters)
self.best_model = initial_model
return | Creates the initial model for the optimisation.
Raises
------
TypeError
Raised if the model failed to build. This could be due to
parameters being passed to the specification in the wrong
format. |
def _initialize_pop(self, pop_size):
self.toolbox.register("individual", self._generate)
self.toolbox.register("population", tools.initRepeat,
list, self.toolbox.individual)
self.population = self.toolbox.population(n=pop_size)
if self.neighbours:
for i in range(len(self.population)):
self.population[i].ident = i
self.population[i].neighbours = list(
set(
[(i - x) % len(self.population)
for x in range(1, self.neighbours + 1)] +
[(i + x) % len(self.population)
for x in range(1, self.neighbours + 1)]
))
self.assign_fitnesses(self.population)
return | Creates the initial population, then assigns neighbour indices and fitnesses. |
def _crossover(self, ind):
if self.neighbours:
a, b, c = random.sample([self.population[i]
for i in ind.neighbours], 3)
else:
a, b, c = random.sample(self.population, 3)
y = self.toolbox.clone(a)
y.ident = ind.ident
y.neighbours = ind.neighbours
del y.fitness.values
# y should now be a copy of ind with the vector elements from a
ident = random.randrange(len(self.value_means))
for i, value in enumerate(y):
if i == ident or random.random() < self.cxpb:
entry = a[i] + random.lognormvariate(-1.2, 0.5) * \
self.diff_weight * (b[i] - c[i])
tries = 0
while abs(entry) > 1.0:
tries += 1
entry = a[i] + random.lognormvariate(-1.2, 0.5) * \
self.diff_weight * (b[i] - c[i])
if tries > 10000:
entry = a[i]
y[i] = entry
return y | Used by the evolution process to generate a new individual.
Notes
-----
This is a tweaked version of the classical DE crossover
algorithm; the main difference is that candidate parameters are
generated using a lognormal distribution. Bound handling is
achieved by resampling where the candidate solution exceeds +/-1.
Parameters
----------
ind : deap individual
Returns
-------
y : deap individual
An individual representing a candidate solution, to be
assigned a fitness. |
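For illustration, a minimal standalone sketch of the per-parameter update used above, assuming the individuals are plain Python lists with values in [-1, 1]; `cxpb` and `diff_weight` stand in for the optimiser attributes of the same name, and this is not the optimiser's own method, just the same lognormal-weighted difference with resampling for bound handling.
import random

def de_candidate(a, b, c, cxpb=0.5, diff_weight=0.8):
    """Build one candidate vector from three parent vectors a, b and c."""
    forced = random.randrange(len(a))  # at least one element is always perturbed
    y = list(a)
    for i in range(len(a)):
        if i == forced or random.random() < cxpb:
            entry = a[i] + random.lognormvariate(-1.2, 0.5) * diff_weight * (b[i] - c[i])
            tries = 0
            while abs(entry) > 1.0 and tries <= 10000:  # resample out-of-bounds values
                entry = a[i] + random.lognormvariate(-1.2, 0.5) * diff_weight * (b[i] - c[i])
                tries += 1
            if abs(entry) > 1.0:
                entry = a[i]  # give up and keep the parent value
            y[i] = entry
    return y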
def _update_pop(self, pop_size):
candidates = []
for ind in self.population:
candidates.append(self._crossover(ind))
self._model_count += len(candidates)
self.assign_fitnesses(candidates)
for i in range(len(self.population)):
if candidates[i].fitness > self.population[i].fitness:
self.population[i] = candidates[i]
return | Updates population according to crossover and fitness criteria. |
def _generate(self):
part = creator.Particle(
[random.uniform(-1, 1)
for _ in range(len(self.value_means))])
part.speed = [
random.uniform(-self.max_speed, self.max_speed)
for _ in range(len(self.value_means))]
part.smin = -self.max_speed
part.smax = self.max_speed
part.ident = None
part.neighbours = None
return part | Generates a particle using the creator function.
Notes
-----
Position and speed are uniformly randomly seeded within
allowed bounds. The particle also has speed limit settings
taken from global values.
Returns
-------
part : particle object
A particle used during optimisation. |
def update_particle(self, part, chi=0.729843788, c=2.05):
neighbour_pool = [self.population[i] for i in part.neighbours]
best_neighbour = max(neighbour_pool, key=lambda x: x.best.fitness)
ce1 = (c * random.uniform(0, 1) for _ in range(len(part)))
ce2 = (c * random.uniform(0, 1) for _ in range(len(part)))
ce1_p = map(operator.mul, ce1, map(operator.sub, part.best, part))
ce2_g = map(operator.mul, ce2, map(
operator.sub, best_neighbour.best, part))
chi_list = [chi] * len(part)
chi_list2 = [1 - chi] * len(part)
a = map(operator.sub,
map(operator.mul, chi_list, map(operator.add, ce1_p, ce2_g)),
map(operator.mul, chi_list2, part.speed))
part.speed = list(map(operator.add, part.speed, a))
for i, speed in enumerate(part.speed):
if speed < part.smin:
part.speed[i] = part.smin
elif speed > part.smax:
part.speed[i] = part.smax
part[:] = list(map(operator.add, part, part.speed))
return | Constriction factor update particle method.
Notes
-----
Looks for a list of neighbours attached to a particle and
uses the particle's best position and that of the best
neighbour. |
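In a single dimension the update above reduces to Clerc's constriction form v' = chi * (v + c*r1*(p - x) + c*r2*(g - x)); a rough sketch, assuming the position x, personal best p, neighbourhood best g and velocity v are plain floats rather than deap particles.
import random

def constricted_velocity(v, x, p, g, chi=0.729843788, c=2.05):
    """One-dimensional constriction-factor velocity update."""
    cognitive = c * random.uniform(0, 1) * (p - x)  # pull towards personal best
    social = c * random.uniform(0, 1) * (g - x)     # pull towards best neighbour
    # v + chi*(cognitive + social) - (1 - chi)*v simplifies to:
    return chi * (v + cognitive + social)
The method above then clamps the new speed to [smin, smax] and adds it to the position, exactly as in the loop shown.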
def _update_pop(self, pop_size):
valid_particles = []
invalid_particles = []
for part in self.population:
if any(x > 1 or x < -1 for x in part):
invalid_particles.append(part)
else:
valid_particles.append(part)
self._model_count += len(valid_particles)
for part in valid_particles:
self.update_particle(part)
self.assign_fitnesses(valid_particles)
for part in valid_particles:
if part.fitness > part.best.fitness:
part.best = creator.Particle(part)
part.best.fitness = part.fitness
for part in invalid_particles:
self.update_particle(part)
self.population[:] = valid_particles + invalid_particles
self.population.sort(key=lambda x: x.ident) # shouldn't need to sort?
return | Assigns fitnesses to particles that are within bounds. |
def _initialize_pop(self, pop_size):
self.toolbox.register("individual", self._generate)
self.toolbox.register("population", tools.initRepeat,
list, self.toolbox.individual)
self.population = self.toolbox.population(n=pop_size)
self.assign_fitnesses(self.population)
self._model_count += len(self.population)
return | Creates the initial population and assigns fitnesses. |
def _update_pop(self, pop_size):
offspring = list(map(self.toolbox.clone, self.population))
for _ in range(pop_size // 2):
if random.random() < self.cxpb:
child1, child2 = self.toolbox.select(self.population, 2, 6)
temp1 = self.toolbox.clone(child1)
temp2 = self.toolbox.clone(child2)
self.toolbox.mate(temp1, temp2)
del temp1.fitness.values
del temp2.fitness.values
offspring.append(temp1)
offspring.append(temp2)
for mutant in offspring:
if random.random() < self.mutpb:
self.toolbox.mutate(mutant)
del mutant.fitness.values
# simple bound checking
for i in range(len(offspring)):
for j in range(len(offspring[i])):
if offspring[i][j] > 1:
offspring[i][j] = 1
if offspring[i][j] < -1:
offspring[i][j] = -1
self._model_count += len(
[ind for ind in offspring if not ind.fitness.values])
self.assign_fitnesses(
[ind for ind in offspring if not ind.fitness.valid])
offspring.sort(reverse=True, key=lambda x: x.fitness)
if len(self.halloffame) != 0:
# elitism- if none beat best so far it is reinserted
if offspring[0].fitness < self.halloffame[0].fitness:
offspring.insert(0, self.halloffame[0])
self.population[:] = offspring[:pop_size]
return | Updates population according to crossover and fitness criteria. |
def _initialize_pop(self, pop_size):
self.initialize_cma_es(pop_size)
self.toolbox.register("individual", self._make_individual)
self.toolbox.register("generate", self._generate,
self.toolbox.individual)
self.toolbox.register("population", tools.initRepeat,
list, self._initial_individual)
self.toolbox.register("update", self.update)
self.population = self.toolbox.population(n=pop_size)
self.assign_fitnesses(self.population)
self._model_count += len(self.population)
return | Generates the initial population and assigns fitnesses. |
def _initial_individual(self):
ind = creator.Individual(
[random.uniform(-1, 1)
for _ in range(len(self.value_means))])
return ind | Generates an individual with random parameters within bounds. |
def _update_pop(self, pop_size):
self.toolbox.generate()
# simple bound checking
for i in range(len(self.population)):
for j in range(len(self.population[i])):
if self.population[i][j] > 1:
self.population[i][j] = 1
if self.population[i][j] < -1:
self.population[i][j] = -1
self.assign_fitnesses(self.population)
self.toolbox.update(self.population)
self._model_count += len(self.population)
return | Generates a new population, assigns fitnesses and updates the strategy parameters. |
def _make_individual(self, paramlist):
part = creator.Individual(paramlist)
part.ident = None
return part | Makes an individual particle. |
def initialize_cma_es(self, lambda_):
# Create a centroid as a numpy array
self.centroid = numpy.array([0] * len(self.value_means))
self.dim = len(self.centroid)
self.pc = numpy.zeros(self.dim)
self.ps = numpy.zeros(self.dim)
self.chiN = numpy.sqrt(self.dim) * (
1 - 1. / (4. * self.dim) + 1. / (21. * self.dim ** 2))
self.C = numpy.identity(self.dim)
self.diagD, self.B = numpy.linalg.eigh(self.C)
indx = numpy.argsort(self.diagD)
self.diagD = self.diagD[indx] ** 0.5
self.B = self.B[:, indx]
self.BD = self.B * self.diagD
self.cond = self.diagD[indx[-1]] / self.diagD[indx[0]]
self.lambda_ = lambda_
self.update_count = 0
self.compute_params()
return | A strategy that keeps track of the basic CMA-ES parameters.
Notes
-----
The centroid is initialised at the origin of parameter space.
Parameters
----------
lambda_ : int
The number of candidate solutions to generate per generation
(the population size). |
def _generate(self, func):
arz = numpy.random.standard_normal((self.lambda_, self.dim))
arz = self.centroid + self.sigma * numpy.dot(arz, self.BD.T)
self.population = list(map(func, arz))
return | Generate a population of :math:`\lambda` individuals.
Notes
-----
Each individual is created by calling *func* on a sampled
parameter vector.
Parameters
----------
func : callable
A function object that is able to initialize an
individual from a list. |
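Putting `initialize_cma_es` and `_generate` together, candidates are drawn as centroid + sigma * B·diag(D)·z with z ~ N(0, I); a small numpy sketch of that sampling step, with all arguments assumed to have been prepared as in the methods above.
import numpy

def sample_cma_candidates(centroid, sigma, B, diagD, lambda_):
    """Draw lambda_ candidate parameter vectors from N(centroid, sigma^2 * C)."""
    # C = B * diag(diagD**2) * B.T, the eigendecomposition of the covariance matrix.
    dim = len(centroid)
    z = numpy.random.standard_normal((lambda_, dim))  # isotropic normal samples
    BD = B * diagD  # scale each eigenvector by the square root of its eigenvalue
    return centroid + sigma * numpy.dot(z, BD.T)  # shape (lambda_, dim)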
def number_of_mmols(code):
# If num_mmols is already known, return it
if mmols_numbers:
if code in mmols_numbers.keys():
mmol = mmols_numbers[code][0]
return mmol
counter = 1
while True:
pdbe_url = "http://www.ebi.ac.uk/pdbe/static/entry/download/{0}-assembly-{1}.cif.gz".format(code, counter)
r = requests.get(pdbe_url)
if r.status_code == 200:
counter += 1
else:
break
if counter == 1:
while True:
pdb_url = "http://www.rcsb.org/pdb/files/{0}.pdb{1}.gz".format(code.upper(), counter)
r = requests.get(pdb_url)
if r.status_code == 200 and r.encoding is None:
counter += 1
else:
break
if counter == 1:
pdb_url = "http://files.rcsb.org/download/{0}.pdb".format(code.upper())
r = requests.get(pdb_url)
if r.status_code == 200:
counter += 1
num_mmols = counter - 1
if num_mmols == 0:
raise ValueError('Could not access ANY .mmol files for {0}'.format(code))
return num_mmols | Number of .mmol files associated with code in the PDBE.
Notes
-----
This function makes a series of calls to the PDBE website using the requests module. This can make it slow!
Parameters
----------
code : str
PDB code.
Returns
-------
num_mmols : int
Raises
------
ValueError
If no .mmol files are found at all.
Could be due to erroneous input argument, or a problem with connecting to the PDBE. |
def get_cif(code, mmol_number, outfile=None):
pdbe_url = "http://www.ebi.ac.uk/pdbe/static/entry/download/{0}-assembly-{1}.cif.gz".format(code, mmol_number)
r = requests.get(pdbe_url)
if r.status_code == 200:
temp_gz = tempfile.NamedTemporaryFile()
temp_gz.write(r.content)
with gzip.open(temp_gz.name, 'rb') as foo:
cif_string = foo.read().decode()
else:
print("Could not download cif file for {0}".format(code))
return None
# Write to file.
if outfile and cif_string:
with open(outfile, 'w') as foo:
foo.write(cif_string)
return cif_string | Downloads a .cif file for a given PDB code and assembly (mmol) number from the PDBe.
Parameters
----------
code : str
PDB code.
mmol_number : int
mmol number (biological assembly number) of file to download. Numbers from PDBe.
If None, defaults to the preferred biological assembly listed for code on the PDBe.
outfile : str
Filepath. Writes returned value to this file.
Returns
-------
cif_string : str, or None
Content of the cif file as a string.
None if unable to download the cif_file from the pdbe site. |
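A short usage sketch for the helper above; the PDB code and output path are illustrative only.
# Fetch the first biological assembly for an entry and keep a copy on disk.
cif_string = get_cif('2ebo', mmol_number=1, outfile='/tmp/2ebo_1.cif')
if cif_string is None:
    print('Download failed - check the PDB code and network connection.')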
def get_mmcif(code, outfile=None):
pdbe_url = "http://www.ebi.ac.uk/pdbe/entry-files/download/{0}.cif".format(code)
r = requests.get(pdbe_url)
if r.status_code == 200:
mmcif_string = r.text
else:
print("Could not download mmcif file for {0}".format(code))
mmcif_string = None
# Write to file.
if outfile and mmcif_string:
with open(outfile, 'w') as foo:
foo.write(mmcif_string)
return mmcif_string | Get mmcif file associated with code from PDBE.
Parameters
----------
code : str
PDB code.
outfile : str
Filepath. Writes returned value to this file.
Returns
-------
mmcif_string : str, or None
Content of the mmcif file as a string.
None if unable to download the mmcif file from the PDBe site. |
def pdbe_status_code(code):
url = 'http://www.ebi.ac.uk/pdbe/entry-files/download/{0}_1.mmol'.format(code)
r = requests.head(url=url)
return r.status_code | Check if a PDB code has structure files on the PDBE site.
Parameters
----------
code : str
PDB code to check for on PDBE.
Returns
-------
status_code : int
HTTP status code of PDBE url associated with input code. |
def current_codes_from_pdb():
url = 'http://www.rcsb.org/pdb/rest/getCurrent'
r = requests.get(url)
if r.status_code == 200:
pdb_codes = [x.lower() for x in r.text.split('"') if len(x) == 4]
else:
print('Request for {0} failed with status code {1}'.format(url, r.status_code))
return
return pdb_codes | Get list of all PDB codes currently listed in the PDB.
Returns
-------
pdb_codes : list(str)
List of PDB codes (in lower case). |
def local_pdb_codes(data_dir=None):
if not data_dir:
data_dir = global_settings["structural_database"]["path"]
p = Path(data_dir)
pdb_parent_dirs = [x for x in p.iterdir() if x.is_dir() and len(x.parts[-1]) == 2]
pdb_folders = [x for test in pdb_parent_dirs for x in test.iterdir() if x.is_dir()]
pdb_code_list = [x.parts[-1] for x in pdb_folders if len(x.parts[-1]) == 4]
return pdb_code_list | Get list of PDB codes stored in a folder (FileSystem folder hierarchy expected within data_dir).
If no folder is specified, use the database_dir defined in settings.json.
Parameters
----------
data_dir: str
Filepath to a folder containing the PDB folder hierarchy (eg data_dir/eb/2ebo)
Returns
-------
pdb_code_list : list(str)
PDB codes present in data_dir. |
def make_code_obsolete(code):
fs = FileSystem(code=code)
if os.path.exists(fs.parent_dir):
# Move to obsolete folder
destination_dir = os.path.join(fs._data_dir, 'obsolete', code[1:3], code)
if os.path.exists(destination_dir):
shutil.rmtree(destination_dir)
shutil.move(fs.parent_dir, destination_dir)
# Remove containing (two-letter) folder if empty, else pass.
two_letter_dir = os.path.dirname(fs.parent_dir)
try:
os.rmdir(two_letter_dir)
except OSError:
pass
return | Moves folders associated with the PDB code to the obsolete folder in global_settings["database_dir"].
Parameters
----------
code : str
PDB accession code
Returns
-------
None |
def mmols(self):
mmols_dict = {}
mmol_dir = os.path.join(self.parent_dir, 'structures')
if not os.path.exists(mmol_dir):
os.makedirs(mmol_dir)
mmol_file_names = ['{0}_{1}.mmol'.format(self.code, i) for i in range(1, self.number_of_mmols + 1)]
mmol_files = [os.path.join(mmol_dir, x) for x in mmol_file_names]
for i, mmol_file in enumerate(mmol_files):
mmols_dict[i + 1] = mmol_file
# If file does not exist yet, download the mmol and write to mmol_file.
if not os.path.exists(mmol_file):
get_mmol(self.code, mmol_number=i + 1, outfile=mmol_file)
return mmols_dict | Dict of filepaths for all mmol files associated with code.
Notes
-----
Downloads mmol files if not already present.
Returns
-------
mmols_dict : dict, or None.
Keys : int
mmol number
Values : str
Filepath for the corresponding mmol file. |
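A sketch of the typical access pattern, with an illustrative PDB code; `FileSystem` construction follows the call shown in `make_code_obsolete` above, and `mmols` is assumed to be exposed as a property, as its use elsewhere in the class suggests.
fs = FileSystem(code='2ebo')  # illustrative entry
for mmol_number, mmol_path in fs.mmols.items():  # downloads any missing .mmol files
    print(mmol_number, mmol_path)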
def dssps(self):
dssps_dict = {}
dssp_dir = os.path.join(self.parent_dir, 'dssp')
if not os.path.exists(dssp_dir):
os.makedirs(dssp_dir)
for i, mmol_file in self.mmols.items():
dssp_file_name = '{0}.dssp'.format(os.path.basename(mmol_file))
dssp_file = os.path.join(dssp_dir, dssp_file_name)
if not os.path.exists(dssp_file):
dssp_out = run_dssp(pdb=mmol_file, path=True, outfile=dssp_file)
if len(dssp_out) == 0:
raise Warning("dssp file {0} is empty".format(dssp_file))
dssps_dict[i] = dssp_file
return dssps_dict | Dict of filepaths for all dssp files associated with code.
Notes
-----
Runs DSSP and writes output to files if not already present.
Also downloads mmol files if not already present.
Calls isambard.external_programs.dssp and so needs dssp to be installed.
Returns
-------
dssps_dict : dict, or None.
Keys : int
mmol number
Values : str
Filepath for the corresponding dssp file.
Raises
------
Warning
If any of the dssp files are empty. |
def fastas(self, download=False):
fastas_dict = {}
fasta_dir = os.path.join(self.parent_dir, 'fasta')
if not os.path.exists(fasta_dir):
os.makedirs(fasta_dir)
for i, mmol_file in self.mmols.items():
mmol_name = os.path.basename(mmol_file)
fasta_file_name = '{0}.fasta'.format(mmol_name)
fasta_file = os.path.join(fasta_dir, fasta_file_name)
if not os.path.exists(fasta_file):
if download:
pdb_url = "http://www.rcsb.org/pdb/files/fasta.txt?structureIdList={0}".format(self.code.upper())
r = requests.get(pdb_url)
if r.status_code == 200:
fasta_string = r.text
else:
fasta_string = None
else:
a = convert_pdb_to_ampal(mmol_file)
# take first object if AmpalContainer (i.e. NMR structure).
if type(a) == AmpalContainer:
a = a[0]
fasta_string = a.fasta
with open(fasta_file, 'w') as foo:
foo.write(fasta_string)
fastas_dict[i] = fasta_file
return fastas_dict | Dict of filepaths for all fasta files associated with code.
Parameters
----------
download : bool
If True, downloads the fasta file from the PDB.
If False, uses the ampal Protein.fasta property.
Defaults to False, which is the recommended behaviour. |
Notes
-----
Calls self.mmols, and so downloads mmol files if not already present.
See .fasta property of isambard.ampal.base_ampal.Protein for more information.
Returns
-------
fastas_dict : dict, or None.
Keys : int
mmol number
Values : str
Filepath for the corresponding fasta file. |
def mmcif(self):
mmcif_dir = os.path.join(self.parent_dir, 'mmcif')
if not os.path.exists(mmcif_dir):
os.makedirs(mmcif_dir)
mmcif_file_name = '{0}.cif'.format(self.code)
mmcif_file = os.path.join(mmcif_dir, mmcif_file_name)
if not os.path.exists(mmcif_file):
get_mmcif(code=self.code, outfile=mmcif_file)
return mmcif_file | Filepath for mmcif file associated with code.
Notes
-----
Downloads mmcif file if not already present.
Returns
-------
mmcif_file : str
Filepath for the mmcif file. |
def categories(self):
category_dict = {}
for ligand in self:
if ligand.category in category_dict:
category_dict[ligand.category].append(ligand)
else:
category_dict[ligand.category] = [ligand]
return category_dict | Returns the categories of `Ligands` in `LigandGroup`. |
def category_count(self):
category_dict = self.categories
count_dict = {category: len(
category_dict[category]) for category in category_dict}
return count_dict | Returns the number of `Ligands` in each category. |
def sequence_molecular_weight(seq):
if 'X' in seq:
warnings.warn(_nc_warning_str, NoncanonicalWarning)
return sum(
[residue_mwt[aa] * n for aa, n in Counter(seq).items()]) + water_mass | Returns the molecular weight of the polypeptide sequence.
Notes
-----
Units = Daltons
Parameters
----------
seq : str
Sequence of amino acids. |
def sequence_molar_extinction_280(seq):
if 'X' in seq:
warnings.warn(_nc_warning_str, NoncanonicalWarning)
return sum([residue_ext_280[aa] * n for aa, n in Counter(seq).items()]) | Returns the molar extinction coefficient of the sequence at 280 nm.
Notes
-----
Units = M^-1 cm^-1
Parameters
----------
seq : str
Sequence of amino acids. |
def partial_charge(aa, pH):
difference = pH - residue_pka[aa]
if residue_charge[aa] > 0:
difference *= -1
ratio = (10 ** difference) / (1 + 10 ** difference)
return ratio | Calculates the partial charge of the amino acid.
Parameters
----------
aa : str
Amino acid single-letter code.
pH : float
pH of interest. |
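The calculation above is the Henderson-Hasselbalch fraction 10^(pH - pKa) / (1 + 10^(pH - pKa)), with the sign of the exponent flipped for positively charged residues, so the value returned is always the fraction of side chains carrying their charge. A hedged usage sketch; the pKa of roughly 3.9 quoted for aspartate is illustrative, the value actually used comes from the library's `residue_pka` table.
# Fraction of aspartate side chains that are charged at pH 7.4
# (well above an assumed pKa of ~3.9, so the fraction is close to 1.0).
fraction_charged = partial_charge('D', 7.4)
# Contribution to the net charge, as combined in sequence_charge below.
contribution = fraction_charged * residue_charge['D']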
def sequence_charge(seq, pH=7.4):
if 'X' in seq:
warnings.warn(_nc_warning_str, NoncanonicalWarning)
adj_protein_charge = sum(
[partial_charge(aa, pH) * residue_charge[aa] * n
for aa, n in Counter(seq).items()])
adj_protein_charge += (
partial_charge('N-term', pH) * residue_charge['N-term'])
adj_protein_charge += (
partial_charge('C-term', pH) * residue_charge['C-term'])
return adj_protein_charge | Calculates the total charge of the input polypeptide sequence.
Parameters
----------
seq : str
Sequence of amino acids.
pH : float
pH of interest. |
def charge_series(seq, granularity=0.1):
if 'X' in seq:
warnings.warn(_nc_warning_str, NoncanonicalWarning)
ph_range = numpy.arange(1, 13, granularity)
charge_at_ph = [sequence_charge(seq, ph) for ph in ph_range]
return ph_range, charge_at_ph | Calculates the charge for pH 1-13.
Parameters
----------
seq : str
Sequence of amino acids.
granularity : float, optional
Granularity of pH values, i.e. if 0.1, pH = [1.0, 1.1, 1.2, ...] |
def sequence_isoelectric_point(seq, granularity=0.1):
if 'X' in seq:
warnings.warn(_nc_warning_str, NoncanonicalWarning)
ph_range, charge_at_ph = charge_series(seq, granularity)
abs_charge_at_ph = [abs(ch) for ch in charge_at_ph]
pi_index = min(enumerate(abs_charge_at_ph), key=lambda x: x[1])[0]
return ph_range[pi_index] | Calculates the isoelectric point of the sequence for pH 1-13.
Parameters
----------
seq : str
Sequence of amino acids.
granularity : float, optional
Granularity of pH values, i.e. if 0.1, pH = [1.0, 1.1, 1.2, ...] |
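A combined usage sketch for the sequence-level helpers above; the peptide string is arbitrary and the numerical results depend on the residue tables shipped with the library.
seq = 'ACDEFGHIKLMNPQRSTVWY'  # arbitrary example sequence
mw = sequence_molecular_weight(seq)        # Daltons
e280 = sequence_molar_extinction_280(seq)  # M^-1 cm^-1
net_charge = sequence_charge(seq, pH=7.4)
pi = sequence_isoelectric_point(seq, granularity=0.1)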
def measure_sidechain_torsion_angles(residue, verbose=True):
chi_angles = []
aa = residue.mol_code
if aa not in side_chain_dihedrals:
if verbose:
print("Amino acid {} has no known side-chain dihedral".format(aa))
else:
for set_atoms in side_chain_dihedrals[aa]:
required_for_dihedral = set_atoms[0:4]
try:
angle = dihedral(
residue[required_for_dihedral[0]]._vector,
residue[required_for_dihedral[1]]._vector,
residue[required_for_dihedral[2]]._vector,
residue[required_for_dihedral[3]]._vector)
chi_angles.append(angle)
except KeyError as k:
print("{0} atom missing from residue {1} {2} "
"- can't assign dihedral".format(
k, residue.mol_code, residue.id))
chi_angles.append(None)
return chi_angles | Calculates side-chain dihedral angles for a residue.
Parameters
----------
residue : [ampal.Residue]
`Residue` object.
verbose : bool, optional
If `True`, reports when a residue does not have any known
dihedral angles to measure.
Returns
-------
chi_angles: [float]
Length depends on residue type, in range [-pi, pi]
[0] = chi1 [if applicable]
[1] = chi2 [if applicable]
[2] = chi3 [if applicable]
[3] = chi4 [if applicable] |
def cc_to_local_params(pitch, radius, oligo):
rloc = numpy.sin(numpy.pi / oligo) * radius
alpha = numpy.arctan((2 * numpy.pi * radius) / pitch)
alphaloc = numpy.cos((numpy.pi / 2) - ((numpy.pi) / oligo)) * alpha
pitchloc = (2 * numpy.pi * rloc) / numpy.tan(alphaloc)
return pitchloc, rloc, numpy.rad2deg(alphaloc) | Returns local parameters for an oligomeric assembly.
Parameters
----------
pitch : float
Pitch of assembly
radius : float
Radius of assembly
oligo : int
Oligomeric state of assembly
Returns
-------
pitchloc : float
Local pitch of assembly (between 2 adjacent component helices)
rloc : float
Local radius of assembly
alphaloc : float
Local pitch-angle of assembly (in degrees). |
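A usage sketch for the conversion above; the pitch, radius and oligomeric state are illustrative values for a parallel dimer, not reference data.
# Convert global coiled-coil parameters to local (per-helix) parameters.
pitchloc, rloc, alphaloc_deg = cc_to_local_params(pitch=140.0, radius=5.0, oligo=2)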
def residues_per_turn(p):
cas = p.get_reference_coords()
prim_cas = p.primitive.coordinates
dhs = [abs(dihedral(cas[i], prim_cas[i], prim_cas[i + 1], cas[i + 1]))
for i in range(len(prim_cas) - 1)]
rpts = [360.0 / dh for dh in dhs]
rpts.append(None)
return rpts | The number of residues per turn at each Monomer in the Polymer.
Notes
-----
Each element of the returned list is the number of residues
per turn, at a point on the Polymer primitive. Calculated using
the relative positions of the CA atoms and the primitive of the
Polymer. Element i is calculated from the dihedral angle using
the CA atoms of the Monomers with indices [i, i+1] and the
corresponding atoms of the primitive. The final value is None.
Parameters
----------
p : ampal.Polypeptide
`Polypeptide` from which residues per turn will be calculated.
Returns
-------
rpts : [float]
Residue per turn values. |
def crick_angles(p, reference_axis, tag=True, reference_axis_name='ref_axis'):
if not len(p) == len(reference_axis):
raise ValueError(
"The reference axis must contain the same number of points"
" as the Polymer primitive.")
prim_cas = p.primitive.coordinates
p_cas = p.get_reference_coords()
ref_points = reference_axis.coordinates
cr_angles = [
dihedral(ref_points[i], prim_cas[i], prim_cas[i + 1], p_cas[i])
for i in range(len(prim_cas) - 1)]
cr_angles.append(None)
if tag:
p.tags[reference_axis_name] = reference_axis
monomer_tag_name = 'crick_angle_{0}'.format(reference_axis_name)
for m, c in zip(p._monomers, cr_angles):
m.tags[monomer_tag_name] = c
return cr_angles | Returns the Crick angle for each CA atom in the `Polymer`.
Notes
-----
The final value in the returned list is `None`, since the angle
calculation requires pairs of points on both the primitive and
reference_axis.
Parameters
----------
p : ampal.Polymer
Reference `Polymer`.
reference_axis : list(numpy.array or tuple or list)
Length of reference_axis must equal length of the Polymer.
Each element of reference_axis represents a point in R^3.
tag : bool, optional
If `True`, tags the `Polymer` with the reference axis coordinates
and each Residue with its Crick angle. Crick angles are stored
at the Residue level, but are calculated using the CA atom.
reference_axis_name : str, optional
Used to name the keys in tags at Chain and Residue level.
Returns
-------
cr_angles : list(float)
The Crick angles in degrees for each CA atom of the Polymer.
Raises
------
ValueError
If the Polymer and the reference_axis have unequal length. |
def alpha_angles(p, reference_axis, tag=True, reference_axis_name='ref_axis'):
if not len(p) == len(reference_axis):
raise ValueError(
"The reference axis must contain the same number of points "
"as the Polymer primitive.")
prim_cas = p.primitive.coordinates
ref_points = reference_axis.coordinates
alphas = [abs(dihedral(ref_points[i + 1], ref_points[i], prim_cas[i], prim_cas[i + 1]))
for i in range(len(prim_cas) - 1)]
alphas.append(None)
if tag:
p.tags[reference_axis_name] = reference_axis
monomer_tag_name = 'alpha_angle_{0}'.format(reference_axis_name)
for m, a in zip(p._monomers, alphas):
m.tags[monomer_tag_name] = a
return alphas | Alpha angle calculated using points on the primitive of helix and axis.
Notes
-----
The final value is None, since the angle calculation requires pairs
of points along the primitive and axis. This is a generalisation
of the calculation used to measure the tilt of a helix in a
coiled-coil with respect to the central axis of the coiled coil.
Parameters
----------
p : ampal.Polymer
Reference `Polymer`.
reference_axis : list(numpy.array or tuple or list)
Length of reference_axis must equal length of the Polymer.
Each element of reference_axis represents a point in R^3.
tag : bool, optional
If `True`, tags the Chain with the reference axis coordinates
and each Residue with its alpha angle. Alpha angles are stored
at the Residue level, but are calculated using the CA atom.
reference_axis_name : str, optional
Used to name the keys in tags at Chain and Residue level.
Returns
-------
alphas : list of float
The alpha angle for the Polymer at each point of its primitive,
in degrees.
Raises
------
ValueError
If the Polymer and the reference_axis have unequal length. |
def polypeptide_vector(p, start_index=0, end_index=-1, unit=True):
if len(p) <= 1:
raise ValueError(
"Polymer should have length greater than 1. Polymer length = {0}".format(len(p)))
try:
prim_cas = p.primitive.coordinates
direction_vector = prim_cas[end_index] - prim_cas[start_index]
except ValueError:
direction_vector = p[end_index]['CA'].array - \
p[start_index]['CA'].array
if unit:
direction_vector = unit_vector(direction_vector)
return direction_vector | Vector along the Chain primitive (default is from N-terminus to C-terminus).
Notes
-----
`start_index` and `end_index` can be changed to examine smaller
sections of the Chain, or reversed to change the direction of
the vector.
Parameters
----------
p : ampal.Polymer
Reference `Polymer`.
start_index : int, optional
Default is 0 (start at the N-terminus of the Chain)
end_index : int, optional
Default is -1 (end at the C-terminus of the Chain).
unit : bool
If True, the vector returned has a magnitude of 1.
Returns
-------
vector : numpy.array
The vector has shape (3,). |
def reference_axis_from_chains(chains):
if not len(set([len(x) for x in chains])) == 1:
raise ValueError("All chains must be of the same length")
# First array in coords is the primitive coordinates of the first chain.
# The orientation of the first chain orients the reference_axis.
coords = [numpy.array(chains[0].primitive.coordinates)]
orient_vector = polypeptide_vector(chains[0])
# Append the coordinates for the remaining chains, reversing the
# direction in antiparallel arrangements.
for i, c in enumerate(chains[1:]):
if is_acute(polypeptide_vector(c), orient_vector):
coords.append(numpy.array(c.primitive.coordinates))
else:
coords.append(numpy.flipud(numpy.array(c.primitive.coordinates)))
# Average across the x, y and z coordinates to get the reference_axis
# coordinates
reference_axis = numpy.mean(numpy.array(coords), axis=0)
return Primitive.from_coordinates(reference_axis) | Average coordinates from a set of primitives calculated from Chains.
Parameters
----------
chains : list(Chain)
Returns
-------
reference_axis : numpy.array
The averaged (x, y, z) coordinates of the primitives for
the list of Chains. In the case of a coiled coil barrel,
this would give the central axis for calculating e.g. Crick
angles.
Raises
------
ValueError :
If the Chains are not all of the same length. |
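A sketch of how the reference-axis and Crick-angle functions above might be chained for a coiled coil; `cc` stands in for any parsed coiled-coil `Assembly` whose chains are all the same length.
chains = cc._molecules  # hypothetical coiled-coil Assembly
ref_axis = reference_axis_from_chains(chains)
# Tag each Residue of the first chain with its Crick angle about the shared axis.
cr_angles = crick_angles(chains[0], ref_axis, tag=True)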
def flip_reference_axis_if_antiparallel(
p, reference_axis, start_index=0, end_index=-1):
p_vector = polypeptide_vector(
p, start_index=start_index, end_index=end_index)
if is_acute(p_vector,
reference_axis[end_index] - reference_axis[start_index]):
reference_axis = numpy.flipud(reference_axis)
return reference_axis | Flips reference axis if direction opposes the direction of the `Polymer`.
Notes
-----
If the angle between the vector for the Polymer and the vector
for the reference_axis is > 90 degrees, then the reference axis
is reversed. This is useful to run before running
polymer_to_reference_axis_distances, crick_angles, or alpha_angles.
For more information on the start and end indices, see chain_vector.
Parameters
----------
p : ampal.Polymer
Reference `Polymer`.
reference_axis : list(numpy.array or tuple or list)
Length of reference_axis must equal length of the Polymer.
Each element of reference_axis represents a point in R^3.
start_index : int, optional
Default is 0 (start at the N-terminus of the Polymer)
end_index : int, optional
Default is -1 (end at the C-terminus of the Polymer).
Returns
-------
reference_axis : list(numpy.array or tuple or list) |
def make_primitive(cas_coords, window_length=3):
if len(cas_coords) >= window_length:
primitive = []
count = 0
for _ in cas_coords[:-(window_length - 1)]:
group = cas_coords[count:count + window_length]
average_x = sum([x[0] for x in group]) / window_length
average_y = sum([y[1] for y in group]) / window_length
average_z = sum([z[2] for z in group]) / window_length
primitive.append(numpy.array([average_x, average_y, average_z]))
count += 1
else:
raise ValueError(
'A primitive cannot be generated for {0} atoms using a (too large) '
'averaging window_length of {1}.'.format(
len(cas_coords), window_length))
return primitive | Calculates running average of cas_coords with a fixed averaging window_length.
Parameters
----------
cas_coords : list(numpy.array or float or tuple)
Each element of the list must have length 3.
window_length : int, optional
The number of coordinate sets to average each time.
Returns
-------
primitive : list(numpy.array)
Each array has length 3.
Raises
------
ValueError
If the length of cas_coords is smaller than the window_length. |
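A worked example of the running average above: with four collinear CA positions and the default window of three, the primitive contains two averaged points.
import numpy

cas = [numpy.array([0.0, 0.0, 0.0]), numpy.array([1.0, 0.0, 0.0]),
       numpy.array([2.0, 0.0, 0.0]), numpy.array([3.0, 0.0, 0.0])]
primitive = make_primitive(cas, window_length=3)
# primitive == [array([1., 0., 0.]), array([2., 0., 0.])]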
def make_primitive_smoothed(cas_coords, smoothing_level=2):
try:
s_primitive = make_primitive(cas_coords)
for x in range(smoothing_level):
s_primitive = make_primitive(s_primitive)
except ValueError:
raise ValueError(
'Smoothing level {0} too high, try reducing the number of rounds'
' or give a longer Chain (current length = {1}).'.format(
smoothing_level, len(cas_coords)))
return s_primitive | Generates smoothed primitive from a list of coordinates.
Parameters
----------
cas_coords : list(numpy.array or float or tuple)
Each element of the list must have length 3.
smoothing_level : int, optional
Number of times to run the averaging.
Returns
-------
s_primitive : list(numpy.array)
Each array has length 3.
Raises
------
ValueError
If the smoothing level is too great compared to the length
of cas_coords. |
def extend(self, ampal_container):
if isinstance(ampal_container, AmpalContainer):
self._ampal_objects.extend(ampal_container)
else:
raise TypeError(
'Only AmpalContainer objects may be merged with '
'an AmpalContainer.')
return | Extends an `AmpalContainer` with another `AmpalContainer`. |
def pdb(self):
header_title = '{:<80}\n'.format('HEADER {}'.format(self.id))
data_type = '{:<80}\n'.format('EXPDTA ISAMBARD Model')
pdb_strs = []
for ampal in self:
if isinstance(ampal, Assembly):
pdb_str = ampal.make_pdb(header=False, footer=False)
else:
pdb_str = ampal.make_pdb()
pdb_strs.append(pdb_str)
merged_strs = 'ENDMDL\n'.join(pdb_strs) + 'ENDMDL\n'
merged_pdb = ''.join([header_title, data_type, merged_strs])
return merged_pdb | Compiles the PDB strings for each state into a single file. |
def sort_by_tag(self, tag):
return AmpalContainer(sorted(self, key=lambda x: x.tags[tag])) | Sorts the `AmpalContainer` by a tag on the component objects.
Parameters
----------
tag : str
Key of tag used for sorting. |
def append(self, item):
if isinstance(item, Polymer):
self._molecules.append(item)
else:
raise TypeError(
'Only Polymer objects can be appended to an Assembly.')
return | Adds a `Polymer` to the `Assembly`.
Raises
------
TypeError
Raised if other is any type other than `Polymer`. |
def extend(self, assembly):
if isinstance(assembly, Assembly):
self._molecules.extend(assembly)
else:
raise TypeError(
'Only Assembly objects may be merged with an Assembly.')
return | Extends the `Assembly` with the contents of another `Assembly`.
Raises
------
TypeError
Raised if other is any type other than `Assembly`. |
def get_monomers(self, ligands=True, pseudo_group=False):
base_filters = dict(ligands=ligands, pseudo_group=pseudo_group)
restricted_mol_types = [x[0] for x in base_filters.items() if not x[1]]
in_groups = [x for x in self.filter_mol_types(restricted_mol_types)]
monomers = itertools.chain(
*(p.get_monomers(ligands=ligands) for p in in_groups))
return monomers | Retrieves all the `Monomers` from the `Assembly` object.
Parameters
----------
ligands : bool, optional
If `True`, will include ligand `Monomers`.
pseudo_group : bool, optional
If `True`, will include pseudo atoms. |
def get_ligands(self, solvent=True):
if solvent:
ligand_list = [x for x in self.get_monomers()
if isinstance(x, Ligand)]
else:
ligand_list = [x for x in self.get_monomers() if isinstance(
x, Ligand) and not x.is_solvent]
return LigandGroup(monomers=ligand_list) | Retrieves all ligands from the `Assembly`.
Parameters
----------
solvent : bool, optional
If `True`, solvent molecules will be included. |
def get_atoms(self, ligands=True, pseudo_group=False, inc_alt_states=False):
atoms = itertools.chain(
*(list(m.get_atoms(inc_alt_states=inc_alt_states))
for m in self.get_monomers(ligands=ligands,
pseudo_group=pseudo_group)))
return atoms | Flat list of all the `Atoms` in the `Assembly`.
Parameters
----------
ligands : bool, optional
Include ligand `Atoms`.
pseudo_group : bool, optional
Include pseudo_group `Atoms`.
inc_alt_states : bool, optional
Include alternate sidechain conformations.
Returns
-------
atoms : itertools.chain
All the `Atoms` as an iterator. |
def is_within(self, cutoff_dist, point, ligands=True):
return find_atoms_within_distance(self.get_atoms(ligands=ligands), cutoff_dist, point) | Returns all `Atoms` in the AMPAL object within `cutoff_dist` of `point`. |
def relabel_polymers(self, labels=None):
if labels:
if len(self._molecules) == len(labels):
for polymer, label in zip(self._molecules, labels):
polymer.id = label
else:
raise ValueError('Number of polymers ({}) and number of labels ({}) must be equal.'.format(
len(self._molecules), len(labels)))
else:
for i, polymer in enumerate(self._molecules):
polymer.id = chr(i + 65)
return | Relabels the component Polymers either in alphabetical order or using a list of labels.
Parameters
----------
labels : list, optional
A list of new labels.
Raises
------
ValueError
Raised if the number of labels does not match the number of component Polymer objects. |
def relabel_atoms(self, start=1):
counter = start
for atom in self.get_atoms(ligands=True):
atom.id = counter
counter += 1
return | Relabels all Atoms in numerical order, offset by the start parameter.
Parameters
----------
start : int, optional
Defines an offset for the labelling. |
def make_pdb(self, ligands=True, alt_states=False, pseudo_group=False, header=True, footer=True):
base_filters = dict(ligands=ligands, pseudo_group=pseudo_group)
restricted_mol_types = [x[0] for x in base_filters.items() if not x[1]]
in_groups = [x for x in self.filter_mol_types(restricted_mol_types)]
pdb_header = 'HEADER {:<80}\n'.format(
'ISAMBARD Model {}'.format(self.id)) if header else ''
pdb_body = ''.join([x.make_pdb(
alt_states=alt_states, inc_ligands=ligands) + '{:<80}\n'.format('TER') for x in in_groups])
pdb_footer = '{:<80}\n'.format('END') if footer else ''
pdb_str = ''.join([pdb_header, pdb_body, pdb_footer])
return pdb_str | Generates a PDB string for the Assembly.
Parameters
----------
ligands : bool, optional
If `True`, will include ligands in the output.
alt_states : bool, optional
If `True`, will include alternate conformations in the output.
pseudo_group : bool, optional
If `True`, will include pseudo atoms in the output.
header : bool, optional
If `True` will write a header for output.
footer : bool, optional
If `True` will write a footer for output.
Returns
-------
pdb_str : str
String of the pdb for the Assembly. Generated by collating
Polymer().pdb calls for the component Polymers. |
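A minimal sketch of writing an `Assembly` back out with the method above; `my_assembly` and the output path are placeholders.
pdb_str = my_assembly.make_pdb(ligands=True, alt_states=False)
with open('/tmp/model.pdb', 'w') as outfile:
    outfile.write(pdb_str)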
def backbone(self):
bb_molecules = [
p.backbone for p in self._molecules if hasattr(p, 'backbone')]
bb_assembly = Assembly(bb_molecules, assembly_id=self.id)
return bb_assembly | Generates a new `Assembly` containing only the backbone atoms.
Notes
-----
Metadata is not currently preserved from the parent object.
Sequence data is retained, but only the main chain atoms are
retained.
Returns
-------
bb_assembly : ampal.Protein
`Assembly` containing only the backbone atoms of the original
`Assembly`. |
def primitives(self):
prim_molecules = [
p.primitive for p in self._molecules if hasattr(p, 'primitive')]
prim_assembly = Assembly(molecules=prim_molecules, assembly_id=self.id)
return prim_assembly | Generates a new `Assembly` containing the primitives of each Polymer.
Notes
-----
Metadata is not currently preserved from the parent object.
Returns
-------
prim_assembly : ampal.Protein
`Assembly` containing only the primitives of the `Polymers`
in the original `Assembly`. |
def helices(self):
hel_molecules = list(itertools.chain(
*[p.helices._molecules
for p in self._molecules if hasattr(p, 'helices')]))
hel_assembly = Assembly(molecules=hel_molecules, assembly_id=self.id)
return hel_assembly | Generates new `Assembly` containing just α-helices.
Notes
-----
Metadata is not currently preserved from the parent object.
Returns
-------
hel_assembly : ampal.Protein
`Assembly` containing only the α-helices of the original `Assembly`. |
def strands(self):
strand_molecules = list(itertools.chain(
*[p.strands._molecules for p in self._molecules if hasattr(p, 'strands')]))
strand_assembly = Assembly(
molecules=strand_molecules, assembly_id=self.id)
return strand_assembly | Generates a new `Assembly` containing only the β-strands.
Notes
-----
Metadata is not currently preserved from the parent object.
Returns
-------
strand_assembly : ampal.Protein
`Assembly` containing only the β-strands of the original `Assembly`. |
def sequences(self):
seqs = [x.sequence for x in self._molecules if hasattr(x, 'sequence')]
return seqs | Returns the sequence of each `Polymer` in the `Assembly` as a list.
Returns
-------
sequences : [str]
List of sequences. |
def fasta(self):
fasta_str = ''
max_line_length = 79
for p in self._molecules:
if hasattr(p, 'sequence'):
fasta_str += '>{0}:{1}|PDBID|CHAIN|SEQUENCE\n'.format(
self.id.upper(), p.id)
seq = p.sequence
split_seq = [seq[i: i + max_line_length]
for i in range(0, len(seq), max_line_length)]
for seq_part in split_seq:
fasta_str += '{0}\n'.format(seq_part)
return fasta_str | Generates a FASTA string for the `Assembly`.
Notes
-----
Explanation of FASTA format: https://en.wikipedia.org/wiki/FASTA_format
Recommendation that all lines of text be shorter than 80
characters is adhered to. Format of PDBID|CHAIN|SEQUENCE is
consistent with files downloaded from the PDB. Uppercase
PDBID used for consistency with files downloaded from the PDB.
Useful for feeding into cdhit and then running sequence clustering.
Returns
-------
fasta_str : str
String of the fasta file for the `Assembly`. |
def get_interaction_energy(self, assign_ff=True, ff=None, mol2=False,
force_ff_assign=False):
if not ff:
ff = global_settings['buff']['force_field']
if assign_ff:
for molecule in self._molecules:
if hasattr(molecule, 'update_ff'):
molecule.update_ff(
ff, mol2=mol2, force_ff_assign=force_ff_assign)
else:
raise AttributeError(
'The following molecule does not have an update_ff '
'method:\n{}\nIf this is a custom molecule type it '
'should inherit from BaseAmpal:'.format(molecule))
interactions = find_inter_ampal(self, ff.distance_cutoff)
buff_score = score_interactions(interactions, ff)
return buff_score | Calculates the interaction energy of the AMPAL object.
Parameters
----------
assign_ff: bool, optional
If true the force field will be updated if required.
ff: BuffForceField, optional
The force field to be used for scoring.
mol2: bool, optional
If true, mol2 style labels will also be used.
force_ff_assign: bool, optional
If true, the force field will be completely reassigned,
ignoring the cached parameters.
Returns
-------
buff_score: buff.BUFFScore
A BUFFScore object with information about each of the
interactions and the `Atoms` involved.
Raises
------
AttributeError
Raise if a component molecule does not have an `update_ff`
method. |
def repack_all(self):
non_na_sequences = [s for s in self.sequences if ' ' not in s]
self.pack_new_sequences(non_na_sequences)
return | Repacks the side chains of all Polymers in the Assembly. |
def tag_secondary_structure(self, force=False):
for polymer in self._molecules:
if polymer.molecule_type == 'protein':
polymer.tag_secondary_structure(force=force)
return | Tags each `Monomer` in the `Assembly` with its secondary structure.
Notes
-----
DSSP must be available to call. Check by running
`isambard.external_programs.dssp.test_dssp`. If DSSP is not
available, please follow instruction here to add it:
https://github.com/woolfson-group/isambard#external-programs
For more information on DSSP see [1].
References
----------
.. [1] Kabsch W, Sander C (1983) "Dictionary of protein
secondary structure: pattern recognition of hydrogen-bonded
and geometrical features", Biopolymers, 22, 2577-637.
Parameters
----------
force : bool, optional
If True the tag will be run even if `Monomers` are already tagged |
def tag_dssp_solvent_accessibility(self, force=False):
for polymer in self._molecules:
polymer.tag_dssp_solvent_accessibility(force=force)
return | Tags each `Monomer` in the Assembly with its solvent accessibility.
Notes
-----
For more about DSSP's solvent accessibility metric, see:
http://swift.cmbi.ru.nl/gv/dssp/HTML/descrip.html#ACC
DSSP must be available to call. Check by running
`isambard.external_programs.dssp.test_dssp`. If DSSP is not
available, please follow instruction here to add it:
https://github.com/woolfson-group/isambard#external-programs
For more information on DSSP see [1].
References
----------
.. [1] Kabsch W, Sander C (1983) "Dictionary of protein
secondary structure: pattern recognition of hydrogen-bonded
and geometrical features", Biopolymers, 22, 2577-637.
Parameters
----------
force : bool, optional
If True the tag will be run even if Monomers are already tagged |
def tag_torsion_angles(self, force=False):
for polymer in self._molecules:
if polymer.molecule_type == 'protein':
polymer.tag_torsion_angles(force=force)
return | Tags each `Monomer` in the `Assembly` with its torsion angles.
Parameters
----------
force : bool, optional
If `True`, the tag will be run even if `Monomers` are already
tagged. |
def tag_ca_geometry(self, force=False, reference_axis=None,
reference_axis_name='ref_axis'):
for polymer in self._molecules:
if polymer.molecule_type == 'protein':
polymer.tag_ca_geometry(
force=force, reference_axis=reference_axis,
reference_axis_name=reference_axis_name)
return | Tags each `Monomer` in the `Assembly` with its helical geometry.
Parameters
----------
force : bool, optional
If True the tag will be run even if `Monomers` are already tagged.
reference_axis : list(numpy.array or tuple or list), optional
Coordinates to feed to geometry functions that depend on
having a reference axis.
reference_axis_name : str, optional
Used to name the keys in tags at `Chain` and `Residue` level. |
def tag_atoms_unique_ids(self, force=False):
tagged = ['unique_id' in x.tags.keys() for x in self.get_atoms()]
if (not all(tagged)) or force:
for m in self.get_monomers():
for atom_type, atom in m.atoms.items():
atom.tags['unique_id'] = (m.unique_id, atom_type)
return | Tags each Atom in the Assembly with its unique_id.
Notes
-----
The unique_id for each atom is a tuple (a double). `unique_id[0]`
is the unique_id for its parent `Monomer` (see `Monomer.unique_id`
for more information). `unique_id[1]` is the atom_type in the
`Assembly` as a string, e.g. 'CA', 'CD2'.
Parameters
----------
force : bool, optional
If True the tag will be run even if Atoms are already tagged.
If False, only runs if at least one Atom is not tagged. |
def align_nab(tar, ref):
rot_trans_1 = find_transformations(
tar['N'].array, tar['CA'].array, ref['N'].array, ref['CA'].array)
apply_trans_rot(tar, *rot_trans_1)
rot_ang_ca_cb = dihedral(tar['CB'], ref['CA'], ref['N'], ref['CB'])
tar.rotate(rot_ang_ca_cb, ref['N'].array - ref['CA'].array, ref['N'].array)
return | Aligns the N-CA and CA-CB vector of the target monomer.
Parameters
----------
tar: ampal.Residue
The residue that will be aligned to the reference.
ref: ampal.Residue
The reference residue for the alignment. |
def apply_trans_rot(ampal, translation, angle, axis, point, radians=False):
if not numpy.isclose(angle, 0.0):
ampal.rotate(angle=angle, axis=axis, point=point, radians=radians)
ampal.translate(vector=translation)
return | Applies a translation and rotation to an AMPAL object. |
def find_ss_regions_polymer(polymer, ss):
if isinstance(ss, str):
ss = [ss[:]]
tag_key = 'secondary_structure'
monomers = [x for x in polymer if tag_key in x.tags.keys()]
if len(monomers) == 0:
return Assembly()
if (len(ss) == 1) and (all([m.tags[tag_key] == ss[0] for m in monomers])):
return Assembly(polymer)
previous_monomer = None
fragment = Polypeptide(ampal_parent=polymer)
fragments = Assembly()
poly_id = 0
for monomer in monomers:
current_monomer = monomer.tags[tag_key]
if (current_monomer == previous_monomer) or (not previous_monomer):
fragment.append(monomer)
else:
if previous_monomer in ss:
fragment.tags[tag_key] = monomer.tags[tag_key]
fragment.id = chr(poly_id + 65)
fragments.append(fragment)
poly_id += 1
fragment = Polypeptide(ampal_parent=polymer)
fragment.append(monomer)
previous_monomer = monomer.tags[tag_key]
return fragments | Returns an `Assembly` of regions tagged as secondary structure.
Parameters
----------
polymer : Polypeptide
`Polymer` object to be searched for secondary structure regions.
ss : list
List of secondary structure tags to be extracted, e.g. ['H']
would return helices, ['H', 'E'] would return helices
and strands.
Returns
-------
fragments : Assembly
`Assembly` containing a `Polymer` for each region of specified
secondary structure. |
def flat_list_to_polymer(atom_list, atom_group_s=4):
atom_labels = ['N', 'CA', 'C', 'O', 'CB']
atom_elements = ['N', 'C', 'C', 'O', 'C']
atoms_coords = [atom_list[x:x + atom_group_s]
for x in range(0, len(atom_list), atom_group_s)]
atoms = [[Atom(x[0], x[1]) for x in zip(y, atom_elements)]
for y in atoms_coords]
if atom_group_s == 5:
monomers = [Residue(OrderedDict(zip(atom_labels, x)), 'ALA')
for x in atoms]
elif atom_group_s == 4:
monomers = [Residue(OrderedDict(zip(atom_labels, x)), 'GLY')
for x in atoms]
else:
raise ValueError(
'Parameter atom_group_s must be 4 or 5 so atoms can be labeled correctly.')
polymer = Polypeptide(monomers=monomers)
return polymer | Takes a flat list of atomic coordinates and converts it to a `Polymer`.
Parameters
----------
atom_list : list(numpy.array or tuple or list)
Flat list of atomic coordinates; each element has length 3.
atom_group_s : int, optional
Size of atom groups.
Returns
-------
polymer : Polypeptide
`Polymer` object containing atom coords converted `Monomers`.
Raises
------
ValueError
Raised if `atom_group_s` != 4 or 5 |
def align(target, mobile, target_i=0, mobile_i=0):
# First, align N->CA vectors.
s1, e1, s2, e2 = [x._vector
for x in [mobile[mobile_i]['N'], mobile[mobile_i]['CA'],
target[target_i]['N'], target[target_i]['CA']]]
translation, angle, axis, point = find_transformations(
s1, e1, s2, e2, radians=False)
# Rotation first, Then translation.
mobile.rotate(angle=angle, axis=axis, point=point, radians=False)
mobile.translate(vector=translation)
# Second, rotate about N->CA axis to align CA->C vectors.
angle = dihedral(mobile[mobile_i]['C'], mobile[mobile_i]
['N'], mobile[mobile_i]['CA'], target[target_i]['C'])
axis = target[target_i]['CA'] - target[target_i]['N']
point = target[target_i]['N']._vector
mobile.rotate(angle=angle, axis=axis, point=point)
return | Aligns one Polypeptide (mobile) to another (target).
Notes
-----
This function directly modifies atoms of the mobile Polypeptide!
It does not return a new object.
Parameters
----------
target : Polypeptide
Polypeptide to be aligned to.
mobile : Polypeptide
Polypeptide to be moved during alignment.
target_i : int, optional
Index of `Residue` in target to align to.
mobile_i : int, optional
Index of `Residue` in mobile to be aligned. |
def get_slice_from_res_id(self, start, end):
id_dict = {str(m.id): m for m in self._monomers}
slice_polymer = Polypeptide(
[id_dict[str(x)] for x in range(int(start), int(end) + 1)], self.id)
return slice_polymer | Returns a new `Polypeptide` containing the `Residues` in start/end range.
Parameters
----------
start : str
string representing start residue id (PDB numbering)
end : str
string representing end residue id (PDB numbering)
Returns
-------
slice_polymer : Polymer
Polymer containing the residue range specified by start-end |
def backbone(self):
bb_poly = Polypeptide([x.backbone for x in self._monomers], self.id)
return bb_poly | Returns a new `Polymer` containing only the backbone atoms.
Notes
-----
Metadata is not currently preserved from the parent object.
Sequence data is retained, but only the main chain atoms are retained.
Returns
-------
bb_poly : Polypeptide
Polymer containing only the backbone atoms of the original
Polymer. |
def pack_new_sequence(self, sequence):
# This import is here to prevent a circular import.
from ampal.pdb_parser import convert_pdb_to_ampal
polymer_bb = self.backbone
if len(sequence) != len(polymer_bb):
raise ValueError(
'Sequence length ({}) does not match Polymer length ({}).'.format(
len(sequence), len(polymer_bb)))
scwrl_out = pack_sidechains(self.backbone.pdb, sequence)
if scwrl_out is None:
return
else:
packed_structure, scwrl_score = scwrl_out
new_assembly = convert_pdb_to_ampal(packed_structure, path=False)
self._monomers = new_assembly[0]._monomers[:]
self.tags['scwrl_score'] = scwrl_score
self.assign_force_field(global_settings['buff']['force_field'])
return | Packs a new sequence onto the polymer using Scwrl4.
Parameters
----------
sequence : str
String containing the amino acid sequence. This must
be the same length as the Polymer
Raises
------
ValueError
Raised if the sequence length does not match the
number of monomers in the Polymer. |
def sequence(self):
seq = [x.mol_letter for x in self._monomers]
return ''.join(seq) | Returns the sequence of the `Polymer` as a string.
Returns
-------
sequence : str
String of the `Residue` sequence of the `Polypeptide`. |
def backbone_bond_lengths(self):
bond_lengths = dict(
n_ca=[distance(r['N'], r['CA'])
for r in self.get_monomers(ligands=False)],
ca_c=[distance(r['CA'], r['C'])
for r in self.get_monomers(ligands=False)],
c_o=[distance(r['C'], r['O'])
for r in self.get_monomers(ligands=False)],
c_n=[distance(r1['C'], r2['N']) for r1, r2 in [
(self[i], self[i + 1]) for i in range(len(self) - 1)]],
)
return bond_lengths | Dictionary containing backbone bond lengths as lists of floats.
Returns
-------
bond_lengths : dict
Keys are `n_ca`, `ca_c`, `c_o` and `c_n`, referring to the
N-CA, CA-C, C=O and C-N bonds respectively. Values are
lists of floats : the bond lengths in Angstroms.
The lists of n_ca, ca_c and c_o are of length k for
a Polypeptide containing k Residues. The list of c_n bonds
is of length k-1 for a Polypeptide containing k Residues
(C-N formed between successive `Residue` pairs). |
def backbone_bond_angles(self):
bond_angles = dict(
n_ca_c=[angle_between_vectors(r['N'] - r['CA'], r['C'] - r['CA'])
for r in self.get_monomers(ligands=False)],
ca_c_o=[angle_between_vectors(r['CA'] - r['C'], r['O'] - r['C'])
for r in self.get_monomers(ligands=False)],
ca_c_n=[angle_between_vectors(r1['CA'] - r1['C'], r2['N'] - r1['C'])
for r1, r2 in [(self[i], self[i + 1]) for i in range(len(self) - 1)]],
c_n_ca=[angle_between_vectors(r1['C'] - r2['N'], r2['CA'] - r2['N'])
for r1, r2 in [(self[i], self[i + 1]) for i in range(len(self) - 1)]],
)
return bond_angles | Dictionary containing backbone bond angles as lists of floats.
Returns
-------
bond_angles : dict
Keys are `n_ca_c`, `ca_c_o`, `ca_c_n` and `c_n_ca`, referring
to the N-CA-C, CA-C=O, CA-C-N and C-N-CA angles respectively.
Values are lists of floats : the bond angles in degrees.
The lists of n_ca_c and ca_c_o are of length k for a `Polypeptide`
containing k `Residues`. The list of ca_c_n and c_n_ca are of
length k-1 for a `Polypeptide` containing k `Residues` (These
angles are across the peptide bond, and are therefore formed
between successive `Residue` pairs). |
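A sketch of using the two geometry summaries above to eyeball a model; `polypeptide` is a placeholder for any `Polypeptide`, and both summaries are assumed to be exposed as properties, as the docstrings suggest.
lengths = polypeptide.backbone_bond_lengths  # lists of bond lengths in Angstroms
angles = polypeptide.backbone_bond_angles    # lists of bond angles in degrees
mean_c_n = sum(lengths['c_n']) / len(lengths['c_n'])  # average peptide-bond length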
def tag_secondary_structure(self, force=False):
tagged = ['secondary_structure' in x.tags.keys()
for x in self._monomers]
if (not all(tagged)) or force:
dssp_out = run_dssp(self.pdb, path=False)
if dssp_out is None:
return
dssp_ss_list = extract_all_ss_dssp(dssp_out, path=False)
for monomer, dssp_ss in zip(self._monomers, dssp_ss_list):
monomer.tags['secondary_structure'] = dssp_ss[1]
return | Tags each `Residue` of the `Polypeptide` with secondary structure.
Notes
-----
DSSP must be available to call. Check by running
`isambard.external_programs.dssp.test_dssp`. If DSSP is not
available, please follow instruction here to add it:
https://github.com/woolfson-group/isambard#external-programs
For more information on DSSP see [1].
References
----------
.. [1] Kabsch W, Sander C (1983) "Dictionary of protein
secondary structure: pattern recognition of hydrogen-bonded
and geometrical features", Biopolymers, 22, 2577-637.
Parameters
----------
force : bool, optional
If `True` the tag will be run even if `Residues` are
already tagged. |
def tag_dssp_solvent_accessibility(self, force=False):
tagged = ['dssp_acc' in x.tags.keys() for x in self._monomers]
if (not all(tagged)) or force:
dssp_out = run_dssp(self.pdb, path=False)
if dssp_out is None:
return
dssp_acc_list = extract_solvent_accessibility_dssp(
dssp_out, path=False)
for monomer, dssp_acc in zip(self._monomers, dssp_acc_list):
monomer.tags['dssp_acc'] = dssp_acc[-1]
return | Tags each `Residue` of the `Polymer` with its solvent accessibility.
Notes
-----
For more about DSSP's solvent accessibility metric, see:
http://swift.cmbi.ru.nl/gv/dssp/HTML/descrip.html#ACC
References
----------
.. [1] Kabsch W, Sander C (1983) "Dictionary of protein
secondary structure: pattern recognition of hydrogen-bonded
and geometrical features", Biopolymers, 22, 2577-637.
Parameters
----------
force : bool, optional
If `True` the tag will be run even if `Residues` are
already tagged. |
def tag_sidechain_dihedrals(self, force=False):
tagged = ['chi_angles' in x.tags.keys() for x in self._monomers]
if (not all(tagged)) or force:
for monomer in self._monomers:
chi_angles = measure_sidechain_torsion_angles(
monomer, verbose=False)
monomer.tags['chi_angles'] = chi_angles
        return | Tags each `Monomer` of the `Polypeptide` with its side-chain dihedral angles.
        Parameters
        ----------
        force : bool, optional
If `True` the tag will be run even if `Residues` are
already tagged. |
def tag_torsion_angles(self, force=False):
tagged = ['omega' in x.tags.keys() for x in self._monomers]
if (not all(tagged)) or force:
tas = measure_torsion_angles(self._monomers)
for monomer, (omega, phi, psi) in zip(self._monomers, tas):
monomer.tags['omega'] = omega
monomer.tags['phi'] = phi
monomer.tags['psi'] = psi
monomer.tags['tas'] = (omega, phi, psi)
        return | Tags each `Monomer` of the `Polymer` with its omega, phi and psi torsion angles.
Parameters
----------
force : bool, optional
If `True` the tag will be run even if `Residues` are
already tagged. |
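A sketch of collecting (phi, psi) pairs after tagging, e.g. for a Ramachandran-style summary; `polypeptide` is again hypothetical, and terminal residues are assumed to carry `None` for undefined angles.
polypeptide.tag_torsion_angles()
phi_psi = [(r.tags['phi'], r.tags['psi'])
           for r in polypeptide.get_monomers(ligands=False)]
# Drop entries where an angle is undefined (assumed None at the termini).
phi_psi = [(phi, psi) for phi, psi in phi_psi
           if (phi is not None) and (psi is not None)]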
def tag_ca_geometry(self, force=False, reference_axis=None,
reference_axis_name='ref_axis'):
tagged = ['rise_per_residue' in x.tags.keys() for x in self._monomers]
if (not all(tagged)) or force:
# Assign tags None if Polymer is too short to have a primitive.
if len(self) < 7:
rprs = [None] * len(self)
rocs = [None] * len(self)
rpts = [None] * len(self)
else:
rprs = self.rise_per_residue()
rocs = self.radii_of_curvature()
rpts = residues_per_turn(self)
for monomer, rpr, roc, rpt in zip(self._monomers, rprs, rocs, rpts):
monomer.tags['rise_per_residue'] = rpr
monomer.tags['radius_of_curvature'] = roc
monomer.tags['residues_per_turn'] = rpt
# Functions that require a reference_axis.
if (reference_axis is not None) and (len(reference_axis) == len(self)):
# Set up arguments to pass to functions.
ref_axis_args = dict(p=self,
reference_axis=reference_axis,
tag=True,
reference_axis_name=reference_axis_name)
# Run the functions.
polymer_to_reference_axis_distances(**ref_axis_args)
crick_angles(**ref_axis_args)
alpha_angles(**ref_axis_args)
return | Tags each `Residue` with rise_per_residue, radius_of_curvature and residues_per_turn.
Parameters
----------
force : bool, optional
If `True` the tag will be run even if `Residues` are already
tagged.
reference_axis : list(numpy.array or tuple or list), optional
Coordinates to feed to geometry functions that depend on
having a reference axis.
reference_axis_name : str, optional
Used to name the keys in tags at `Polypeptide` and `Residue` level. |
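A sketch of the simplest call, without a reference axis; `polypeptide` is hypothetical, and chains shorter than seven residues are tagged with `None`, as noted in the code.
polypeptide.tag_ca_geometry()
rises = [r.tags['rise_per_residue']
         for r in polypeptide.get_monomers(ligands=False)]
Passing `reference_axis` (a list of coordinates the same length as the chain) additionally tags distances, Crick angles and alpha angles against that axis.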
def valid_backbone_bond_lengths(self, atol=0.1):
bond_lengths = self.backbone_bond_lengths
a1 = numpy.allclose(bond_lengths['n_ca'],
[ideal_backbone_bond_lengths['n_ca']] * len(self),
atol=atol)
a2 = numpy.allclose(bond_lengths['ca_c'],
[ideal_backbone_bond_lengths['ca_c']] * len(self),
atol=atol)
a3 = numpy.allclose(bond_lengths['c_o'],
[ideal_backbone_bond_lengths['c_o']] * len(self),
atol=atol)
a4 = numpy.allclose(bond_lengths['c_n'],
[ideal_backbone_bond_lengths['c_n']] *
(len(self) - 1),
atol=atol)
return all([a1, a2, a3, a4]) | True if all backbone bonds are within atol Angstroms of the expected distance.
Notes
-----
Ideal bond lengths taken from [1].
References
----------
.. [1] Schulz, G. E, and R. Heiner Schirmer. Principles Of
Protein Structure. New York: Springer-Verlag, 1979.
Parameters
----------
atol : float, optional
            Tolerance value in Angstroms for the absolute deviation
away from ideal backbone bond lengths. |
def valid_backbone_bond_angles(self, atol=20):
bond_angles = self.backbone_bond_angles
omegas = [x[0] for x in measure_torsion_angles(self)]
trans = ['trans' if (omega is None) or (
abs(omega) >= 90) else 'cis' for omega in omegas]
ideal_n_ca_c = [ideal_backbone_bond_angles[x]['n_ca_c'] for x in trans]
ideal_ca_c_o = [ideal_backbone_bond_angles[trans[i + 1]]
['ca_c_o'] for i in range(len(trans) - 1)]
ideal_ca_c_o.append(ideal_backbone_bond_angles['trans']['ca_c_o'])
ideal_ca_c_n = [ideal_backbone_bond_angles[x]['ca_c_n']
for x in trans[1:]]
ideal_c_n_ca = [ideal_backbone_bond_angles[x]['c_n_ca']
for x in trans[1:]]
a1 = numpy.allclose(bond_angles['n_ca_c'], [ideal_n_ca_c], atol=atol)
a2 = numpy.allclose(bond_angles['ca_c_o'], [ideal_ca_c_o], atol=atol)
a3 = numpy.allclose(bond_angles['ca_c_n'], [ideal_ca_c_n], atol=atol)
a4 = numpy.allclose(bond_angles['c_n_ca'], [ideal_c_n_ca], atol=atol)
return all([a1, a2, a3, a4]) | True if all backbone bond angles are within atol degrees of their expected values.
Notes
-----
Ideal bond angles taken from [1].
References
----------
.. [1] Schulz, G. E, and R. Heiner Schirmer. Principles Of
Protein Structure. New York: Springer-Verlag, 1979.
Parameters
----------
atol : float, optional
Tolerance value in degrees for the absolute deviation
away from ideal backbone bond angles. |
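A sketch of a simple geometry sanity check combining both validators, using their documented default tolerances; `polypeptide` is hypothetical.
geometry_ok = (polypeptide.valid_backbone_bond_lengths(atol=0.1) and
               polypeptide.valid_backbone_bond_angles(atol=20))
if not geometry_ok:
    print('Backbone geometry deviates from ideal values.')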
def backbone(self):
try:
backbone = OrderedDict([('N', self.atoms['N']),
('CA', self.atoms['CA']),
('C', self.atoms['C']),
('O', self.atoms['O'])])
except KeyError:
missing_atoms = filter(lambda x: x not in self.atoms.keys(),
('N', 'CA', 'C', 'O')
)
raise KeyError('Error in residue {} {} {}, missing ({}) atoms. '
'`atoms` must be an `OrderedDict` with coordinates '
'defined for the backbone (N, CA, C, O) atoms.'
.format(self.ampal_parent.id, self.mol_code,
self.id, ', '.join(missing_atoms)))
bb_monomer = Residue(backbone, self.mol_code, monomer_id=self.id,
insertion_code=self.insertion_code,
is_hetero=self.is_hetero)
return bb_monomer | Returns a new `Residue` containing only the backbone atoms.
Returns
-------
bb_monomer : Residue
`Residue` containing only the backbone atoms of the original
`Monomer`.
Raises
------
        KeyError
            Raised if the `atoms` dict does not contain the backbone
atoms (N, CA, C, O). |
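A sketch of stripping a residue to its backbone, assuming `residue` is a hypothetical `Residue` with all four backbone atoms and that `backbone` is exposed as a property.
bb_only = residue.backbone
print(list(bb_only.atoms.keys()))   # expected: ['N', 'CA', 'C', 'O']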
def unique_id(self):
if self.is_hetero:
if self.mol_code == 'HOH':
hetero_flag = 'W'
else:
hetero_flag = 'H_{0}'.format(self.mol_code)
else:
hetero_flag = ' '
return self.ampal_parent.id, (hetero_flag, self.id, self.insertion_code) | Generates a tuple that uniquely identifies a `Monomer` in an `Assembly`.
Notes
-----
The unique_id will uniquely identify each monomer within a polymer.
If each polymer in an assembly has a distinct id, it will uniquely
identify each monomer within the assembly.
The hetero-flag is defined as in Biopython as a string that is
either a single whitespace in the case of a non-hetero atom,
or 'H_' plus the name of the hetero-residue (e.g. 'H_GLC' in
the case of a glucose molecule), or 'W' in the case of a water
molecule.
For more information, see the Biopython documentation or this
Biopython wiki page:
http://biopython.org/wiki/The_Biopython_Structural_Bioinformatics_FAQ
Returns
-------
unique_id : tuple
            unique_id[0] is the polymer_id; unique_id[1] is a triple
of the hetero-flag, the monomer id (residue number) and the
insertion code. |
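A sketch of unpacking the identifier, assuming `residue` is a hypothetical `Residue` inside a parsed structure and `unique_id` is exposed as a property.
polymer_id, (hetero_flag, residue_number, insertion_code) = residue.unique_id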
def side_chain(self):
side_chain_atoms = []
if self.mol_code != 'GLY':
covalent_bond_graph = generate_covalent_bond_graph(
find_covalent_bonds(self))
try:
subgraphs = generate_bond_subgraphs_from_break(
covalent_bond_graph, self['CA'], self['CB'])
if len(subgraphs) == 1:
subgraphs = generate_bond_subgraphs_from_break(
subgraphs[0], self['CD'], self['N'])
if len(subgraphs) == 2:
for g in subgraphs:
if self['CB'] in g:
side_chain_atoms = g.nodes()
break
            except Exception:
warning_message = "Malformed PDB for Residue {0}: {1}.".format(
self.id, self)
if 'CB' in self.atoms.keys():
side_chain_atoms.append(self['CB'])
warning_message += " Side-chain is just the CB atom."
else:
warning_message += " Empty side-chain."
warnings.warn(warning_message, MalformedPDBWarning)
return side_chain_atoms | List of the side-chain atoms (R-group).
Notes
-----
Returns empty list for glycine.
Returns
-------
side_chain_atoms: list(`Atoms`) |
def side_chain_environment(self, cutoff=4, include_neighbours=True,
inter_chain=True, include_ligands=False, include_solvent=False):
if self.mol_code == 'GLY':
return [self]
side_chain_dict = {x: {y: self.states[x][y]
for y in self.states[x] if self.states[x][y] in
self.side_chain} for x in self.states}
side_chain_monomer = Monomer(
atoms=side_chain_dict, monomer_id=self.id,
ampal_parent=self.ampal_parent)
sc_environment = side_chain_monomer.environment(
cutoff=cutoff, include_ligands=include_ligands,
include_neighbours=include_neighbours,
include_solvent=include_solvent, inter_chain=inter_chain)
return sc_environment | Finds `Residues` with any atom within the cutoff distance of side-chain.
Notes
-----
Includes the parent residue in the list.
Parameters
----------
cutoff : float, optional
Maximum inter-atom distance for residue to be included.
Defaults to 4.
        include_neighbours : bool, optional
            If `False`, does not return `Residue` at i-1, i+1 positions
            in same chain as `Residue`.
        inter_chain : bool, optional
            If `False`, only includes nearby `Residue` in the same chain
            as the `Residue`.
        include_ligands : bool, optional
            If `True`, `Residues` classed as ligands but not identified as
            solvent will be included in the environment.
        include_solvent : bool, optional
            If `True`, `Monomers` categorised as solvent will be included
            in the environment.
Returns
-------
sc_environment : list
List of monomers within cutoff distance of side-chain. |
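A usage sketch with a hypothetical `residue`, listing the residue name and number of every residue whose atoms fall within 5 Angstroms of the side chain.
contacts = residue.side_chain_environment(cutoff=5, include_neighbours=False)
contact_ids = [(m.mol_code, m.id) for m in contacts]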
def load_global_settings():
with open(settings_path, 'r') as settings_f:
global global_settings
settings_json = json.loads(settings_f.read())
if global_settings is None:
global_settings = settings_json
global_settings[u'package_path'] = package_dir
else:
for k, v in settings_json.items():
                if isinstance(v, dict):
global_settings[k].update(v)
else:
global_settings[k] = v | Loads settings file containing paths to dependencies and other optional configuration elements. |
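A minimal sketch of refreshing the settings after editing the JSON file on disk; it assumes this function and `global_settings` are importable from the module in which they are defined.
load_global_settings()                  # (re)load settings.json from disk
print(global_settings['package_path'])  # set on the first load and preserved afterwards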
def build(self):
for i in range(2):
self._molecules.append(
self.make_helix(self.aas[i], self.axis_distances[i],
self.z_shifts[i], self.phis[i], self.splays[i],
self.off_plane[i]))
return | Builds a `HelixPair` using the defined attributes. |
def make_helix(aa, axis_distance, z_shift, phi, splay, off_plane):
start = numpy.array([axis_distance, 0 + z_shift, 0])
end = numpy.array([axis_distance, (aa * 1.52) + z_shift, 0])
mid = (start + end) / 2
helix = Helix.from_start_and_end(start, end, aa=aa)
helix.rotate(splay, (0, 0, 1), mid)
helix.rotate(off_plane, (1, 0, 0), mid)
helix.rotate(phi, helix.axis.unit_tangent, helix.helix_start)
return helix | Builds a helix for a given set of parameters. |
def build(self):
self._molecules = []
if self.handedness == 'l':
handedness = -1
else:
handedness = 1
rot_ang = self.rot_ang * handedness
for i in range(self.num_of_repeats):
dup_unit = copy.deepcopy(self.repeat_unit)
z = (self.rise * i) * numpy.array([0, 0, 1])
dup_unit.translate(z)
dup_unit.rotate(rot_ang * i, [0, 0, 1])
self.extend(dup_unit)
self.relabel_all()
return | Builds a Solenoid using the defined attributes. |
def from_start_and_end(cls, start, end, sequence, helix_type='b_dna',
phos_3_prime=False):
start = numpy.array(start)
end = numpy.array(end)
instance = cls(sequence, helix_type=helix_type,
phos_3_prime=phos_3_prime)
instance.move_to(start=start, end=end)
return instance | Generates a helical `Polynucleotide` that is built along an axis.
Parameters
----------
start: [float, float, float]
Start of the build axis.
end: [float, float, float]
End of build axis.
sequence: str
The nucleotide sequence of the nucleic acid.
helix_type: str
The type of nucleic acid helix to generate.
phos_3_prime: bool
            If `False` the 5' and the 3' phosphate will be omitted.
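A usage sketch; the class name is an assumption (the docstring only says the method returns a helical `Polynucleotide`), and the B-DNA geometry is left to the defaults.
# Hypothetical: `Polynucleotide` stands for whichever class exposes this classmethod.
strand = Polynucleotide.from_start_and_end(
    start=(0, 0, 0), end=(0, 0, 34),
    sequence='GATTACAGATT', helix_type='b_dna')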
def move_to(self, start, end):
start = numpy.array(start)
end = numpy.array(end)
if numpy.allclose(start, end):
raise ValueError('start and end must NOT be identical')
translation, angle, axis, point = find_transformations(
self.helix_start, self.helix_end, start, end)
if not numpy.isclose(angle, 0.0):
self.rotate(angle=angle, axis=axis, point=point, radians=False)
self.translate(vector=translation)
return | Moves the `Polynucleotide` to lie on the `start` and `end` vector.
Parameters
----------
start : 3D Vector (tuple or list or numpy.array)
The coordinate of the start of the helix primitive.
end : 3D Vector (tuple or list or numpy.array)
The coordinate of the end of the helix primitive.
Raises
------
ValueError
Raised if `start` and `end` are very close together. |
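Continuing the hypothetical `strand` from the previous sketch, the same object can be re-aligned onto a new axis.
strand.move_to(start=(0, 0, 0), end=(34, 0, 0))   # now runs along x instead of z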
def fit_heptad_register(crangles):
crangles = [x if x > 0 else 360 + x for x in crangles]
hept_p = [x * (360.0 / 7.0) + ((360.0 / 7.0) / 2.0) for x in range(7)]
ideal_crangs = [
hept_p[0],
hept_p[2],
hept_p[4],
hept_p[6],
hept_p[1],
hept_p[3],
hept_p[5]
]
full_hept = len(crangles) // 7
    ideal_crang_list = ideal_crangs * (full_hept + 2)  # Deliberately over-long; zip below trims it to len(crangles).
fitting = []
for i in range(7):
ang_pairs = zip(crangles, ideal_crang_list[i:])
ang_diffs = [abs(y - x) for x, y in ang_pairs]
fitting.append((i, numpy.mean(ang_diffs), numpy.std(ang_diffs)))
return sorted(fitting, key=lambda x: x[1]) | Attempts to fit a heptad repeat to a set of Crick angles.
Parameters
----------
crangles: [float]
A list of average Crick angles for the coiled coil.
Returns
-------
fit_data: [(float, float, float)]
Sorted list of fits for each heptad position. |
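A sketch of reading the result, assuming `average_crick_angles` is a hypothetical list of per-position mean Crick angles for the coiled coil.
fits = fit_heptad_register(average_crick_angles)
best_offset, mean_abs_error, error_std = fits[0]   # best-fitting register is first after sorting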
def gather_layer_info(self):
for i in range(len(self.cc[0])):
layer_radii = [x[i].tags['distance_to_ref_axis'] for x in self.cc]
self.radii_layers.append(layer_radii)
layer_alpha = [x[i].tags['alpha_angle_ref_axis'] for x in self.cc]
self.alpha_layers.append(layer_alpha)
layer_ca = [x[i].tags['crick_angle_ref_axis'] for x in self.cc]
self.ca_layers.append(layer_ca)
return | Extracts the tagged coiled-coil parameters for each layer. |