def calc_average_parameters(parameter_layers):
    """Takes a group of equal-length lists and averages them across each index.

    Returns
    -------
    mean_layers : [float]
        List of values averaged by index.
    overall_mean : float
        Mean of the averaged values.
    """
    mean_layers = [numpy.mean(x) if x[0] else 0 for x in parameter_layers]
    overall_mean = numpy.mean([x for x in mean_layers if x])
    return mean_layers, overall_mean
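A minimal usage sketch (the layer values are invented for illustration; `numpy` is assumed to be imported at module level):

# Hypothetical input: three layers of per-layer parameter values.
layers = [[1.0, 3.0], [0, 0], [2.0, 4.0]]
means, overall = calc_average_parameters(layers)
# means == [2.0, 0, 3.0] -- a layer whose first value is falsy is recorded as 0
# overall == 2.5         -- zero entries are excluded from the overall mean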
def heptad_register(self):
    """Returns the calculated register of the coiled coil and the fit quality."""
    base_reg = 'abcdefg'
    exp_base = base_reg * (self.cc_len // 7 + 2)
    ave_ca_layers = self.calc_average_parameters(self.ca_layers)[0][:-1]
    reg_fit = fit_heptad_register(ave_ca_layers)
    hep_pos = reg_fit[0][0]
    return exp_base[hep_pos:hep_pos + self.cc_len], reg_fit[0][1:]
@classmethod
def buff_interaction_eval(cls, specification, sequences, parameters, **kwargs):
    """Creates an optimizer with the default build and BUFF interaction eval functions.

    Notes
    -----
    Any keyword arguments will be propagated down to BaseOptimizer.

    Parameters
    ----------
    specification : ampal.assembly.specification
        Any assembly level specification.
    sequences : [str]
        A list of sequences, one for each polymer.
    parameters : [base_ev_opt.Parameter]
        A list of `Parameter` objects in the same order as the
        function signature expects.
    """
    instance = cls(specification, sequences, parameters,
                   build_fn=default_build,
                   eval_fn=buff_interaction_eval, **kwargs)
    return instance
@classmethod
def rmsd_eval(cls, specification, sequences, parameters, reference_ampal,
              **kwargs):
    """Creates an optimizer with the default build and RMSD eval functions.

    Notes
    -----
    Any keyword arguments will be propagated down to BaseOptimizer.
    RMSD eval is restricted to a single core only, due to restrictions
    on closure pickling.

    Parameters
    ----------
    specification : ampal.assembly.specification
        Any assembly level specification.
    sequences : [str]
        A list of sequences, one for each polymer.
    parameters : [base_ev_opt.Parameter]
        A list of `Parameter` objects in the same order as the
        function signature expects.
    reference_ampal : ampal.Assembly
        The target structure of the optimisation.
    """
    eval_fn = make_rmsd_eval(reference_ampal)
    instance = cls(specification, sequences, parameters,
                   build_fn=default_build, eval_fn=eval_fn,
                   mp_disabled=True, **kwargs)
    return instance
def parse_individual(self, individual):
    """Converts a DEAP individual into a full list of parameters.

    Parameters
    ----------
    individual : deap individual from optimization
        Details vary according to the type of optimization, but
        parameters within a deap individual are always between -1 and
        1. This function converts them into the values used to
        actually build the model.

    Returns
    -------
    fullpars : list
        Full parameter list for model building.
    """
    scaled_ind = []
    for i in range(len(self.value_means)):
        scaled_ind.append(self.value_means[i] + (
            individual[i] * self.value_ranges[i]))
    fullpars = list(self.arrangement)
    for k in range(len(self.variable_parameters)):
        for j in range(len(fullpars)):
            if fullpars[j] == self.variable_parameters[k]:
                fullpars[j] = scaled_ind[k]
    return fullpars
def _make_parameters(self):
    """Converts a list of Parameters into DEAP format."""
    self.value_means = []
    self.value_ranges = []
    self.arrangement = []
    self.variable_parameters = []
    current_var = 0
    for parameter in self.parameters:
        if parameter.type == ParameterType.DYNAMIC:
            self.value_means.append(parameter.value[0])
            if parameter.value[1] < 0:
                raise AttributeError(
                    '"{}" parameter has an invalid range. Range values '
                    'must be greater than zero'.format(parameter.label))
            self.value_ranges.append(parameter.value[1])
            var_label = 'var{}'.format(current_var)
            self.arrangement.append(var_label)
            self.variable_parameters.append(var_label)
            current_var += 1
        elif parameter.type == ParameterType.STATIC:
            self.arrangement.append(parameter.value)
        else:
            raise AttributeError(
                'Unknown parameter type ({}). Parameters can be STATIC or'
                ' DYNAMIC.'.format(parameter.type))
    return
def assign_fitnesses(self, targets):
    """Assigns fitnesses to parameters.

    Notes
    -----
    Uses `self.eval_fn` to evaluate each member of targets.

    Parameters
    ----------
    targets
        Parameter values for each member of the population.
    """
    self._evals = len(targets)
    px_parameters = zip([self.specification] * len(targets),
                        [self.sequences] * len(targets),
                        [self.parse_individual(x) for x in targets])
    if (self._cores == 1) or (self.mp_disabled):
        models = map(self.build_fn, px_parameters)
        fitnesses = map(self.eval_fn, models)
    else:
        with futures.ProcessPoolExecutor(
                max_workers=self._cores) as executor:
            models = executor.map(self.build_fn, px_parameters)
            fitnesses = executor.map(self.eval_fn, models)
    tars_fits = list(zip(targets, fitnesses))
    if self._store_params:
        self.parameter_log.append(
            [(self.parse_individual(x[0]), x[1]) for x in tars_fits])
    for ind, fit in tars_fits:
        ind.fitness.values = (fit,)
    return
def log_results(self, output_path=None, run_id=None):
    """Saves files for the minimization.

    Notes
    -----
    Currently saves a logfile with the best individual and a PDB of
    the best model.
    """
    best_ind = self.halloffame[0]
    model_params = self.parse_individual(
        best_ind)  # need to change name of 'params'
    if output_path is None:
        output_path = os.getcwd()
    if run_id is None:
        run_id = '{:%Y%m%d-%H%M%S}'.format(
            datetime.datetime.now())
    with open('{0}/{1}_opt_log.txt'.format(
            output_path, run_id), 'w') as log_file:
        log_file.write('\nEvaluated {0} models in total\n'.format(
            self._model_count))
        log_file.write('Run ID is {0}\n'.format(run_id))
        log_file.write('Best fitness is {0}\n'.format(
            self.halloffame[0].fitness))
        log_file.write(
            'Parameters of best model are {0}\n'.format(model_params))
        log_file.write(
            'Best individual is {0}\n'.format(self.halloffame[0]))
        for i, entry in enumerate(self.halloffame[0]):
            if entry > 0.95:
                log_file.write(
                    "Warning! Parameter {0} is at or near maximum allowed "
                    "value\n".format(i + 1))
            elif entry < -0.95:
                log_file.write(
                    "Warning! Parameter {0} is at or near minimum allowed "
                    "value\n".format(i + 1))
        log_file.write('Minimization history: \n{0}'.format(self.logbook))
    with open('{0}/{1}_opt_best_model.pdb'.format(
            output_path, run_id), 'w') as output_file:
        output_file.write(self.best_model.pdb)
    return
@property
def best_model(self):
    """Rebuilds the top-scoring model from an optimisation.

    Returns
    -------
    model : AMPAL
        An AMPAL model built from the top-scoring parameters.

    Raises
    ------
    AttributeError
        Raised if the optimiser has not been run.
    """
    if not hasattr(self, 'halloffame'):
        raise AttributeError(
            'No best model found, have you run the optimiser?')
    model = self.build_fn(
        (self.specification,
         self.sequences,
         self.parse_individual(self.halloffame[0])))
    return model
@classmethod
def dynamic(cls, label, val_mean, val_range):
    """Creates a dynamic parameter.

    Parameters
    ----------
    label : str
        A human-readable label for the parameter.
    val_mean : float
        The mean value of the parameter.
    val_range : float
        The maximum variance from the mean allowed for the parameter.
    """
    return cls(label, ParameterType.DYNAMIC, (val_mean, val_range))
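A short sketch of how this constructor might be used, assuming the enclosing class is `Parameter` (the label and values are illustrative only):

# A parameter allowed to vary in the range [4.0, 6.0] during optimisation.
radius = Parameter.dynamic('radius', 5.0, 1.0)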
def parse_scwrl_out(scwrl_std_out, scwrl_pdb):
    """Parses SCWRL output and returns the PDB and SCWRL score.

    Parameters
    ----------
    scwrl_std_out : str
        Std out from SCWRL.
    scwrl_pdb : str
        String of packed SCWRL PDB.

    Returns
    -------
    fixed_scwrl_str : str
        String of packed SCWRL PDB, with correct PDB format.
    score : float
        SCWRL score.
    """
    score = re.findall(
        r'Total minimal energy of the graph = ([-0-9.]+)', scwrl_std_out)[0]
    # Add temperature factors to SCWRL out
    split_scwrl = scwrl_pdb.splitlines()
    fixed_scwrl = []
    for line in split_scwrl:
        if len(line) < 80:
            line += ' ' * (80 - len(line))
        if re.search(r'H?E?T?ATO?M\s+\d+.+', line):
            front = line[:61]
            temp_factor = ' 0.00'
            back = line[66:]
            fixed_scwrl.append(''.join([front, temp_factor, back]))
        else:
            fixed_scwrl.append(line)
    fixed_scwrl_str = '\n'.join(fixed_scwrl) + '\n'
    return fixed_scwrl_str, float(score)
def pack_sidechains(pdb, sequence, path=False):
    """Packs sidechains onto a given PDB file or string.

    Parameters
    ----------
    pdb : str
        PDB string or a path to a PDB file.
    sequence : str
        Amino acid sequence for SCWRL to pack, in single-letter code.
    path : bool, optional
        True if pdb is a path.

    Returns
    -------
    scwrl_pdb : str
        String of packed SCWRL PDB.
    scwrl_score : float
        SCWRL packing score.
    """
    scwrl_std_out, scwrl_pdb = run_scwrl(pdb, sequence, path=path)
    return parse_scwrl_out(scwrl_std_out, scwrl_pdb)
def parse_pdb_file(self):
    """Runs the PDB parser."""
    self.pdb_parse_tree = {'info': {},
                           'data': {self.state: {}}}
    try:
        for line in self.pdb_lines:
            self.current_line = line
            record_name = line[:6].strip()
            if record_name in self.proc_functions:
                self.proc_functions[record_name]()
            else:
                if record_name not in self.pdb_parse_tree['info']:
                    self.pdb_parse_tree['info'][record_name] = []
                self.pdb_parse_tree['info'][record_name].append(line)
    except EOFError:
        # Raised by END record
        pass
    if self.new_labels:
        ampal_data_session.commit()
    return
def proc_atom(self):
    """Processes an "ATOM" or "HETATM" record."""
    atom_data = self.proc_line_coordinate(self.current_line)
    (at_type, at_ser, at_name, alt_loc, res_name, chain_id, res_seq,
     i_code, x, y, z, occupancy, temp_factor, element, charge) = atom_data
    # currently active state
    a_state = self.pdb_parse_tree['data'][self.state]
    res_id = (res_seq, i_code)
    if chain_id not in a_state:
        a_state[chain_id] = (set(), OrderedDict())
    if res_id not in a_state[chain_id][1]:
        a_state[chain_id][1][res_id] = (set(), OrderedDict())
    if at_type == 'ATOM':
        if res_name in standard_amino_acids.values():
            poly = 'P'
        else:
            poly = 'N'
    else:
        poly = 'H'
    a_state[chain_id][0].add((chain_id, at_type, poly))
    a_state[chain_id][1][res_id][0].add(
        (at_type, res_seq, res_name, i_code))
    if at_ser not in a_state[chain_id][1][res_id][1]:
        a_state[chain_id][1][res_id][1][at_ser] = [atom_data]
    else:
        a_state[chain_id][1][res_id][1][at_ser].append(atom_data)
    return
def proc_line_coordinate(self, line):
    """Extracts data from columns in an ATOM/HETATM record."""
    pdb_atom_col_dict = global_settings['ampal']['pdb_atom_col_dict']
    at_type = line[0:6].strip()               # 0
    at_ser = int(line[6:11].strip())          # 1
    at_name = line[12:16].strip()             # 2
    alt_loc = line[16].strip()                # 3
    res_name = line[17:20].strip()            # 4
    chain_id = line[21].strip()               # 5
    res_seq = int(line[22:26].strip())        # 6
    i_code = line[26].strip()                 # 7
    x = float(line[30:38].strip())            # 8
    y = float(line[38:46].strip())            # 9
    z = float(line[46:54].strip())            # 10
    occupancy = float(line[54:60].strip())    # 11
    temp_factor = float(line[60:66].strip())  # 12
    element = line[76:78].strip()             # 13
    charge = line[78:80].strip()              # 14
    if at_name not in pdb_atom_col_dict:
        pdb_atom_col_dict[at_name] = line[12:16]
        pdb_col_e = PDBColFormat(atom_name=at_name, atom_col=line[12:16])
        ampal_data_session.add(pdb_col_e)
        self.new_labels = True
    return (at_type, at_ser, at_name, alt_loc, res_name, chain_id, res_seq,
            i_code, x, y, z, occupancy, temp_factor, element, charge)
def make_ampal(self):
    """Generates an AMPAL object from the parse tree.

    Notes
    -----
    Will create an `Assembly` if there is a single state in the parse
    tree or an `AmpalContainer` if there is more than one.
    """
    data = self.pdb_parse_tree['data']
    if len(data) > 1:
        ac = AmpalContainer(id=self.id)
        for state, chains in sorted(data.items()):
            if chains:
                ac.append(self.proc_state(
                    chains, self.id + '_state_{}'.format(state + 1)))
        return ac
    elif len(data) == 1:
        return self.proc_state(data[0], self.id)
    else:
        raise ValueError('Empty parse tree, check input PDB format.')
def proc_state(self, state_data, state_id):
    """Processes a state into an `Assembly`.

    Parameters
    ----------
    state_data : dict
        Contains information about the state, including all the
        per-line structural data.
    state_id : str
        ID given to the `Assembly` that represents the state.
    """
    assembly = Assembly(assembly_id=state_id)
    for k, chain in sorted(state_data.items()):
        assembly._molecules.append(self.proc_chain(chain, assembly))
    return assembly
def proc_monomer(self, monomer_info, parent, mon_cls=False):
    """Processes records into a `Monomer`.

    Parameters
    ----------
    monomer_info : (set, OrderedDict)
        Labels and data for a monomer.
    parent : ampal.Polymer
        `Polymer` used to assign `ampal_parent` on the created
        `Monomer`.
    mon_cls : `Monomer` class or subclass, optional
        A `Monomer` class can be defined explicitly.
    """
    monomer_labels, monomer_data = monomer_info
    if len(monomer_labels) > 1:
        raise ValueError(
            'Malformed PDB, single monomer id with '
            'multiple labels. {}'.format(monomer_labels))
    else:
        monomer_label = list(monomer_labels)[0]
    if mon_cls:
        monomer_class = mon_cls
        het = True
    elif monomer_label[0] == 'ATOM':
        if monomer_label[2] in standard_amino_acids.values():
            monomer_class = Residue
        else:
            monomer_class = Nucleotide
        het = False
    else:
        raise ValueError('Unknown Monomer type.')
    monomer = monomer_class(
        atoms=None, mol_code=monomer_label[2],
        monomer_id=monomer_label[1],
        insertion_code=monomer_label[3],
        is_hetero=het, ampal_parent=parent)
    monomer.states = self.gen_states(monomer_data.values(), monomer)
    monomer._active_state = sorted(monomer.states.keys())[0]
    return monomer
def generate_antisense_sequence(sequence):
    """Creates the antisense sequence of a DNA strand."""
    dna_antisense = {
        'A': 'T',
        'T': 'A',
        'C': 'G',
        'G': 'C'
    }
    antisense = [dna_antisense[x] for x in sequence[::-1]]
    return ''.join(antisense)
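A quick worked example; the strand is reversed and then complemented base by base:

generate_antisense_sequence('ATCG')  # returns 'CGAT'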
@classmethod
def from_sequence(cls, sequence, phos_3_prime=False):
    """Creates a DNA duplex from a nucleotide sequence.

    Parameters
    ----------
    sequence : str
        Nucleotide sequence.
    phos_3_prime : bool, optional
        If `False`, the 5' and 3' phosphates will be omitted.
    """
    strand1 = NucleicAcidStrand(sequence, phos_3_prime=phos_3_prime)
    duplex = cls(strand1)
    return duplex
@classmethod
def from_start_and_end(cls, start, end, sequence, phos_3_prime=False):
    """Creates a DNA duplex from a start and end point.

    Parameters
    ----------
    start : [float, float, float]
        Start of the build axis.
    end : [float, float, float]
        End of the build axis.
    sequence : str
        Nucleotide sequence.
    phos_3_prime : bool, optional
        If `False`, the 5' and 3' phosphates will be omitted.
    """
    strand1 = NucleicAcidStrand.from_start_and_end(
        start, end, sequence, phos_3_prime=phos_3_prime)
    duplex = cls(strand1)
    return duplex
def generate_complementary_strand(strand1):
    """Takes a SingleStrandHelix and creates the antisense strand."""
    rise_adjust = (
        strand1.rise_per_nucleotide * strand1.axis.unit_tangent) * 2
    strand2 = NucleicAcidStrand.from_start_and_end(
        strand1.helix_end - rise_adjust,
        strand1.helix_start - rise_adjust,
        generate_antisense_sequence(strand1.base_sequence),
        phos_3_prime=strand1.phos_3_prime)
    ad_ang = dihedral(strand1[0]["C1'"]._vector,
                      strand1.axis.start,
                      strand2.axis.start + rise_adjust,
                      strand2[-1]["C1'"]._vector)
    strand2.rotate(
        225.0 + ad_ang, strand2.axis.unit_tangent,
        point=strand2.helix_start)  # 225 is the base adjust
    return strand2
def total_accessibility(in_rsa, path=True):
    """Parses an rsa file for the total surface accessibility data.

    Parameters
    ----------
    in_rsa : str
        Path to naccess rsa file.
    path : bool
        Indicates if in_rsa is a path or a string.

    Returns
    -------
    dssp_residues : 5-tuple(float)
        Total accessibility values for:
        [0] all atoms
        [1] all side-chain atoms
        [2] all main-chain atoms
        [3] all non-polar atoms
        [4] all polar atoms
    """
    if path:
        with open(in_rsa, 'r') as inf:
            rsa = inf.read()
    else:
        rsa = in_rsa[:]
    all_atoms, side_chains, main_chain, non_polar, polar = [
        float(x) for x in rsa.splitlines()[-1].split()[1:]]
    return all_atoms, side_chains, main_chain, non_polar, polar
def extract_residue_accessibility(in_rsa, path=True, get_total=False):
    """Parses an rsa file for the solvent accessibility of each residue.

    Parameters
    ----------
    in_rsa : str
        Path to naccess rsa file.
    path : bool
        Indicates if in_rsa is a path or a string.
    get_total : bool
        Indicates if the total accessibility from the file needs to be
        extracted. Convenience option for running the
        total_accessibility function while only running NACCESS once.

    Returns
    -------
    rel_solv_acc_all_atoms : list
        Relative solvent accessibility of all atoms in each amino acid.
    all_atoms : float or None
        Total relative solvent accessibility of all atoms in the
        NACCESS rsa file. None if get_total is False.
    """
    if path:
        with open(in_rsa, 'r') as inf:
            rsa = inf.read()
    else:
        rsa = in_rsa[:]
    residue_list = [x for x in rsa.splitlines()]
    rel_solv_acc_all_atoms = [
        float(x[22:28])
        for x in residue_list
        if x[0:3] == "RES" or x[0:3] == "HEM"]
    if get_total:
        (all_atoms, side_chains, main_chain,
         non_polar, polar) = total_accessibility(rsa, path=False)
        return rel_solv_acc_all_atoms, all_atoms
    else:
        return rel_solv_acc_all_atoms, None
def get_aa_code(aa_letter):
    """Gets the three-letter amino acid code if possible. If not, returns None.

    If the three-letter code is None, it will have to be found later
    from the filesystem.

    Parameters
    ----------
    aa_letter : str
        One-letter amino acid code.

    Returns
    -------
    aa_code : str, or None
        Three-letter amino acid code.
    """
    aa_code = None
    if aa_letter != 'X':
        for key, val in standard_amino_acids.items():
            if key == aa_letter:
                aa_code = val
    return aa_code
def get_aa_letter(aa_code):
    """Gets the one-letter version of aa_code if possible. If not, returns 'X'.

    Parameters
    ----------
    aa_code : str
        Three-letter amino acid code.

    Returns
    -------
    aa_letter : str
        One-letter amino acid code. Default value is 'X'.
    """
    aa_letter = 'X'
    for key, val in standard_amino_acids.items():
        if val == aa_code:
            aa_letter = key
    return aa_letter
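A small sketch of the round trip between the two helpers, assuming `standard_amino_acids` maps one-letter keys to three-letter values:

get_aa_code('A')      # 'ALA'
get_aa_letter('ALA')  # 'A'
get_aa_code('B')      # None -- unknown letters fall through
get_aa_letter('XYZ')  # 'X'  -- unknown codes default to 'X'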
def get_aa_info(code):
    """Gets a dictionary of information relating to a new amino acid code not currently in the database.

    Notes
    -----
    Use this function to get a dictionary that is then to be sent to
    the function add_amino_acid_to_json(), used to fill in rows of the
    amino_acid table for a new amino acid code.

    Parameters
    ----------
    code : str
        Three-letter amino acid code.

    Raises
    ------
    IOError
        If unable to locate the page associated with the amino acid
        name on the PDBE site.

    Returns
    -------
    aa_dict : dict
        Keys are AminoAcidDB field names. Values are the str values
        for the new amino acid, scraped from the PDBE if possible.
        None if not found.
    """
    letter = 'X'
    # Try to get content from PDBE.
    url_string = ('http://www.ebi.ac.uk/pdbe-srv/pdbechem/'
                  'chemicalCompound/show/{0}'.format(code))
    r = requests.get(url_string)
    # Raise error if content not obtained.
    if not r.ok:
        raise IOError("Could not get to url {0}".format(url_string))
    # Parse r.text in an ugly way to get the required information.
    description = r.text.split('<h3>Molecule name')[1].split('</tr>')[0]
    description = description.strip().split('\n')[3].strip()[:255]
    modified = r.text.split("<h3>Standard parent ")[1].split('</tr>')[0]
    modified = modified.replace(" ", "").replace(
        '\n', '').split('<')[-3].split('>')[-1]
    if modified == "NotAssigned":
        modified = None
    # Add the required information to a dictionary which can then be
    # passed to add_amino_acid_to_json.
    aa_dict = {'code': code, 'description': description,
               'modified': modified, 'letter': letter}
    return aa_dict
def add_amino_acid_to_json(code, description, letter='X', modified=None,
                           force_add=False):
    """Adds an amino acid to the amino_acids.json file used to populate the amino_acid table.

    Parameters
    ----------
    code : str
        New code to be added to the amino acid table.
    description : str
        Description of the amino acid, e.g. 'amidated terminal carboxy
        group'.
    letter : str, optional
        One-letter code for the amino acid. Defaults to 'X'.
    modified : str or None, optional
        Code of the modified amino acid, e.g. 'ALA', or None.
        Defaults to None.
    force_add : bool, optional
        If True, will overwrite the existing dictionary value for code
        if it is already in amino_acids.json. If False, an IOError is
        raised if code is already in amino_acids.json.

    Raises
    ------
    IOError
        If code is already in amino_acids.json and force_add is False.

    Returns
    -------
    None
    """
    # If code is already in the dictionary, raise an error.
    if (not force_add) and code in amino_acids_dict.keys():
        raise IOError(
            "{0} is already in the amino_acids dictionary, with values: "
            "{1}".format(code, amino_acids_dict[code]))
    # Prepare data to be added.
    add_code = code
    add_code_dict = {'description': description,
                     'letter': letter,
                     'modified': modified}
    # Add the new entry to the dictionary.
    amino_acids_dict[add_code] = add_code_dict
    # Write over the json file with the updated dictionary.
    with open(_amino_acids_json_path, 'w') as foo:
        foo.write(json.dumps(amino_acids_dict))
    return
@classmethod
def from_polymers(cls, polymers):
    """Creates a `CoiledCoil` from a list of `HelicalHelices`.

    Parameters
    ----------
    polymers : [HelicalHelix]
        List of `HelicalHelices`.
    """
    n = len(polymers)
    instance = cls(n=n, auto_build=False)
    instance.major_radii = [x.major_radius for x in polymers]
    instance.major_pitches = [x.major_pitch for x in polymers]
    instance.major_handedness = [x.major_handedness for x in polymers]
    instance.aas = [x.num_monomers for x in polymers]
    instance.minor_helix_types = [x.minor_helix_type for x in polymers]
    instance.orientations = [x.orientation for x in polymers]
    instance.phi_c_alphas = [x.phi_c_alpha for x in polymers]
    instance.minor_repeats = [x.minor_repeat for x in polymers]
    instance.build()
    return instance
@classmethod
def from_parameters(cls, n, aa=28, major_radius=None, major_pitch=None,
                    phi_c_alpha=26.42, minor_helix_type='alpha',
                    auto_build=True):
    """Creates a `CoiledCoil` from defined super-helical parameters.

    Parameters
    ----------
    n : int
        Oligomeric state.
    aa : int, optional
        Number of amino acids per minor helix.
    major_radius : float, optional
        Radius of the super helix.
    major_pitch : float, optional
        Pitch of the super helix.
    phi_c_alpha : float, optional
        Rotation of the minor helices relative to the super-helical
        axis.
    minor_helix_type : str, optional
        Helix type of the minor helices. Can be: 'alpha', 'pi',
        '3-10', 'PPI', 'PP2', 'collagen'.
    auto_build : bool, optional
        If `True`, the model will be built as part of instantiation.
    """
    instance = cls(n=n, auto_build=False)
    instance.aas = [aa] * n
    instance.phi_c_alphas = [phi_c_alpha] * n
    instance.minor_helix_types = [minor_helix_type] * n
    if major_pitch is not None:
        instance.major_pitches = [major_pitch] * n
    if major_radius is not None:
        instance.major_radii = [major_radius] * n
    if auto_build:
        instance.build()
    return instance
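A hypothetical construction call, assuming the enclosing class is `CoiledCoil`; the parameter values here are illustrative rather than recommended defaults:

# Build a parallel dimer with a 5.1 Angstrom super-helical radius.
cc = CoiledCoil.from_parameters(n=2, aa=28, major_radius=5.1,
                                major_pitch=200.0)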
@classmethod
def tropocollagen(cls, aa=28, major_radius=5.0, major_pitch=85.0,
                  auto_build=True):
    """Creates a model of a collagen triple helix.

    Parameters
    ----------
    aa : int, optional
        Number of amino acids per minor helix.
    major_radius : float, optional
        Radius of the super helix.
    major_pitch : float, optional
        Pitch of the super helix.
    auto_build : bool, optional
        If `True`, the model will be built as part of instantiation.
    """
    instance = cls.from_parameters(
        n=3, aa=aa, major_radius=major_radius, major_pitch=major_pitch,
        phi_c_alpha=0.0, minor_helix_type='collagen', auto_build=False)
    instance.major_handedness = ['r'] * 3
    # Default z-shifts taken from rise_per_residue of the collagen helix.
    rpr_collagen = _helix_parameters['collagen'][1]
    instance.z_shifts = [-rpr_collagen * 2, -rpr_collagen, 0.0]
    instance.minor_repeats = [None] * 3
    if auto_build:
        instance.build()
    return instance
def build(self):
    """Builds a model of a coiled coil protein using input parameters."""
    monomers = [HelicalHelix(major_pitch=self.major_pitches[i],
                             major_radius=self.major_radii[i],
                             major_handedness=self.major_handedness[i],
                             aa=self.aas[i],
                             minor_helix_type=self.minor_helix_types[i],
                             orientation=self.orientations[i],
                             phi_c_alpha=self.phi_c_alphas[i],
                             minor_repeat=self.minor_repeats[i],
                             )
                for i in range(self.oligomeric_state)]
    axis_unit_vector = numpy.array([0, 0, 1])
    for i, m in enumerate(monomers):
        m.rotate(angle=self.rotational_offsets[i], axis=axis_unit_vector)
        m.translate(axis_unit_vector * self.z_shifts[i])
    self._molecules = monomers[:]
    self.relabel_all()
    for m in self._molecules:
        m.ampal_parent = self
    return
def find_max_rad_npnp(self):
    """Finds the maximum radius and npnp in the force field.

    Returns
    -------
    (max_rad, max_npnp) : (float, float)
        Maximum radius and npnp distance in the loaded force field.
    """
    max_rad = 0
    max_npnp = 0
    for res, atoms in self.items():
        if res != 'KEY':
            for atom, ff_params in atoms.items():
                if max_rad < ff_params[1]:
                    max_rad = ff_params[1]
                if max_npnp < ff_params[4]:
                    max_npnp = ff_params[4]
    return max_rad, max_npnp
@property
def parameter_struct_dict(self):
    """Dictionary containing PyAtomData structs for the force field."""
    if self._parameter_struct_dict is None:
        self._parameter_struct_dict = self._make_ff_params_dict()
    elif self.auto_update_f_params:
        new_hash = hash(
            tuple([tuple(item)
                   for sublist in self.values()
                   for item in sublist.values()]))
        if self._old_hash != new_hash:
            self._parameter_struct_dict = self._make_ff_params_dict()
            self._old_hash = new_hash
    return self._parameter_struct_dict
def reduce_output_path(path=None, pdb_name=None):
    """Defines the location of Reduce output files relative to input files."""
    if not path:
        if not pdb_name:
            raise NameError(
                "Cannot save an output for a temporary file without a PDB "
                "code specified")
        pdb_name = pdb_name.lower()
        output_path = Path(global_settings['structural_database']['path'],
                           pdb_name[1:3].lower(), pdb_name[:4].lower(),
                           'reduce', pdb_name + '_reduced.mmol')
    else:
        input_path = Path(path)
        if len(input_path.parents) > 1:
            output_path = input_path.parents[1] / 'reduce' / \
                (input_path.stem + '_reduced' + input_path.suffix)
        else:
            output_path = input_path.parent / \
                (input_path.stem + '_reduced' + input_path.suffix)
    return output_path
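A worked example of the path logic when an input path is supplied (the paths are hypothetical):

reduce_output_path(path='/data/pdb/1abc.pdb')
# -> Path('/data/reduce/1abc_reduced.pdb'), i.e. a 'reduce' directory
#    beside the input file's parent directory.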
def output_reduce(input_file, path=True, pdb_name=None, force=False):
    """Runs Reduce on a pdb or mmol file and creates a new file with the output.

    Parameters
    ----------
    input_file : str or pathlib.Path
        Path to file to run Reduce on.
    path : bool
        True if input_file is a path.
    pdb_name : str
        PDB ID of protein. Required if providing string not path.
    force : bool
        True if existing reduce outputs should be overwritten.

    Returns
    -------
    output_path : pathlib.Path
        Location of output file.
    """
    if path:
        output_path = reduce_output_path(path=input_file)
    else:
        output_path = reduce_output_path(pdb_name=pdb_name)
    if output_path.exists() and not force:
        return output_path
    reduce_mmol, reduce_message = run_reduce(input_file, path=path)
    if not reduce_mmol:
        return None
    output_path.parent.mkdir(exist_ok=True)
    output_path.write_text(reduce_mmol)
    return output_path
def output_reduce_list(path_list, force=False):
    """Generates structure files with protons from a list of structure files."""
    output_paths = []
    for path in path_list:
        output_path = output_reduce(path, force=force)
        if output_path:
            output_paths.append(output_path)
    return output_paths
def assembly_plus_protons(input_file, path=True, pdb_name=None,
                          save_output=False, force_save=False):
    """Returns an Assembly with protons added by Reduce.

    Notes
    -----
    Looks for a pre-existing Reduce output in the standard location
    before running Reduce. If the protein contains oligosaccharides or
    glycans, use reduce_correct_carbohydrates.

    Parameters
    ----------
    input_file : str or pathlib.Path
        Location of the file to be converted to an Assembly, or a PDB
        file as a string.
    path : bool
        Whether we are looking at a file or a pdb string. Defaults to
        file.
    pdb_name : str
        PDB ID of protein. Required if providing string not path.
    save_output : bool
        If True, will save the generated assembly.
    force_save : bool
        If True, will overwrite the existing reduced assembly.

    Returns
    -------
    reduced_assembly : AMPAL Assembly
        Assembly of protein with protons added by Reduce.
    """
    from ampal.pdb_parser import convert_pdb_to_ampal
    if path:
        input_path = Path(input_file)
        if not pdb_name:
            pdb_name = input_path.stem[:4]
        reduced_path = reduce_output_path(path=input_path)
        if reduced_path.exists() and not save_output and not force_save:
            reduced_assembly = convert_pdb_to_ampal(
                str(reduced_path), pdb_id=pdb_name)
            return reduced_assembly
    if save_output:
        reduced_path = output_reduce(
            input_file, path=path, pdb_name=pdb_name, force=force_save)
        reduced_assembly = convert_pdb_to_ampal(str(reduced_path), path=True)
    else:
        reduce_mmol, reduce_message = run_reduce(input_file, path=path)
        if not reduce_mmol:
            return None
        reduced_assembly = convert_pdb_to_ampal(
            reduce_mmol, path=False, pdb_id=pdb_name)
    return reduced_assembly
@classmethod
def from_start_and_end(cls, start, end, aa=None, helix_type='alpha'):
    """Creates a `Helix` between `start` and `end`.

    Parameters
    ----------
    start : 3D Vector (tuple or list or numpy.array)
        The coordinate of the start of the helix primitive.
    end : 3D Vector (tuple or list or numpy.array)
        The coordinate of the end of the helix primitive.
    aa : int, optional
        Number of amino acids in the `Helix`. If `None`, an
        appropriate number of residues are added.
    helix_type : str, optional
        Type of helix, can be: 'alpha', 'pi', '3-10', 'PPI', 'PPII',
        'collagen'.
    """
    start = numpy.array(start)
    end = numpy.array(end)
    if aa is None:
        rise_per_residue = _helix_parameters[helix_type][1]
        aa = int((numpy.linalg.norm(end - start) / rise_per_residue) + 1)
    instance = cls(aa=aa, helix_type=helix_type)
    instance.move_to(start=start, end=end)
    return instance
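A sketch of a typical call, assuming the enclosing class is `Helix`; with `aa=None` the residue count is derived from the start-end distance and the helix type's rise per residue:

helix = Helix.from_start_and_end(start=(0, 0, 0), end=(0, 0, 15),
                                 helix_type='alpha')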
def build(self):
    """Builds a straight helix along the z-axis, starting with CA1 on the x-axis."""
    ang_per_res = (2 * numpy.pi) / self.residues_per_turn
    atom_offsets = _atom_offsets[self.helix_type]
    if self.handedness == 'l':
        handedness = -1
    else:
        handedness = 1
    atom_labels = ['N', 'CA', 'C', 'O']
    if all([x in atom_offsets.keys() for x in atom_labels]):
        res_label = 'GLY'
    else:
        res_label = 'UNK'
    monomers = []
    for i in range(self.num_monomers):
        residue = Residue(mol_code=res_label, ampal_parent=self)
        atoms_dict = OrderedDict()
        for atom_label in atom_labels:
            r, zeta, z_shift = atom_offsets[atom_label]
            rot_ang = ((i * ang_per_res) + zeta) * handedness
            z = (self.rise_per_residue * i) + z_shift
            coords = cylindrical_to_cartesian(
                radius=r, azimuth=rot_ang, z=z, radians=True)
            atom = Atom(
                coordinates=coords, element=atom_label[0],
                ampal_parent=residue, res_label=atom_label)
            atoms_dict[atom_label] = atom
        residue.atoms = atoms_dict
        monomers.append(residue)
    self._monomers = monomers
    self.relabel_monomers()
    self.relabel_atoms()
    return
@classmethod
def from_start_and_end(cls, start, end, aa=None, major_pitch=225.8,
                       major_radius=5.07, major_handedness='l',
                       minor_helix_type='alpha', orientation=1,
                       phi_c_alpha=0.0, minor_repeat=None):
    """Creates a `HelicalHelix` between a `start` and `end` point."""
    start = numpy.array(start)
    end = numpy.array(end)
    if aa is None:
        minor_rise_per_residue = _helix_parameters[minor_helix_type][1]
        aa = int((numpy.linalg.norm(end - start) /
                  minor_rise_per_residue) + 1)
    instance = cls(
        aa=aa, major_pitch=major_pitch, major_radius=major_radius,
        major_handedness=major_handedness,
        minor_helix_type=minor_helix_type, orientation=orientation,
        phi_c_alpha=phi_c_alpha, minor_repeat=minor_repeat)
    instance.move_to(start=start, end=end)
    return instance
@property
def curve(self):
    """Curve of the super helix."""
    return HelicalCurve.pitch_and_radius(
        self.major_pitch, self.major_radius,
        handedness=self.major_handedness)
@property
def curve_primitive(self):
    """`Primitive` of the super-helical curve."""
    curve = self.curve
    curve.axis_start = self.helix_start
    curve.axis_end = self.helix_end
    coords = curve.get_coords(
        n_points=(self.num_monomers + 1),
        spacing=self.minor_rise_per_residue)
    if self.orientation == -1:
        coords.reverse()
    return Primitive.from_coordinates(coords)
@property
def major_rise_per_monomer(self):
    """Rise along the super-helical axis per monomer."""
    return (numpy.cos(numpy.deg2rad(self.curve.alpha)) *
            self.minor_rise_per_residue)
def minor_residues_per_turn(self, minor_repeat=None):
    """Calculates the number of residues per turn of the minor helix.

    Parameters
    ----------
    minor_repeat : float, optional
        Hydrophobic repeat of the minor helix.

    Returns
    -------
    minor_rpt : float
        Residues per turn of the minor helix.
    """
    if minor_repeat is None:
        minor_rpt = _helix_parameters[self.minor_helix_type][0]
    else:
        # precession angle in radians
        precession = self.curve.t_from_arc_length(
            minor_repeat * self.minor_rise_per_residue)
        if self.orientation == -1:
            precession = -precession
        if self.major_handedness != self.minor_handedness:
            precession = -precession
        minor_rpt = ((minor_repeat * numpy.pi * 2) /
                     ((2 * numpy.pi) + precession))
    return minor_rpt
def get_orient_angle(self, reference_point=numpy.array([0, 0, 0]),
                     monomer_index=0, res_label='CA', radians=False):
    """Angle between reference_point and self[monomer_index][res_label].

    Notes
    -----
    The angle is calculated as a dihedral angle, with the second and
    third points coming from the curve_primitive.

    Parameters
    ----------
    reference_point : list, tuple or numpy.array of length 3.
    monomer_index : int
        Index of the Residue to centre.
    res_label : str
        Atom name for the centred atom, e.g. "CA" or "OE1".
    radians : bool
        If True, the returned angle is in radians instead of degrees.
    """
    if (monomer_index < len(self)) and monomer_index != -1:
        adjacent_index = monomer_index + 1
    elif (monomer_index == len(self)) or monomer_index == -1:
        adjacent_index = monomer_index - 1
    else:
        raise ValueError(
            "monomer_index ({0}) cannot be greater than the "
            "length of the polymer ({1})".format(
                monomer_index, len(self)))
    angle = dihedral(reference_point,
                     self.curve_primitive[monomer_index]['CA'],
                     self.curve_primitive[adjacent_index]['CA'],
                     self[monomer_index][res_label])
    if radians:
        angle = numpy.deg2rad(angle)
    return angle
def rotate_monomers(self, angle, radians=False):
    """Rotates each Residue in the Polypeptide.

    Notes
    -----
    Each monomer is rotated about the axis formed between its
    corresponding primitive `PseudoAtom` and that of the subsequent
    `Monomer`.

    Parameters
    ----------
    angle : float
        Angle by which to rotate each monomer.
    radians : bool
        Indicates whether angle is in radians or degrees.
    """
    if radians:
        angle = numpy.rad2deg(angle)
    for i in range(len(self.primitive) - 1):
        axis = self.primitive[i + 1]['CA'] - self.primitive[i]['CA']
        point = self.primitive[i]['CA']._vector
        self[i].rotate(angle=angle, axis=axis, point=point)
    return
def side_chain_centres(assembly, masses=False):
    """PseudoGroup containing the side-chain centre of each Residue in each Polypeptide in an Assembly.

    Notes
    -----
    Each PseudoAtom is a side-chain centre. There is one PseudoMonomer
    per chain in the assembly (each containing len(chain) PseudoAtoms).
    The PseudoGroup has len(assembly) PseudoMonomers.

    Parameters
    ----------
    assembly : Assembly
    masses : bool
        If True, side-chain centres are centres of mass.
        If False, side-chain centres are centres of coordinates.

    Returns
    -------
    PseudoGroup
        Contains all side-chain centres, with ampal_parent=assembly.
    """
    if masses:
        elts = set([x.element for x in assembly.get_atoms()])
        masses_dict = {e: element_data[e]['atomic mass'] for e in elts}
    pseudo_monomers = []
    for chain in assembly:
        if isinstance(chain, Polypeptide):
            centres = OrderedDict()
            for r in chain.get_monomers(ligands=False):
                side_chain = r.side_chain
                if masses:
                    masses_list = [masses_dict[x.element]
                                   for x in side_chain]
                else:
                    masses_list = None
                if side_chain:
                    centre = centre_of_mass(
                        points=[x._vector for x in side_chain],
                        masses=masses_list)
                else:
                    # for Glycine residues.
                    centre = r['CA']._vector
                centres[r.unique_id] = PseudoAtom(
                    coordinates=centre, name=r.unique_id, ampal_parent=r)
            pseudo_monomers.append(PseudoMonomer(
                pseudo_atoms=centres, monomer_id=' ', ampal_parent=chain))
    return PseudoGroup(monomers=pseudo_monomers, ampal_parent=assembly)
def cluster_helices(helices, cluster_distance=12.0):
    """Clusters helices according to the minimum distance between the line segments representing their backbones.

    Notes
    -----
    Each helix is represented as a line segment joining the CA of its
    first Residue to the CA of its final Residue. The minimal distance
    between pairwise line segments is calculated and stored in a
    condensed_distance_matrix. This is clustered using the 'single'
    linkage metric (all members of cluster i are at < cluster_distance
    away from at least one other member of cluster i). Helices
    belonging to the same cluster are grouped together as values of
    the returned cluster_dict.

    Parameters
    ----------
    helices : Assembly
    cluster_distance : float

    Returns
    -------
    cluster_dict : dict
        Keys: int
            Cluster number.
        Values: [Polymer]
    """
    condensed_distance_matrix = []
    for h1, h2 in itertools.combinations(helices, 2):
        md = minimal_distance_between_lines(
            h1[0]['CA']._vector, h1[-1]['CA']._vector,
            h2[0]['CA']._vector, h2[-1]['CA']._vector, segments=True)
        condensed_distance_matrix.append(md)
    z = linkage(condensed_distance_matrix, method='single')
    clusters = fcluster(z, t=cluster_distance, criterion='distance')
    cluster_dict = {}
    for h, k in zip(helices, clusters):
        if k not in cluster_dict:
            cluster_dict[k] = [h]
        else:
            cluster_dict[k].append(h)
    return cluster_dict
def find_kihs(assembly, hole_size=4, cutoff=7.0):
    """KnobIntoHoles between residues of different chains in an assembly.

    Notes
    -----
    A KnobIntoHole is found when the side-chain centre of a Residue in
    one chain is closer than (cutoff) Angstroms to at least
    (hole_size) side-chain centres of Residues of a different chain.

    Parameters
    ----------
    assembly : Assembly
    hole_size : int
        Number of Residues required to form each hole.
    cutoff : float
        Maximum distance between the knob and each of the hole
        residues.

    Returns
    -------
    kihs : [KnobIntoHole]
    """
    pseudo_group = side_chain_centres(assembly=assembly, masses=False)
    pairs = itertools.permutations(pseudo_group, 2)
    kihs = []
    for pp_1, pp_2 in pairs:
        for r in pp_1:
            close_atoms = pp_2.is_within(cutoff, r)
            # KIHs occur between a residue and the (hole_size) closest
            # side-chains on an adjacent polypeptide.
            if len(close_atoms) < hole_size:
                continue
            elif len(close_atoms) > hole_size:
                close_atoms = sorted(
                    close_atoms, key=lambda x: distance(x, r))[:hole_size]
            kih = OrderedDict()
            kih['k'] = r
            for i, hole_atom in enumerate(close_atoms):
                kih['h{0}'.format(i)] = hole_atom
            knob_into_hole = KnobIntoHole(pseudo_atoms=kih)
            kihs.append(knob_into_hole)
    return kihs
def find_contiguous_packing_segments(polypeptide, residues, max_dist=10.0):
    """Assembly containing segments of a polypeptide, divided according to the separation of contiguous residues.

    Parameters
    ----------
    polypeptide : Polypeptide
    residues : iterable containing Residues
    max_dist : float
        Separation beyond which splitting of the Polymer occurs.

    Returns
    -------
    segments : Assembly
        Each segment contains a subset of residues, each not separated
        by more than max_dist from the previous Residue.
    """
    segments = Assembly(assembly_id=polypeptide.ampal_parent.id)
    residues_in_polypeptide = list(
        sorted(residues.intersection(set(polypeptide.get_monomers())),
               key=lambda x: int(x.id)))
    if not residues_in_polypeptide:
        return segments
    # residue_pots contains separate pots of residues divided according
    # to their separation distance.
    residue_pots = []
    pot = [residues_in_polypeptide[0]]
    for r1, r2 in zip(residues_in_polypeptide, residues_in_polypeptide[1:]):
        d = distance(r1['CA'], r2['CA'])
        if d <= max_dist:
            pot.append(r2)
            if sum([len(x) for x in residue_pots] + [len(pot)]) == len(
                    residues_in_polypeptide):
                residue_pots.append(pot)
        else:
            residue_pots.append(pot)
            pot = [r2]
    for pot in residue_pots:
        segment = polypeptide.get_slice_from_res_id(pot[0].id, pot[-1].id)
        segment.ampal_parent = polypeptide.ampal_parent
        segments.append(segment)
    return segments
def start_and_end_of_reference_axis(chains):
    """Get start and end coordinates that approximate the reference axis for a collection of chains.

    The chains are not necessarily all the same length.

    Parameters
    ----------
    chains : [Polypeptide]

    Returns
    -------
    start, end : numpy.array
        3D start and end coordinates for defining the reference axis.
    """
    coords = [numpy.array(chains[0].primitive.coordinates)]
    orient_vector = polypeptide_vector(chains[0])
    # Append the coordinates for the remaining chains, reversing the
    # direction in antiparallel arrangements.
    for i, c in enumerate(chains[1:]):
        if is_acute(polypeptide_vector(c), orient_vector):
            coords.append(numpy.array(c.primitive.coordinates))
        else:
            coords.append(numpy.flipud(
                numpy.array(c.primitive.coordinates)))
    start = numpy.mean([x[0] for x in coords], axis=0)
    end = numpy.mean([x[-1] for x in coords], axis=0)
    return start, end
def gen_reference_primitive(polypeptide, start, end):
    """Generates a reference Primitive for a Polypeptide given start and end coordinates.

    Notes
    -----
    Uses the rise_per_residue of the Polypeptide primitive to define
    the separation of points on the line joining start and end.

    Parameters
    ----------
    polypeptide : Polypeptide
    start : numpy.array
        3D coordinates of the reference axis start.
    end : numpy.array
        3D coordinates of the reference axis end.

    Returns
    -------
    reference_primitive : Primitive
    """
    prim = polypeptide.primitive
    q = find_foot(a=start, b=end, p=prim.coordinates[0])
    ax = Axis(start=q, end=end)
    # flip axis if antiparallel to polypeptide_vector
    if not is_acute(polypeptide_vector(polypeptide), ax.unit_tangent):
        ax = Axis(start=end, end=q)
    arc_length = 0
    points = [ax.start]
    for rise in prim.rise_per_residue()[:-1]:
        arc_length += rise
        t = ax.t_from_arc_length(arc_length=arc_length)
        point = ax.point(t)
        points.append(point)
    reference_primitive = Primitive.from_coordinates(points)
    return reference_primitive
def tag_residues_with_heptad_register(helices):
    """Tags the Residues in the input helices with their heptad register.

    (The helices are not required to be the same length.)

    Parameters
    ----------
    helices : [Polypeptide]

    Returns
    -------
    None
    """
    base_reg = 'abcdefg'
    start, end = start_and_end_of_reference_axis(helices)
    for h in helices:
        ref_axis = gen_reference_primitive(h, start=start, end=end)
        crangles = crick_angles(h, reference_axis=ref_axis, tag=False)[:-1]
        reg_fit = fit_heptad_register(crangles)
        exp_base = base_reg * (len(h) // 7 + 2)
        hep_pos = reg_fit[0][0]
        register_string = exp_base[hep_pos:hep_pos + len(h)]
        for i, register in enumerate(register_string):
            h[i].tags['register'] = register
    return
def knob_subgroup(self, cutoff=7.0):
    """KnobGroup where all KnobsIntoHoles have max_kh_distance <= cutoff."""
    if cutoff > self.cutoff:
        raise ValueError(
            "cutoff supplied ({0}) cannot be greater than self.cutoff "
            "({1})".format(cutoff, self.cutoff))
    return KnobGroup(
        monomers=[x for x in self.get_monomers()
                  if x.max_kh_distance <= cutoff],
        ampal_parent=self.ampal_parent)
@property
def graph(self):
    """Returns a MultiDiGraph from the KIHs. Nodes are helices and edges are KIHs."""
    g = networkx.MultiDiGraph()
    edge_list = [(x.knob_helix, x.hole_helix, x.id, {'kih': x})
                 for x in self.get_monomers()]
    g.add_edges_from(edge_list)
    return g
@staticmethod
def filter_graph(g, cutoff=7.0, min_kihs=2):
    """Gets the subgraph formed from edges that have max_kh_distance <= cutoff.

    Parameters
    ----------
    g : MultiDiGraph representing KIHs
        g is the output from graph_from_protein.
    cutoff : float
        Socket cutoff in Angstroms. Default is 7.0.
    min_kihs : int
        Minimum number of KIHs shared between all pairs of connected
        nodes in the graph.

    Returns
    -------
    networkx.MultiDiGraph
        Subgraph formed from edges that have max_kh_distance <=
        cutoff.
    """
    edge_list = [e for e in g.edges(keys=True, data=True)
                 if e[3]['kih'].max_kh_distance <= cutoff]
    if min_kihs > 0:
        c = Counter([(e[0], e[1]) for e in edge_list])
        # List of nodes that share > min_kihs edges with at least one
        # other node.
        node_list = set(list(itertools.chain.from_iterable(
            [k for k, v in c.items() if v > min_kihs])))
        edge_list = [e for e in edge_list
                     if (e[0] in node_list) and (e[1] in node_list)]
    return networkx.MultiDiGraph(edge_list)
def get_coiledcoil_region(self, cc_number=0, cutoff=7.0, min_kihs=2):
    """Assembly containing only assigned regions (i.e. regions with contiguous KnobsIntoHoles)."""
    g = self.filter_graph(self.graph, cutoff=cutoff, min_kihs=min_kihs)
    ccs = sorted(networkx.connected_component_subgraphs(g, copy=True),
                 key=lambda x: len(x.nodes()), reverse=True)
    cc = ccs[cc_number]
    helices = [x for x in g.nodes() if x.number in cc.nodes()]
    assigned_regions = self.get_assigned_regions(
        helices=helices, include_alt_states=False, complementary_only=True)
    coiledcoil_monomers = [
        h.get_slice_from_res_id(*assigned_regions[h.number])
        for h in helices]
    return Assembly(coiledcoil_monomers)
@property
def daisy_chain_graph(self):
    """Directed graph with an edge from the knob residue to each hole residue for each KnobIntoHole in self."""
    g = networkx.DiGraph()
    for x in self.get_monomers():
        for h in x.hole:
            g.add_edge(x.knob, h)
    return g
def daisy_chains(self, kih, max_path_length=None):
    """Generator for daisy chains (complementary KIHs) associated with a knob.

    Notes
    -----
    The daisy chain graph is the directed graph with edges from the
    knob residue to each hole residue for each KnobIntoHole in self.
    Given a KnobIntoHole, the daisy chains are non-trivial paths in
    this graph (walks along the directed edges) that begin and end at
    the knob. These paths must be of length <= max_path_length.

    Parameters
    ----------
    kih : KnobIntoHole interaction.
    max_path_length : int or None
        Maximum length of a daisy chain. Defaults to the number of
        chains in self.ampal_parent. This is the maximum sensible
        value; larger values will cause this function to run slowly.
    """
    if max_path_length is None:
        max_path_length = len(self.ampal_parent)
    g = self.daisy_chain_graph
    paths = networkx.all_simple_paths(
        g, source=kih.knob, target=kih.knob, cutoff=max_path_length)
    return paths
@property
def knob_end(self):
    """Coordinates of the end of the knob residue (the side-chain atom furthest from the CB atom).

    Returns the CA coordinates for GLY.
    """
    side_chain_atoms = self.knob_residue.side_chain
    if not side_chain_atoms:
        return self.knob_residue['CA']
    distances = [distance(self.knob_residue['CB'], x)
                 for x in side_chain_atoms]
    max_d = max(distances)
    knob_end_atoms = [atom for atom, d in zip(side_chain_atoms, distances)
                      if d == max_d]
    if len(knob_end_atoms) == 1:
        return knob_end_atoms[0]._vector
    else:
        return numpy.mean([x._vector for x in knob_end_atoms], axis=0)
@property
def packing_angle(self):
    """Angle between the CA-CB vector of the knob and the CA(h1)-CA(h2) vector of the hole.

    Returns None if the knob is GLY.
    """
    try:
        knob_vector = self.knob_residue['CB'] - self.knob_residue['CA']
    except KeyError:
        # Exception for GLY residues (which have no CB atom).
        return None
    hole_vector = self.hole_residues[2]['CA'] - self.hole_residues[1]['CA']
    return angle_between_vectors(knob_vector, hole_vector)
@property
def max_knob_end_distance(self):
    """Maximum distance between knob_end and each of the hole side-chain centres."""
    return max([distance(self.knob_end, h) for h in self.hole])
def base_install():
    """Generates configuration settings for required functionality of ISAMBARD."""
    # scwrl
    scwrl = {}
    print('{BOLD}{HEADER}Generating configuration files for ISAMBARD.{END_C}\n'
          'All required input can use tab completion for paths.\n'
          '{BOLD}Setting up SCWRL 4.0 (Recommended){END_C}'
          .format(**text_colours))
    scwrl_path = get_user_path(
        'Please provide a path to your SCWRL executable', required=False)
    scwrl['path'] = str(scwrl_path)
    pack_mode = get_user_option(
        'Please choose your packing mode (flexible is significantly slower '
        'but is more accurate).',
        ['flexible', 'rigid'])
    if pack_mode == 'rigid':
        scwrl['rigid_rotamer_model'] = True
    else:
        scwrl['rigid_rotamer_model'] = False
    settings['scwrl'] = scwrl
    # dssp
    print('{BOLD}Setting up DSSP (Recommended){END_C}'.format(**text_colours))
    dssp = {}
    dssp_path = get_user_path(
        'Please provide a path to your DSSP executable.', required=False)
    dssp['path'] = str(dssp_path)
    settings['dssp'] = dssp
    # buff
    print('{BOLD}Setting up BUFF (Required){END_C}'.format(**text_colours))
    buff = {}
    ffs = []
    ff_dir = isambard_path / 'buff' / 'force_fields'
    for ff_file in os.listdir(str(ff_dir)):
        ff = pathlib.Path(ff_file)
        ffs.append(ff.stem)
    force_field_choice = get_user_option(
        'Please choose the default BUFF force field, this can be modified '
        'during runtime.',
        ffs)
    buff['default_force_field'] = force_field_choice
    settings['buff'] = buff
    return
def optional_install():
    """Generates configuration settings for optional functionality of ISAMBARD."""
    # reduce
    print('{BOLD}Setting up Reduce (optional){END_C}'.format(**text_colours))
    reduce = {}
    reduce_path = get_user_path(
        'Please provide a path to your reduce executable.', required=False)
    reduce['path'] = str(reduce_path)
    reduce['folder'] = str(reduce_path.parent) if reduce_path else ''
    settings['reduce'] = reduce
    # naccess
    print('{BOLD}Setting up naccess (optional){END_C}'.format(**text_colours))
    naccess = {}
    naccess_path = get_user_path(
        'Please provide a path to your naccess executable.', required=False)
    naccess['path'] = str(naccess_path)
    settings['naccess'] = naccess
    # profit
    print('{BOLD}Setting up ProFit (optional){END_C}'.format(**text_colours))
    profit = {}
    profit_path = get_user_path(
        'Please provide a path to your ProFit executable.', required=False)
    profit['path'] = str(profit_path)
    settings['profit'] = profit
    return
@property
def pdb(self):
    """Generates a PDB string for the `PseudoMonomer`."""
    pdb_str = write_pdb(
        [self],
        ' ' if not self.tags['chain_id'] else self.tags['chain_id'])
    return pdb_str
@classmethod
def from_coordinates(cls, coordinates):
    """Creates a `Primitive` from a list of coordinates."""
    prim = cls()
    for coord in coordinates:
        pm = PseudoMonomer(ampal_parent=prim)
        pa = PseudoAtom(coord, ampal_parent=pm)
        pm.atoms = OrderedDict([('CA', pa)])
        prim.append(pm)
    prim.relabel_all()
    return prim
def rise_per_residue(self):
    """The rise per residue at each point on the Primitive.

    Notes
    -----
    Each element of the returned list is the rise per residue at a
    point on the Primitive. Element i is the distance between
    primitive[i] and primitive[i + 1]. The final value is None.
    """
    rprs = [distance(self[i]['CA'], self[i + 1]['CA'])
            for i in range(len(self) - 1)]
    rprs.append(None)
    return rprs
def radii_of_curvature(self):
    """The radius of curvature at each point on the Polymer primitive.

    Notes
    -----
    Each element of the returned list is the radius of curvature at a
    point on the Polymer primitive. Element i is the radius of the
    circumcircle formed from indices [i - 1, i, i + 1] of the
    primitive. The first and final values are None.
    """
    rocs = []
    for i in range(len(self)):
        if 0 < i < len(self) - 1:
            rocs.append(radius_of_circumcircle(
                self[i - 1]['CA'], self[i]['CA'], self[i + 1]['CA']))
        else:
            rocs.append(None)
    return rocs
@property
def sequence(self):
    """Returns the sequence of the `Polynucleotide` as a string.

    Returns
    -------
    sequence : str
        String of the monomer sequence of the `Polynucleotide`.
    """
    seq = [x.mol_code for x in self._monomers]
    return ' '.join(seq)
def run_dssp(pdb, path=True, outfile=None):
    """Uses DSSP to find helices and extracts helices from a pdb file or string.

    Parameters
    ----------
    pdb : str
        Path to pdb file or string.
    path : bool, optional
        Indicates if pdb is a path or a string.
    outfile : str, optional
        Filepath for storing the DSSP output.

    Returns
    -------
    dssp_out : str
        Std out from DSSP.
    """
    if not path:
        if type(pdb) == str:
            pdb = pdb.encode()
        try:
            temp_pdb = tempfile.NamedTemporaryFile(delete=False)
            temp_pdb.write(pdb)
            temp_pdb.seek(0)
            dssp_out = subprocess.check_output(
                [global_settings['dssp']['path'], temp_pdb.name])
            temp_pdb.close()
        finally:
            os.remove(temp_pdb.name)
    else:
        dssp_out = subprocess.check_output(
            [global_settings['dssp']['path'], pdb])
    # Decode the bytes output to str (Python 3).
    dssp_out = dssp_out.decode()
    if outfile:
        with open(outfile, 'w') as outf:
            outf.write(dssp_out)
    return dssp_out
def extract_solvent_accessibility_dssp(in_dssp, path=True):
    """Uses DSSP to extract solvent accessibility information for every residue.

    Notes
    -----
    For more information on the solvent accessibility metrics used in
    DSSP, see:
    http://swift.cmbi.ru.nl/gv/dssp/HTML/descrip.html#ACC
    In the DSSP files the value is labelled 'ACC'.

    Parameters
    ----------
    in_dssp : str
        Path to DSSP file.
    path : bool
        Indicates if in_dssp is a path or a string.

    Returns
    -------
    dssp_residues : list
        Each internal list contains:
        [0] int Residue number
        [1] str Chain identifier
        [2] str Residue type
        [3] int dssp solvent accessibility
    """
    if path:
        with open(in_dssp, 'r') as inf:
            dssp_out = inf.read()
    else:
        dssp_out = in_dssp[:]
    dssp_residues = []
    go = False
    for line in dssp_out.splitlines():
        if go:
            try:
                res_num = int(line[5:10].strip())
                chain = line[10:12].strip()
                residue = line[13]
                acc = int(line[35:38].strip())
                # It is IMPORTANT that acc remains the final value of
                # the returned list, due to its usage in
                # isambard.ampal.base_ampal.tag_dssp_solvent_accessibility
                dssp_residues.append([res_num, chain, residue, acc])
            except ValueError:
                pass
        else:
            if line[2] == '#':
                go = True
    return dssp_residues
def extract_helices_dssp(in_pdb):
    """Uses DSSP to find alpha-helices and extracts helices from a pdb file.

    Returns a length 3 list with a helix id, the chain id and a dict
    containing the coordinates of each residue's CA.

    Parameters
    ----------
    in_pdb : string
        Path to a PDB file.
    """
    from ampal.pdb_parser import split_pdb_lines
    dssp_out = subprocess.check_output(
        [global_settings['dssp']['path'], in_pdb])
    dssp_out = dssp_out.decode()  # decode the bytes output (Python 3)
    helix = 0
    helices = []
    h_on = False
    for line in dssp_out.splitlines():
        dssp_line = line.split()
        try:
            if dssp_line[4] == 'H':
                if helix not in [x[0] for x in helices]:
                    helices.append(
                        [helix, dssp_line[2], {int(dssp_line[1]): None}])
                else:
                    helices[helix][2][int(dssp_line[1])] = None
                h_on = True
            else:
                if h_on:
                    helix += 1
                    h_on = False
        except IndexError:
            pass
    with open(in_pdb, 'r') as pdb:
        pdb_atoms = split_pdb_lines(pdb.read())
    for atom in pdb_atoms:
        for helix in helices:
            if ((atom[2] == "CA") and (atom[5] == helix[1])
                    and (atom[6] in helix[2].keys())):
                helix[2][atom[6]] = tuple(atom[8:11])
    return helices
def find_ss_regions(dssp_residues):
    """Separates parsed DSSP data into groups of secondary structure.

    Notes
    -----
    Example: all residues in a single helix/loop/strand will be
    gathered into a list, then the next secondary structure element
    will be gathered into a separate list, and so on.

    Parameters
    ----------
    dssp_residues : [list]
        Each internal list contains:
        [0] int Residue number
        [1] str Secondary structure type
        [2] str Chain identifier
        [3] str Residue type
        [4] float Phi torsion angle
        [5] float Psi torsion angle

    Returns
    -------
    fragments : [[list]]
        Lists grouped in continuous regions of secondary structure.
        Innermost list has the same format as above.
    """
    loops = [' ', 'B', 'S', 'T']
    current_ele = None
    fragment = []
    fragments = []
    first = True
    for ele in dssp_residues:
        if first:
            first = False
            fragment.append(ele)
        elif current_ele in loops:
            if ele[1] in loops:
                fragment.append(ele)
            else:
                fragments.append(fragment)
                fragment = [ele]
        else:
            if ele[1] == current_ele:
                fragment.append(ele)
            else:
                fragments.append(fragment)
                fragment = [ele]
        current_ele = ele[1]
    # Append the trailing fragment, which the loop above never flushes.
    if fragment:
        fragments.append(fragment)
    return fragments
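A toy trace of the grouping behaviour (including the trailing-fragment append above), using two-element stand-ins for the full residue lists; index 1 holds the secondary-structure code, as in the docstring:

toy = [(1, 'H'), (2, 'H'), (3, ' '), (4, 'E')]
find_ss_regions(toy)
# -> [[(1, 'H'), (2, 'H')], [(3, ' ')], [(4, 'E')]]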
def memory():
    """Determines the machine's memory specifications.

    Returns
    -------
    mem_info : dictionary
        Holds the current values for the total, free and used memory
        of the system.
    """
    mem_info = {}
    if platform.linux_distribution()[0]:
        with open('/proc/meminfo') as file:
            c = 0
            for line in file:
                lst = line.split()
                if str(lst[0]) == 'MemTotal:':
                    mem_info['total'] = int(lst[1])
                elif str(lst[0]) in ('MemFree:', 'Buffers:', 'Cached:'):
                    c += int(lst[1])
            mem_info['free'] = c
            mem_info['used'] = (mem_info['total']) - c
    elif platform.mac_ver()[0]:
        ps = subprocess.Popen(
            ['ps', '-caxm', '-orss,comm'],
            stdout=subprocess.PIPE).communicate()[0].decode()
        vm = subprocess.Popen(
            ['vm_stat'], stdout=subprocess.PIPE).communicate()[0].decode()
        # Iterate processes
        process_lines = ps.split('\n')
        sep = re.compile(r'[\s]+')
        rss_total = 0  # kB
        for row in range(1, len(process_lines)):
            row_text = process_lines[row].strip()
            row_elements = sep.split(row_text)
            try:
                rss = float(row_elements[0]) * 1024
            except:
                rss = 0  # ignore...
            rss_total += rss
        # Process vm_stat
        vm_lines = vm.split('\n')
        sep = re.compile(r':[\s]+')
        vm_stats = {}
        for row in range(1, len(vm_lines) - 2):
            row_text = vm_lines[row].strip()
            row_elements = sep.split(row_text)
            vm_stats[(row_elements[0])] = int(
                row_elements[1].strip('.')) * 4096
        mem_info['total'] = rss_total
        mem_info['used'] = vm_stats["Pages active"]
        mem_info['free'] = vm_stats["Pages free"]
    else:
        raise OSError('Unsupported Operating System.')
    return mem_info
def get_chunk_size(N, n):
    """Given a dimension of size 'N', determine the number of rows or columns that can fit into memory.

    Parameters
    ----------
    N : int
        The size of one of the dimensions of a two-dimensional array.
    n : int
        The number of times an 'N' by 'chunks_size' array can fit in
        memory.

    Returns
    -------
    chunks_size : int
        The size of the dimension orthogonal to the one of size 'N'.
    """
    mem_free = memory()['free']
    if mem_free > 60000000:
        chunks_size = int(((mem_free - 10000000) * 1000) / (4 * n * N))
        return chunks_size
    elif mem_free > 40000000:
        chunks_size = int(((mem_free - 7000000) * 1000) / (4 * n * N))
        return chunks_size
    elif mem_free > 14000000:
        chunks_size = int(((mem_free - 2000000) * 1000) / (4 * n * N))
        return chunks_size
    elif mem_free > 8000000:
        chunks_size = int(((mem_free - 1400000) * 1000) / (4 * n * N))
        return chunks_size
    elif mem_free > 2000000:
        chunks_size = int(((mem_free - 900000) * 1000) / (4 * n * N))
        return chunks_size
    elif mem_free > 1000000:
        chunks_size = int(((mem_free - 400000) * 1000) / (4 * n * N))
        return chunks_size
    else:
        raise MemoryError(
            "\nERROR: DBSCAN_multiplex @ get_chunk_size:\n"
            "this machine does not have enough free memory "
            "to perform the remaining computations.\n")
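A worked instance of the arithmetic in the second branch (the numbers are purely illustrative; `mem_free` is reported in kB):

# mem_free = 50_000_000 kB, N = 100_000, n = 2:
# chunks_size = (50_000_000 - 7_000_000) * 1000 // (4 * 2 * 100_000)
#             = 53_750 rows per chunk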
def all_floating_ips(self):
    """Lists all of the Floating IPs available on the account."""
    if self.api_version == 2:
        json = self.request('/floating_ips')
        return json['floating_ips']
    else:
        raise DoError(v2_api_required_str)
def new_floating_ip(self, **kwargs): droplet_id = kwargs.get('droplet_id') region = kwargs.get('region') if self.api_version == 2: if droplet_id is not None and region is not None: raise DoError('Only one of droplet_id and region is required to create a Floating IP. ' \ 'Set one of the variables and try again.') elif droplet_id is None and region is None: raise DoError('droplet_id or region is required to create a Floating IP. ' \ 'Set one of the variables and try again.') else: if droplet_id is not None: params = {'droplet_id': droplet_id} else: params = {'region': region} json = self.request('/floating_ips', params=params, method='POST') return json['floating_ip'] else: raise DoError(v2_api_required_str)
Creates a Floating IP and assigns it to a Droplet or reserves it to a region.
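A hypothetical round trip, assuming 'do' is a v2 client instance exposing the methods in this group and that the returned record carries the address under an 'ip' key (both are assumptions, not shown in the snippets):

def reserve_and_release(do):
    # Reserve an address to a region, then release it again.
    ip = do.new_floating_ip(region='nyc3')   # or: droplet_id=12345
    do.destroy_floating_ip(ip['ip'])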
def destroy_floating_ip(self, ip_addr):
    if self.api_version == 2:
        self.request('/floating_ips/' + ip_addr, method='DELETE')
    else:
        raise DoError(v2_api_required_str)
Deletes a Floating IP and removes it from the account.
def assign_floating_ip(self, ip_addr, droplet_id):
    if self.api_version == 2:
        params = {'type': 'assign', 'droplet_id': droplet_id}
        json = self.request('/floating_ips/' + ip_addr + '/actions',
                            params=params, method='POST')
        return json['action']
    else:
        raise DoError(v2_api_required_str)
Assigns a Floating IP to a Droplet.
def unassign_floating_ip(self, ip_addr):
    if self.api_version == 2:
        params = {'type': 'unassign'}
        json = self.request('/floating_ips/' + ip_addr + '/actions',
                            params=params, method='POST')
        return json['action']
    else:
        raise DoError(v2_api_required_str)
Unassigns a Floating IP from a Droplet. The Floating IP remains reserved in the region but is no longer assigned to a Droplet.
def list_floating_ip_actions(self, ip_addr):
    if self.api_version == 2:
        json = self.request('/floating_ips/' + ip_addr + '/actions')
        return json['actions']
    else:
        raise DoError(v2_api_required_str)
Retrieves a list of all actions that have been executed on a Floating IP.
def get_floating_ip_action(self, ip_addr, action_id):
    if self.api_version == 2:
        # Cast in case the caller passes a numeric action id.
        json = self.request('/floating_ips/' + ip_addr + '/actions/'
                            + str(action_id))
        return json['action']
    else:
        raise DoError(v2_api_required_str)
Retrieves the status of a Floating IP action.
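A hypothetical action lifecycle with the same assumed client instance 'do'; the 'id', 'type' and 'status' keys on action records are assumptions:

def cycle_assignment(do, ip_addr, droplet_id):
    # Assign, poll the action once, unassign, then list the history.
    action = do.assign_floating_ip(ip_addr, droplet_id)
    do.get_floating_ip_action(ip_addr, str(action['id']))
    do.unassign_floating_ip(ip_addr)
    return [(a['type'], a['status'])
            for a in do.list_floating_ip_actions(ip_addr)]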
def raw_sign(message, secret):
    digest = hmac.new(secret, message, hashlib.sha256).digest()
    return base64.b64encode(digest)
Sign a message.
def http_signature(message, key_id, signature):
    template = ('Signature keyId="%(keyId)s",algorithm="hmac-sha256",'
                'headers="%(headers)s",signature="%(signature)s"')
    headers = ['(request-target)', 'host', 'accept', 'date']
    return template % {
        'keyId': key_id,
        'signature': signature,
        'headers': ' '.join(headers),
    }
Return the HTTP "Signature" authorization header string built from a message signature.
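A minimal end-to-end sketch combining the two helpers above; the key id and secret are made-up values, and both message and secret are passed as bytes since hmac.new requires bytes under Python 3:

import base64
import hashlib
import hmac

secret = b'my-secret'                              # hypothetical credentials
message = b'date: Mon, 17 Feb 2014 06:11:05 GMT'   # hypothetical signing string
signature = raw_sign(message, secret).decode('ascii')
header = http_signature(message, 'my-key-id', signature)
print(header)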
def get_signature_from_signature_string(self, signature):
    match = self.SIGNATURE_RE.search(signature)
    if not match:
        return None
    return match.group(1)
Return the signature from the signature header or None.
def get_headers_from_signature(self, signature):
    match = self.SIGNATURE_HEADERS_RE.search(signature)
    if not match:
        return ['date']
    headers_string = match.group(1)
    return headers_string.split()
Returns a list of header fields to sign.

According to http://tools.ietf.org/html/draft-cavage-http-signatures-03
section 2.1.3, the headers are optional. If not specified, the single
value of "Date" must be used.
def header_canonical(self, header_name):
    # Translate as stated in the docs:
    # https://docs.djangoproject.com/en/1.6/ref/request-response/#django.http.HttpRequest.META
    # Django stores these two META keys with underscores and no HTTP_
    # prefix, so return the underscore forms.
    header_name = header_name.lower()
    if header_name == 'content-type':
        return 'CONTENT_TYPE'
    elif header_name == 'content-length':
        return 'CONTENT_LENGTH'
    return 'HTTP_%s' % header_name.replace('-', '_').upper()
Translate HTTP headers to Django header names.
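Illustrative translations, wrapped in a helper since header_canonical is a method; 'auth' is an assumed instance of the class it belongs to:

def check_translations(auth):
    # Ordinary headers gain the HTTP_ prefix; the two content headers
    # map to Django's unprefixed META keys.
    assert auth.header_canonical('Date') == 'HTTP_DATE'
    assert auth.header_canonical('X-Api-Key') == 'HTTP_X_API_KEY'
    assert auth.header_canonical('Content-Type') == 'CONTENT_TYPE'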
def build_dict_to_sign(self, request, signature_headers):
    d = {}
    for header in signature_headers:
        if header == '(request-target)':
            continue
        d[header] = request.META.get(self.header_canonical(header))
    return d
Build a dict with headers and values used in the signature. "signature_headers" is a list of lowercase header names.
def build_signature(self, user_api_key, user_secret, request):
    path = request.get_full_path()
    sent_signature = request.META.get(
        self.header_canonical('Authorization'))
    signature_headers = self.get_headers_from_signature(sent_signature)
    unsigned = self.build_dict_to_sign(request, signature_headers)

    # Sign string and compare.
    signer = HeaderSigner(
        key_id=user_api_key, secret=user_secret,
        headers=signature_headers, algorithm=self.ALGORITHM)
    signed = signer.sign(unsigned, method=request.method, path=path)
    return signed['authorization']
Return the signature for the request.
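A hedged verification sketch: recompute the signature server-side and compare it with what the client sent. The 'auth' instance and the Django 'request' object are assumptions, not part of the snippets above:

def verify_request(auth, request, api_key, secret):
    # Both values are full Authorization header strings, so a direct
    # comparison suffices for this sketch.
    sent = request.META.get('HTTP_AUTHORIZATION', '')
    computed = auth.build_signature(api_key, secret, request)
    return computed == sent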
# The compiled patterns below are assumed module-level helpers: the snippet
# references _1 and _2 without defining them, so the conventional two-pass
# camel-case regexes are reconstructed here.
import re

_1 = re.compile(r'(.)([A-Z][a-z]+)')
_2 = re.compile(r'([a-z0-9])([A-Z])')


def camel_to_snake_case(string):
    s = _1.sub(r'\1_\2', string)
    return _2.sub(r'\1_\2', s).lower()
Converts 'string' presented in camel case to snake case. e.g.: CamelCase => snake_case
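Expected behaviour, assuming the two reconstructed patterns above:

assert camel_to_snake_case('CamelCase') == 'camel_case'
assert camel_to_snake_case('HTTPResponseCode') == 'http_response_code'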
def url_assembler(query_string, no_redirect=0, no_html=0, skip_disambig=0):
    params = [('q', query_string.encode("utf-8")), ('format', 'json')]
    if no_redirect:
        params.append(('no_redirect', 1))
    if no_html:
        params.append(('no_html', 1))
    if skip_disambig:
        params.append(('skip_disambig', 1))
    return '/?' + urlencode(params)
Assembles parameters for building the request query.

Args:
    query_string: Query to be passed to DuckDuckGo API.
    no_redirect: Skip HTTP redirects (for !bang commands). Default - False.
    no_html: Remove HTML from text, e.g. bold and italics. Default - False.
    skip_disambig: Skip disambiguation (D) Type. Default - False.

Returns:
    A “percent-encoded” string which is used as a part of the query.
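A small usage sketch; the expected output follows urlencode's plus-escaping of spaces:

url = url_assembler('python language', no_html=1)
# -> '/?q=python+language&format=json&no_html=1'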
def _import(module, cls):
    global Scanner

    try:
        cls = str(cls)
        mod = __import__(str(module), globals(), locals(), [cls], 1)
        Scanner = getattr(mod, cls)
    except ImportError:
        pass
A messy way to import library-specific classes. TODO: I should really make a factory class or something, but I'm lazy. Plus, factories remind me a lot of java...
def create(type_dict, *type_parameters):
    assert len(type_parameters) == 1
    klazz = TypeFactory.new(type_dict, *type_parameters[0])
    assert isclass(klazz)
    assert issubclass(klazz, Object)
    return TypeMetaclass('%sList' % klazz.__name__, (ListContainer,),
                         {'TYPE': klazz})
Construct a List containing type 'klazz'.
def load_file(filename):
    "Runs the given scent.py file."
    mod_name = '.'.join(os.path.basename(filename).split('.')[:-1])
    mod_path = os.path.dirname(filename)
    if mod_name in sys.modules:
        del sys.modules[mod_name]
    if mod_path not in set(sys.modules.keys()):
        sys.path.insert(0, mod_path)
    # 'g' is assumed to be a module-level alias for globals().
    return ScentModule(__import__(mod_name, g, g), filename)
Runs the given scent.py file.
def exec_from_dir(dirname=None, scent="scent.py"):
    if dirname is None:
        dirname = os.getcwd()
    files = os.listdir(dirname)

    if scent not in files:
        return None

    return load_file(os.path.join(dirname, scent))
Runs the scent.py file from the given directory (cwd if None given). Returns module if loaded a scent, None otherwise.
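Typical call, a sketch assuming a scent.py may or may not exist in the working directory:

mod = exec_from_dir()
if mod is None:
    print('no scent.py found; nothing to run')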
def new(type_dict, type_factory, *type_parameters):
    type_tuple = (type_factory,) + type_parameters
    if type_tuple not in type_dict:
        factory = TypeFactory.get_factory(type_factory)
        reified_type = factory.create(type_dict, *type_parameters)
        type_dict[type_tuple] = reified_type
    return type_dict[type_tuple]
Create a fully reified type from a type schema.
def load(type_tuple, into=None):
    type_dict = {}
    TypeFactory.new(type_dict, *type_tuple)
    deposit = into if (into is not None and isinstance(into, dict)) else {}
    for reified_type in type_dict.values():
        deposit[reified_type.__name__] = reified_type
    return deposit
Determine all types touched by loading the type and deposit them into the particular namespace.
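A hedged sketch of depositing every type touched by a composite schema into a fresh namespace; the tuple layout ('List' wrapping an 'Enum') is inferred from the factories shown here, not confirmed by the source:

ns = {}
TypeFactory.load(('List', ('Enum', 'Colour', ('RED', 'GREEN', 'BLUE'))),
                 into=ns)
# ns should now contain every reified type touched along the way,
# e.g. ns['Colour'] and ns['ColourList'].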
def load_json(json_list, into=None):
    def l2t(obj):
        # Recursively convert lists to tuples (and dicts to frozendicts)
        # so the schema becomes hashable for TypeFactory's cache.
        if isinstance(obj, list):
            return tuple(l2t(L) for L in obj)
        elif isinstance(obj, dict):
            return frozendict(obj)
        else:
            return obj
    return TypeFactory.load(l2t(json_list), into=into)
Determine all types touched by loading the type and deposit them into the particular namespace.
def create(type_dict, *type_parameters):
    name, values = type_parameters
    assert isinstance(values, (list, tuple))
    for value in values:
        assert isinstance(value, Compatibility.stringy)
    return TypeMetaclass(str(name), (EnumContainer,), {'VALUES': values})
EnumFactory.create(*type_parameters) expects: enumeration name, (enumeration values)
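A direct construction sketch through TypeFactory.new, assuming the enum factory is registered under the name 'Enum':

type_dict = {}
Weekday = TypeFactory.new(type_dict, 'Enum', 'Weekday',
                          ('MON', 'TUE', 'WED'))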