response | instruction |
---|---|
Retrieve the database cross references for the sequence (PRIVATE). | def _retrieve_dbxrefs(adaptor, primary_id):
    """Retrieve the database cross references for the sequence (PRIVATE)."""
    _dbxrefs = []
    dbxrefs = adaptor.execute_and_fetchall(
        "SELECT dbname, accession, version"
        " FROM bioentry_dbxref join dbxref using (dbxref_id)"
        " WHERE bioentry_id = %s"
        ' ORDER BY "rank"',
        (primary_id,),
    )
    for dbname, accession, version in dbxrefs:
        if version and version != "0":
            v = f"{accession}.{version}"
        else:
            v = accession
        _dbxrefs.append(f"{dbname}:{v}")
    return _dbxrefs |
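A minimal sketch of the formatting rule above, using hypothetical (dbname, accession, version) rows in place of a live adaptor, so no database is needed:

    rows = [("GenBank", "AF158246", "1"), ("taxon", "9606", "0")]
    dbxrefs = []
    for dbname, accession, version in rows:
        # A version of "0" (or empty) means no version suffix is appended
        v = f"{accession}.{version}" if version and version != "0" else accession
        dbxrefs.append(f"{dbname}:{v}")
    print(dbxrefs)  # ['GenBank:AF158246.1', 'taxon:9606']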
Load an existing BioSQL-style database.
This function is the easiest way to retrieve a connection to a
database, doing something like::
    from BioSQL import BioSeqDatabase
    server = BioSeqDatabase.open_database(user="root", db="minidb")
Arguments:
- driver - The name of the database driver to use for connecting. The
  driver should implement the python DB API. By default, the MySQLdb
  driver is used.
- user - the username to connect to the database with.
- password, passwd - the password to connect with
- host - the hostname of the database
- database or db - the name of the database | def open_database(driver="MySQLdb", **kwargs):
    """Load an existing BioSQL-style database.

    This function is the easiest way to retrieve a connection to a
    database, doing something like::

        from BioSQL import BioSeqDatabase
        server = BioSeqDatabase.open_database(user="root", db="minidb")

    Arguments:
     - driver - The name of the database driver to use for connecting. The
       driver should implement the python DB API. By default, the MySQLdb
       driver is used.
     - user - the username to connect to the database with.
     - password, passwd - the password to connect with
     - host - the hostname of the database
     - database or db - the name of the database

    """
    if driver == "psycopg":
        raise ValueError(
            "Using BioSQL with psycopg (version one) is no "
            "longer supported. Use psycopg2 instead."
        )
    if os.name == "java":
        from com.ziclix.python.sql import zxJDBC

        module = zxJDBC
        if driver in ["MySQLdb"]:
            jdbc_driver = "com.mysql.jdbc.Driver"
            url_pref = "jdbc:mysql://" + kwargs["host"] + "/"
        elif driver in ["psycopg2"]:
            jdbc_driver = "org.postgresql.Driver"
            url_pref = "jdbc:postgresql://" + kwargs["host"] + "/"
    else:
        module = __import__(driver, fromlist=["connect"])
    connect = module.connect
    # Different drivers use different keywords...
    kw = kwargs.copy()
    if driver in ["MySQLdb", "mysql.connector"] and os.name != "java":
        if "database" in kw:
            kw["db"] = kw["database"]
            del kw["database"]
        if "password" in kw:
            kw["passwd"] = kw["password"]
            del kw["password"]
        # kw["charset"] = "utf8"
        # kw["use_unicode"] = True
    else:
        # DB-API recommendations
        if "db" in kw:
            kw["database"] = kw["db"]
            del kw["db"]
        if "passwd" in kw:
            kw["password"] = kw["passwd"]
            del kw["passwd"]
    if driver in ["psycopg2", "pgdb"] and not kw.get("database"):
        kw["database"] = "template1"
    # SQLite connect takes the database name as input
    if os.name == "java":
        if driver in ["MySQLdb"]:
            conn = connect(
                url_pref + kw.get("database", "mysql"),
                kw["user"],
                kw["password"],
                jdbc_driver,
            )
        elif driver in ["psycopg2"]:
            conn = connect(
                url_pref + kw.get("database", "postgresql") + "?stringtype=unspecified",
                kw["user"],
                kw["password"],
                jdbc_driver,
            )
    elif driver in ["sqlite3"]:
        conn = connect(kw["database"])
    else:
        conn = connect(**kw)
    if os.name == "java":
        server = DBServer(conn, module, driver)
    else:
        server = DBServer(conn, module)
    # Sets MySQL to allow double quotes, rather than only backticks
    if driver in ["MySQLdb", "mysql.connector"]:
        server.adaptor.execute("SET sql_mode='ANSI_QUOTES';")
    # TODO - Remove the following once BioSQL Bug 2839 is fixed.
    # Test for RULES in PostgreSQL schema, see also Bug 2833.
    if driver in ["psycopg2", "pgdb"]:
        sql = (
            "SELECT ev_class FROM pg_rewrite WHERE "
            "rulename='rule_bioentry_i1' OR "
            "rulename='rule_bioentry_i2';"
        )
        if server.adaptor.execute_and_fetchall(sql):
            import warnings

            from Bio import BiopythonWarning

            warnings.warn(
                "Your BioSQL PostgreSQL schema includes some rules "
                "currently required for bioperl-db but which may "
                "cause problems loading data using Biopython (see "
                "BioSQL's RedMine Bug 2839 aka GitHub Issue 4 "
                "https://github.com/biosql/biosql/issues/4). "
                "If you do not use BioPerl, please remove these "
                "rules. Biopython should cope with the rules "
                "present, but with a performance penalty when "
                "loading new records.",
                BiopythonWarning,
            )
            global _POSTGRES_RULES_PRESENT
            _POSTGRES_RULES_PRESENT = True
    elif driver == "sqlite3":
        # Tell SQLite that we want to use foreign keys
        # https://www.sqlite.org/foreignkeys.html#fk_enable
        server.adaptor.execute("PRAGMA foreign_keys = ON")
    return server |
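A quick way to exercise open_database without a MySQL or PostgreSQL server is the sqlite3 driver, which only needs a file path (the path below is hypothetical; an empty file will connect, but has no BioSQL schema loaded yet):

    from BioSQL import BioSeqDatabase

    server = BioSeqDatabase.open_database(driver="sqlite3", db="/tmp/biosql-test.db")
    print(server.adaptor)  # a live DBServer wrapping a sqlite3 connection
    server.close()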
Return the correct dbutils object for the database driver. | def get_dbutils(module_name):
    """Return the correct dbutils object for the database driver."""
    try:
        return _dbutils[module_name]()
    except KeyError:
        return Generic_dbutils() |
Perform LaTeX magic to insert author line breaks. | def _insert_latex_author_line_breaks(author):
    """Perform LaTeX magic to insert author line breaks."""
    names = [x.strip() + "," for x in author.split(",")]
    for i in range(2, len(names) - 1, 3):
        # After every three names insert legacy LaTeX \and
        # for a line break in the authors
        names[i] += r" \and"
    return (" ".join(names))[:-1] |
Insert file specific :github_url: metadata for theme breadcrumbs.
See https://sphinx-rtd-theme.readthedocs.io/en/stable/configuring.html#confval-github_url
We use this for the API documentation pages which are generated from
our source code, rather than native RST files in the repository. | def insert_github_link(filename):
    """Insert file specific :github_url: metadata for theme breadcrumbs.

    See https://sphinx-rtd-theme.readthedocs.io/en/stable/configuring.html#confval-github_url

    We use this for the API documentation pages which are generated from
    our source code, rather than native RST files in the repository.
    """
    assert filename.startswith("api/Bio") and filename.endswith(".rst")
    with open(filename) as handle:
        text = handle.read()
    if ":github_url:" in text:
        return
    source = filename[4:-4].replace(".", "/") + "/__init__.py"
    # C is the rarest case, but doing it last would give a more confusing
    # error message. Right now it doesn't work anyway...
    # Handler <function update_defvalue at 0x...> for event
    # 'autodoc-before-process-signature' threw an exception
    # if not os.path.isfile(os.path.join("../", source)):
    #     source = filename[4:-4].replace(".", "/") + ".c"
    if not os.path.isfile(os.path.join("../", source)):
        source = filename[4:-4].replace(".", "/") + ".py"
    if not os.path.isfile(os.path.join("../", source)):
        sys.stderr.write(
            "WARNING: Could not map %s to a Python file, e.g. %s\n" % (filename, source)
        )
        return
    text = ":github_url: https://github.com/%s/%s/blob/%s/%s\n\n%s" % (
        html_context["github_user"],
        html_context["github_repo"],
        html_context["github_version"],
        source,
        text,
    )
    with open(filename, "w") as handle:
        handle.write(text) |
Call sphinx-apidoc on Bio and BioSQL modules. | def run_apidoc(_):
    """Call sphinx-apidoc on Bio and BioSQL modules."""
    from sphinx.ext.apidoc import main as apidoc_main

    cur_dir = os.path.abspath(os.path.dirname(__file__))
    # Can't see a better way than running apidoc twice, for Bio & BioSQL
    # We don't care about the index.rst / conf.py (we have our own)
    # or the Makefile / make.bat (effectively same) clashing,
    # $ sphinx-apidoc -e -F -o /tmp/api/BioSQL BioSQL
    # $ sphinx-apidoc -e -F -o /tmp/api/Bio Bio
    tmp_path = tempfile.mkdtemp()
    apidoc_main(["-e", "-F", "-o", tmp_path, "../BioSQL"])
    apidoc_main(
        [
            "-e",
            "-F",
            "-o",
            tmp_path,
            # The input path:
            "../Bio",
            # These are patterns to exclude:
            "../Bio/Alphabet/",
            "../Bio/Restriction/Restriction.py",
        ]
    )
    os.remove(os.path.join(tmp_path, "index.rst"))  # Using our own
    for filename in os.listdir(tmp_path):
        if filename.endswith(".rst"):
            # Used to just move the files, but sometimes see duplicated TOC
            # entries from our non-private C extensions... so remove any:
            with open(os.path.join(tmp_path, filename)) as in_h:
                with open(os.path.join(cur_dir, "api", filename), "w") as out_h:
                    prev = ""
                    for line in in_h:
                        if line != prev:
                            out_h.write(line)
                        prev = line
            # shutil.move(
            #     os.path.join(tmp_path, filename), os.path.join(cur_dir, "api", filename)
            # )
    shutil.rmtree(tmp_path)
    for f in os.listdir(os.path.join(cur_dir, "api")):
        if f.startswith("Bio") and f.endswith(".rst"):
            insert_github_link("api/" + f) |
Over-ride Sphinx setup to trigger sphinx-apidoc. | def setup(app):
    """Over-ride Sphinx setup to trigger sphinx-apidoc."""
    app.connect("builder-inited", run_apidoc)
    app.add_css_file("biopython.css")

    def add_documenter(app, env, docnames):
        app.add_autodocumenter(BioPythonAPI, True)

    # Over-ride autodoc documenter
    app.connect("env-before-read-docs", add_documenter) |
Extract accession number from sequence id. | def get_accession_num(seq_record):
    """Extract accession number from sequence id."""
    accession_atoms = seq_record.id.split("|")
    gb_name = accession_atoms[3]
    # strip the version info before returning
    return gb_name[:-2] |
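A worked example with an NCBI-style "|"-delimited identifier, the form this helper assumes (note the final [:-2] slice only strips single-digit version suffixes like ".1"):

    seq_id = "gi|5690369|gb|AF158246.1|AF158246"
    atoms = seq_id.split("|")
    gb_name = atoms[3]   # 'AF158246.1'
    print(gb_name[:-2])  # 'AF158246'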
Extract species names from sequence description line. | def extract_organisms(file_to_parse, fmt):
    """Extract species names from sequence description line."""
    all_species = set()
    for cur_record in SeqIO.parse(open(file_to_parse), fmt):
        # extract the info from the description
        new_species = cur_record.description.split()[1]
        all_species.add(new_species)
    # sorting the species will convert the set to a list
    all_species = sorted(all_species)
    return all_species |
Search list of SeqFeature objects for an identifier under the given tags. | def get_feature(features, id, tags=("locus_tag", "gene", "old_locus_tag")):
    """Search list of SeqFeature objects for an identifier under the given tags."""
    for f in features:
        for key in tags:
            # tag may not be present in this feature
            for x in f.qualifiers.get(key, []):
                if x == id:
                    return f
    raise KeyError(id) |
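A minimal sketch of calling get_feature on hand-built SeqFeature objects (qualifier values in Biopython are lists of strings; get_feature above is assumed to be in scope):

    from Bio.SeqFeature import SeqFeature

    features = [
        SeqFeature(type="gene", qualifiers={"locus_tag": ["b0001"]}),
        SeqFeature(type="gene", qualifiers={"gene": ["thrA"], "old_locus_tag": ["b0002"]}),
    ]
    print(get_feature(features, "thrA").qualifiers["gene"])  # ['thrA'], matched via 'gene'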
Print a help message. | def print_usage():
    """Print a help message."""
    print(
        """query_pubmed.py [-h] [-c] [-d delay] query
This script sends a query to PubMed (via the NCBI Entrez webservice*)
and prints the MEDLINE formatted results to the screen.
Arguments:
    -h           Print out this help message.
    -c           Count the hits, and don't print them out.
* http://www.ncbi.nlm.nih.gov/Entrez/
"""
    ) |
Print a help message. | def usage():
    """Print a help message."""
    print(
        """Extract a SCOP domain's ATOM and HETATOM records from the relevant PDB file.
For example:
    scop_pdb.py astral-rapid-access-1.55.raf dir.cla.scop.txt_1.55 d3hbib_
A result file, d3hbib_.ent, will be generated in the working directory.
The required RAF file can be found at [http://astral.stanford.edu/raf.html],
and the SCOP CLA file at [http://scop.berkeley.edu/parse/index.html].
Note: Errors will occur if the PDB file has been altered since the creation
of the SCOP CLA and ASTRAL RAF files.
Usage: scop_pdb [-h] [-i file] [-o file] [-p pdb_url_prefix]
                raf_url cla_url [sid] [sid] [sid] ...
 -h         -- Print this help message.
 -i file    -- Input file name. Each line should start with an sid (Scop domain
               identifier). Blank lines, and lines starting with '#' are
               ignored. If file is '-' then data is read from stdin. If not
               given then sids are taken from the command line.
 -o file    -- Output file name. If '-' then data is written to stdout. If not
               given then data is written to files named sid+'.ent'.
 -p pdb_url -- A URL for PDB files. The token '%s' will be replaced with the
               4 character PDB ID. If the pdb_url is not given then the latest
               PDB file is retrieved directly from rcsb.org.
 raf_url    -- The URL or filename of an ASTRAL Rapid Access File sequence map.
               See [http://astral.stanford.edu/raf.html]
 cla_url    -- The URL or filename of a SCOP parsable CLA file.
               See [http://scop.berkeley.edu/parse/index.html]
 sid        -- A SCOP domain identifier. e.g. d3hbib_
"""
    ) |
Make a local copy of an online pdb file and return a file handle. | def open_pdb(pdbid, pdb_url=None):
    """Make a local copy of an online pdb file and return a file handle."""
    if pdb_url is None:
        pdb_url = default_pdb_url
    url = pdb_url % pdbid
    fn, header = _urlretrieve(url)
    return open(fn) |
Extract a SCOP domain's ATOM and HETATOM records from a PDB file. | def main():
    """Extract a SCOP domain's ATOM and HETATOM records from a PDB file."""
    try:
        opts, args = getopt.getopt(
            sys.argv[1:], "hp:o:i:", ["help", "usage", "pdb=", "output=", "input="]
        )
    except getopt.GetoptError:
        # show help information and exit:
        usage()
        sys.exit(2)
    input = None
    in_handle = None
    output = None
    pdb_url = None
    cla_url = None
    raf_url = None
    for o, a in opts:
        if o in ("-h", "--help", "--usage"):
            usage()
            sys.exit()
        elif o in ("-o", "--output"):
            output = a
        elif o in ("-i", "--input"):
            input = a
        elif o in ("-p", "--pdb"):
            pdb_url = a
    if len(args) < 2:
        sys.stderr.write("Not enough arguments. Try --help for more details.\n")
        sys.exit(2)
    raf_url = args[0]
    cla_url = args[1]
    (raf_filename, headers) = _urlretrieve(raf_url)
    seqMapIndex = Raf.SeqMapIndex(raf_filename)
    (cla_filename, headers) = _urlretrieve(cla_url)
    claIndex = Cla.Index(cla_filename)
    if input is None:
        sids = args[2:]
    elif input == "-":
        sids = sys.stdin
    else:
        in_handle = open(input)
        sids = in_handle
    try:
        for sid in sids:
            if not sid or sid[0:1] == "#":
                continue
            id = sid[0:7]
            pdbid = id[1:5]
            s = pdbid[0:1]
            if s == "0" or s == "s":
                sys.stderr.write(f"No coordinates for domain {id}\n")
                continue
            if output is None:
                filename = id + ".ent"
                out_handle = open(filename, "w+")
            elif output == "-":
                out_handle = sys.stdout
            else:
                out_handle = open(output, "w+")
            try:
                try:
                    claRec = claIndex[id]
                    residues = claRec.residues
                    seqMap = seqMapIndex.getSeqMap(residues)
                    pdbid = residues.pdbid
                    f = open_pdb(pdbid, pdb_url)
                    try:
                        seqMap.getAtoms(f, out_handle)
                    finally:
                        f.close()
                except (OSError, KeyError, RuntimeError) as e:
                    sys.stderr.write(f"I cannot do SCOP domain {id} : {e}\n")
            finally:
                out_handle.close()
    finally:
        if in_handle is not None:
            in_handle.close() |
Return a wrapped line if length is larger than max_len.
The new parameter 'string' allows wrapping quoted text which is delimited
by single quotes. It adds a closing quote to the end of the line and an
opening quote to the start of the next line. | def line_wrap(text, indent=0, max_len=78, string=False):
    """Return a wrapped line if length is larger than max_len.

    The new parameter 'string' allows wrapping quoted text which is delimited
    by single quotes. It adds a closing quote to the end of the line and an
    opening quote to the start of the next line.
    """
    split_len = max_len if not string else max_len - 2
    if len(text) <= max_len:
        return text
    line = text[:split_len]
    assert " " in line, line
    line, rest = line.rsplit(" ", 1)
    # New:
    if string:
        line += ' "'
        rest = '"' + rest
    rest = " " * indent + rest + text[split_len:]
    assert len(line) < max_len
    if indent + len(rest) <= max_len:
        return line + "\n" + rest
    else:
        return line + "\n" + line_wrap(rest, indent, max_len, string) |
Compare two records to see if they are the same.
This compares the two GenBank records, and will raise an AssertionError
if two lines do not match, showing the non-matching lines. | def do_comparison(good_record, test_record):
    """Compare two records to see if they are the same.

    This compares the two GenBank records, and will raise an AssertionError
    if two lines do not match, showing the non-matching lines.
    """
    good_handle = StringIO(good_record)
    test_handle = StringIO(test_record)
    while True:
        good_line = good_handle.readline()
        test_line = test_handle.readline()
        if not good_line and not test_line:
            break
        if not good_line:
            # Expected ran out first; check the other side for real content
            if test_line.strip():
                raise AssertionError(f"Extra info in Test: `{test_line}`")
        if not test_line:
            if good_line.strip():
                raise AssertionError(f"Extra info in Expected: `{good_line}`")
        assert test_line == good_line, (
            "Expected does not match Test.\n"
            f"Expect:`{good_line}`\nTest :`{test_line}`\n"
        ) |
Write a GenBank record from a Genbank file and compare them. | def write_format(file):
    """Write a GenBank record from a Genbank file and compare them."""
    record_parser = GenBank.RecordParser(debug_level=2)
    print("Testing GenBank writing for %s..." % os.path.basename(file))
    # be able to handle gzipped files
    if ".gz" in file:
        cur_handle = gzip.open(file, "rb")
        compare_handle = gzip.open(file, "rb")
    else:
        cur_handle = open(file)
        compare_handle = open(file)
    iterator = GenBank.Iterator(cur_handle, record_parser)
    compare_iterator = GenBank.Iterator(compare_handle)
    while True:
        cur_record = next(iterator)
        compare_record = next(compare_iterator)
        if cur_record is None or compare_record is None:
            break
        # print("\tTesting for %s" % cur_record.version)
        output_record = str(cur_record) + "\n"
        try:
            do_comparison(compare_record, output_record)
        except AssertionError as msg:
            print(f"\tTesting for {cur_record.version}")
            print(msg)
    cur_handle.close()
    compare_handle.close() |
Parse ENZYME records.
This function is for parsing ENZYME files containing multiple
records.
Arguments:
- handle - handle to the file. | def parse_enzyme_records(handle):
    """Parse ENZYME records.

    This function is for parsing ENZYME files containing multiple
    records.

    Arguments:
     - handle - handle to the file.

    """
    while True:
        record = read_enzyme_record(handle)
        if not record:
            break
        yield record |
Read a single Enzyme record.
Enzyme record read format is adapted from Bio.ExPASy.Enzyme, but must be
able to read an accession field that is not used by Bio.ExPASy.Enzyme. | def read_enzyme_record(handle):
    """Read a single Enzyme record.

    Enzyme record read format is adapted from Bio.ExPASy.Enzyme, but must be
    able to read an accession field that is not used by Bio.ExPASy.Enzyme.
    """
    record = None
    for line in handle:
        key, value = line[:2], line[5:].rstrip()
        if key == "ID":
            record = {"ID": value}
        elif key == "AC":
            record["AC"] = value
        elif key == "//":
            if record:
                return record
            else:  # This was the copyright notice
                continue
    if record:
        raise ValueError("Unexpected end of stream") |
Load enzyme identifiers from Bairoch-format file. | def load_enzyme_ids(file) -> dict[str, int]:
    """Load enzyme identifiers from Bairoch-format file."""
    with open(file) as in_file:
        return {
            record["ID"]: int(record["AC"].removeprefix("RB").removesuffix(";"))
            for record in parse_enzyme_records(in_file)
        } |
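A minimal sketch of the Bairoch-style record layout these three helpers expect, fed from an in-memory handle with hypothetical data:

    from io import StringIO

    data = "ID   EcoRI\nAC   RB1234;\n//\nID   BamHI\nAC   RB5678;\n//\n"
    for record in parse_enzyme_records(StringIO(data)):
        print(record["ID"], record["AC"])  # EcoRI RB1234; then BamHI RB5678;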
Return string representation of value, preferring double quotes.
Used to produce Python code with double quotes.
Only special cases strings, tuples and lists so far. | def double_quote_repr(value):  # TODO similar not to produce long horizontal lists
    """Return string representation of value, preferring double quotes.

    Used to produce Python code with double quotes.
    Only special cases strings, tuples and lists so far.
    """
    if isinstance(value, str):
        if '"' not in value:
            return f'"{value}"'
    elif isinstance(value, tuple):
        if len(value) == 1:
            # Need trailing comma
            return "(%s,)" % double_quote_repr(list(value)[0])
        else:
            return "(%s)" % ", ".join(double_quote_repr(_) for _ in value)
    elif isinstance(value, list):
        return "[%s]" % ", ".join(double_quote_repr(_) for _ in value)
    return repr(value) |
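A few illustrative calls (assuming double_quote_repr above is in scope):

    print(double_quote_repr("GAATTC"))         # "GAATTC"
    print(double_quote_repr(("A",)))           # ("A",) - trailing comma kept
    print(double_quote_repr(["A", ("C", 1)]))  # ["A", ("C", 1)]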
Construct a regular expression (string) from a DNA sequence.
Examples
--------
>>> regex('ABCGN')
'A[CGT]CG.' | def regex(site):
    """Construct a regular expression (string) from a DNA sequence.

    Examples
    --------
    >>> regex('ABCGN')
    'A[CGT]CG.'

    """
    reg_ex = str(site)
    for base in reg_ex:
        if base in ("A", "T", "C", "G", "a", "c", "g", "t"):
            pass
        if base in ("N", "n"):
            reg_ex = ".".join(reg_ex.split("N"))
            reg_ex = ".".join(reg_ex.split("n"))
        if base in ("R", "Y", "W", "M", "S", "K", "H", "D", "B", "V"):
            expand = "[" + amb_dna[base.upper()] + "]"
            reg_ex = expand.join(reg_ex.split(base))
    return reg_ex |
Check whether the sequence is a palindrome or not. | def is_palindrome(sequence):
    """Check whether the sequence is a palindrome or not."""
    return sequence == sequence.reverse_complement() |
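Here "palindrome" means the biological sense, equal to its own reverse complement; the EcoRI site is the classic example (assuming is_palindrome above is in scope):

    from Bio.Seq import Seq

    print(is_palindrome(Seq("GAATTC")))  # True - reverse complement is GAATTC
    print(is_palindrome(Seq("GATTC")))   # False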
Set up for running as main. | def standalone():
    """Set up for running as main."""
    parser = optparse.OptionParser()
    add = parser.add_option
    add(
        "-i",
        "--install",
        action="store_true",
        dest="i",
        default=False,
        help="compile and install the newly created file. "
        "default behaviour (without switch): "
        "Compile the enzymes and store them in the Updates folder",
    )
    options, args = parser.parse_args()
    return options, args |
Download Rebase and LinkOut files. | def get_files():
    """Download Rebase and LinkOut files."""
    print(f"Preparing to download {len(files)} files")
    for file in files:
        print(f"copying {file}")
        fn = os.path.basename(file)
        filename = os.path.join(os.getcwd(), fn)
        print(f"to {filename}")
        try:
            urlretrieve(file, filename)
            urlcleanup()
        except OSError as e:
            print(e)
            print(
                "Download of Rebase files failed. Please download the files "
                f'"emboss_e.{release_number}", "emboss_s.{release_number}", '
                f'"emboss_r.{release_number}", and "bairoch.{release_number}" manually '
                "from: ftp://ftp.neb.com/pub/rebase."
            )
            return |
Clear the output window. | def clear_output():
    """Clear the output window."""
    input_text.delete(1.0, tk.END)
    output_text.delete(1.0, tk.END) |
Do the selected operation. | def apply_operation():
    """Do the selected operation."""
    codon_table = codon_list.get(codon_list.curselection())
    print(f"Code: {codon_table}")
    seq = "".join(input_text.get(1.0, tk.END).split())
    print(f"Input sequence: {seq}")
    operation = transform_var.get()
    print(f"Operation: {operation}")
    if operation == "transcribe":
        result = transcribe(seq)
    elif operation == "translate":
        result = translate(seq, table=codon_table, to_stop=True)
    elif operation == "back transcribe":
        result = back_transcribe(seq)
    else:
        result = ""
    output_text.delete(1.0, tk.END)
    output_text.insert(tk.END, result)
    print(f"Result: {result}") |
Show statusbar comments from menu selection. | def set_statusbar(event):
    """Show statusbar comments from menu selection."""
    index = main_window.call(event.widget, "index", "active")
    if index == 0:
        statustext.set("More information about this program")
    elif index == 2:
        statustext.set("Terminate the program")
    else:
        statustext.set("This is the statusbar") |
Load the database settings from INI file. | def load_biosql_ini(DBTYPE):
    """Load the database settings from INI file."""
    if not os.path.isfile("biosql.ini"):
        raise MissingExternalDependencyError(
            "BioSQL test configuration file biosql.ini missing (see biosql.ini.sample)"
        )
    config = configparser.ConfigParser()
    config.read("biosql.ini")
    DBHOST = config.get(DBTYPE, "dbhost")
    DBUSER = config.get(DBTYPE, "dbuser")
    DBPASSWD = config.get(DBTYPE, "dbpasswd")
    TESTDB = config.get(DBTYPE, "testdb")
    return DBHOST, DBUSER, DBPASSWD, TESTDB |
Generate a temporary filename for SQLite database. | def temp_db_filename():
    """Generate a temporary filename for SQLite database."""
    # In memory SQLite does not work with current test structure since the tests
    # expect databases to be retained between individual tests.
    # TESTDB = ':memory:'
    # Instead, we use (if we can) /dev/shm
    try:
        h, test_db_fname = tempfile.mkstemp("_BioSQL.db", dir="/dev/shm")
    except OSError:
        # We can't use /dev/shm
        h, test_db_fname = tempfile.mkstemp("_BioSQL.db")
    os.close(h)
    return test_db_fname |
Verify the database settings work for connecting. | def check_config(dbdriver, dbtype, dbhost, dbuser, dbpasswd, testdb):
    """Verify the database settings work for connecting."""
    global DBDRIVER, DBTYPE, DBHOST, DBUSER, DBPASSWD, TESTDB, DBSCHEMA
    global SYSTEM, SQL_FILE
    DBDRIVER = dbdriver
    DBTYPE = dbtype
    DBHOST = dbhost
    DBUSER = dbuser
    DBPASSWD = dbpasswd
    TESTDB = testdb
    if not DBDRIVER or not DBTYPE or not DBUSER:
        # No point going any further...
        raise MissingExternalDependencyError("Incomplete BioSQL test settings")
    # Check the database driver is installed:
    if SYSTEM == "Java":
        try:
            if DBDRIVER in ["MySQLdb"]:
                import com.mysql.jdbc.Driver  # noqa: F401
            elif DBDRIVER in ["psycopg2", "pgdb"]:
                import org.postgresql.Driver  # noqa: F401
        except ImportError:
            message = f"Install the JDBC driver for {DBTYPE} to use BioSQL "
            raise MissingExternalDependencyError(message) from None
    else:
        try:
            __import__(DBDRIVER)
        except ImportError:
            if DBDRIVER in ["MySQLdb"]:
                message = (
                    "Install MySQLdb or mysqlclient if you want to use %s with BioSQL "
                    % (DBTYPE)
                )
            else:
                message = f"Install {DBDRIVER} if you want to use {DBTYPE} with BioSQL "
            raise MissingExternalDependencyError(message) from None
    try:
        if DBDRIVER in ["sqlite3"]:
            server = BioSeqDatabase.open_database(driver=DBDRIVER, db=TESTDB)
        else:
            server = BioSeqDatabase.open_database(
                driver=DBDRIVER, host=DBHOST, user=DBUSER, passwd=DBPASSWD
            )
        server.close()
        del server
    except Exception as e:
        message = f"Connection failed, check settings if you plan to use BioSQL: {e}"
        raise MissingExternalDependencyError(message) from None
    DBSCHEMA = "biosqldb-" + DBTYPE + ".sql"
    SQL_FILE = os.path.join(os.getcwd(), "BioSQL", DBSCHEMA)
    if not os.path.isfile(SQL_FILE):
        message = f"Missing SQL schema file: {SQL_FILE}"
        raise MissingExternalDependencyError(message) |
Cleanup everything from TESTDB.
Relevant for MySQL and PostgreSQL. | def _do_db_cleanup():
    """Cleanup everything from TESTDB.

    Relevant for MySQL and PostgreSQL.
    """
    if DBDRIVER in ["psycopg2", "pgdb"]:
        # first open a connection to the database
        # notice that postgres doesn't have createdb privileges, so
        # the TESTDB must exist
        server = BioSeqDatabase.open_database(
            driver=DBDRIVER, host=DBHOST, user=DBUSER, passwd=DBPASSWD, db=TESTDB
        )
        # The pgdb postgres driver does not support autocommit, so here we
        # commit the current transaction so that 'drop database' query will
        # be outside a transaction block
        server.adaptor.cursor.execute("COMMIT")
        # drop anything in the database
        # with Postgres, can get errors about database still being used.
        # Wait briefly to be sure previous tests are done with it.
        time.sleep(1)
        # drop anything in the database
        sql = r"DROP OWNED BY " + DBUSER
        server.adaptor.cursor.execute(sql, ())
        server.close()
    else:
        # first open a connection to create the database
        server = BioSeqDatabase.open_database(
            driver=DBDRIVER, host=DBHOST, user=DBUSER, passwd=DBPASSWD
        )
        # Auto-commit
        try:
            server.adaptor.autocommit()
        except AttributeError:
            pass
        # drop the database
        try:
            sql = r"DROP DATABASE " + TESTDB
            server.adaptor.cursor.execute(sql, ())
        except (
            server.module.OperationalError,
            server.module.Error,
            server.module.DatabaseError,
        ):  # the database doesn't exist
            pass
        except (
            server.module.IntegrityError,
            server.module.ProgrammingError,
        ) as e:  # ditto--perhaps
            if str(e).find(f'database "{TESTDB}" does not exist') == -1:
                server.close()
                raise
        # create a new database
        sql = r"CREATE DATABASE " + TESTDB
        server.adaptor.execute(sql, ())
        server.close() |
Delete any existing BioSQL test DB, then (re)create an empty BioSQL DB.
Returns TESTDB name which will change for SQLite. | def create_database():
    """Delete any existing BioSQL test DB, then (re)create an empty BioSQL DB.

    Returns TESTDB name which will change for SQLite.
    """
    if DBDRIVER in ["sqlite3"]:
        global TESTDB
        if os.path.exists(TESTDB):
            try:
                os.remove(TESTDB)
            except Exception:
                time.sleep(1)
                try:
                    os.remove(TESTDB)
                except Exception:
                    # Seen this with PyPy 2.1 (and older) on Windows -
                    # which suggests an open handle still exists?
                    print(f"Could not remove {TESTDB!r}")
        # Now pick a new filename - just in case there is a stale handle
        # (which might be happening under Windows...)
        TESTDB = temp_db_filename()
    else:
        _do_db_cleanup()
    # now open a connection to load the database
    server = BioSeqDatabase.open_database(
        driver=DBDRIVER, user=DBUSER, passwd=DBPASSWD, host=DBHOST, db=TESTDB
    )
    try:
        server.load_database_sql(SQL_FILE)
        server.commit()
        server.close()
    except Exception:
        # Failed, but must close the handle...
        server.close()
        raise
    return TESTDB |
Delete any temporary BioSQL sqlite3 database files. | def destroy_database():
    """Delete any temporary BioSQL sqlite3 database files."""
    if DBDRIVER in ["sqlite3"]:
        if os.path.exists(TESTDB):
            os.remove(TESTDB) |
Load a GenBank file into a new BioSQL database.
This is useful for running tests against a newly created database. | def load_database(gb_filename_or_handle):
    """Load a GenBank file into a new BioSQL database.

    This is useful for running tests against a newly created database.
    """
    TESTDB = create_database()
    # now open a connection to load the database
    db_name = "biosql-test"
    server = BioSeqDatabase.open_database(
        driver=DBDRIVER, user=DBUSER, passwd=DBPASSWD, host=DBHOST, db=TESTDB
    )
    db = server.new_database(db_name)
    # get the GenBank file we are going to put into it
    iterator = SeqIO.parse(gb_filename_or_handle, "gb")
    records = []
    for record in iterator:
        if record.annotations.get("molecule_type") == "mRNA":
            record.annotations["molecule_type"] = "DNA"
        records.append(record)
    # finally put it in the database
    count = db.load(records)
    server.commit()
    server.close()
    return count |
Load two GenBank files into a new BioSQL database as different subdatabases.
This is useful for running tests against a newly created database. | def load_multi_database(gb_filename_or_handle, gb_filename_or_handle2):
    """Load two GenBank files into a new BioSQL database as different subdatabases.

    This is useful for running tests against a newly created database.
    """
    TESTDB = create_database()
    # now open a connection to load the database
    db_name = "biosql-test"
    db_name2 = "biosql-test2"
    server = BioSeqDatabase.open_database(
        driver=DBDRIVER, user=DBUSER, passwd=DBPASSWD, host=DBHOST, db=TESTDB
    )
    db = server.new_database(db_name)
    # get the GenBank file we are going to put into it
    iterator = SeqIO.parse(gb_filename_or_handle, "gb")
    count = db.load(iterator)
    db = server.new_database(db_name2)
    # get the GenBank file we are going to put into it
    iterator = SeqIO.parse(gb_filename_or_handle2, "gb")
    # finally put it in the database
    count2 = db.load(iterator)
    server.commit()
    server.close()
    return count + count2 |
Make sure we can access the DB settings from this file. | def share_config(dbdriver, dbtype, dbhost, dbuser, dbpasswd, testdb):
    """Make sure we can access the DB settings from this file."""
    global DBDRIVER, DBTYPE, DBHOST, DBUSER, DBPASSWD, TESTDB, DBSCHEMA
    global SYSTEM, SQL_FILE
    DBDRIVER = dbdriver
    DBTYPE = dbtype
    DBHOST = dbhost
    DBUSER = dbuser
    DBPASSWD = dbpasswd
    TESTDB = testdb |
Run tests, return number of failures (integer). | def main(argv):
    """Run tests, return number of failures (integer)."""
    # Using "export LANG=C" (which should work on Linux and similar) can
    # avoid problems detecting optional command line tools on
    # non-English OS (we may want 'command not found' in English).
    # HOWEVER, we do not want to change the default encoding which is
    # rather important on Python 3 with unicode.
    # lang = os.environ['LANG']
    # get the command line options
    try:
        opts, args = getopt.getopt(
            argv, "gv", ["generate", "verbose", "doctest", "help", "offline"]
        )
    except getopt.error as msg:
        print(msg)
        print(__doc__)
        return 2
    verbosity = VERBOSITY
    # deal with the options
    for opt, _ in opts:
        if opt == "--help":
            print(__doc__)
            return 0
        if opt == "--offline":
            print("Skipping any tests requiring internet access")
            EXCLUDE_DOCTEST_MODULES.extend(ONLINE_DOCTEST_MODULES)
            # This is a bit of a hack...
            import requires_internet

            requires_internet.check.available = False
            # Monkey patch for urlopen()
            import urllib.request

            def dummy_urlopen(url):
                raise RuntimeError(
                    "Internal test suite error, attempting to use internet despite --offline setting"
                )

            urllib.request.urlopen = dummy_urlopen
        if opt == "-v" or opt == "--verbose":
            verbosity = 2
    # deal with the arguments, which should be names of tests to run
    for arg_num in range(len(args)):
        # strip off the .py if it was included
        if args[arg_num][-3:] == ".py":
            args[arg_num] = args[arg_num][:-3]
    print(f"Python version: {sys.version}")
    print(f"Operating system: {os.name} {sys.platform}")
    # run the tests
    runner = TestRunner(args, verbosity)
    return runner.run() |
Return the number of instance attributes present only in one object. | def _num_difference(obj_a, obj_b):
    """Return the number of instance attributes present only in one object."""
    attrs_a = set(obj_a.__dict__)
    attrs_b = set(obj_b.__dict__)
    diff = attrs_a.symmetric_difference(attrs_b)
    privates = len([x for x in diff if x.startswith("_")])
    return len(diff) - privates |
Create an Alignment object from a list of sequences. | def createAlignment(sequences):
    """Create an Alignment object from a list of sequences."""
    return Alignment(
        [
            SeqRecord(Seq(s), id="sequence%i" % (i + 1))
            for (i, s) in enumerate(sequences)
        ]
    ) |
Create a MultipleSeqAlignment object from a list of sequences. | def createMultipleSeqAlignment(sequences):
    """Create a MultipleSeqAlignment object from a list of sequences."""
    return MultipleSeqAlignment(
        SeqRecord(Seq(s), id="sequence%i" % (i + 1)) for (i, s) in enumerate(sequences)
    ) |
Return a tuple of three ints, e.g. (6,1,0). | def get_emboss_version():
    """Return a tuple of three ints, e.g. (6,1,0)."""
    # Windows and Unix versions of EMBOSS seem to differ in
    # which lines go to stdout and stderr - so merge them.
    child = subprocess.Popen(
        _escape_filename(exes["embossversion"]),
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        universal_newlines=True,
        shell=(sys.platform != "win32"),
    )
    stdout, stderr = child.communicate()
    child.stdout.close()  # This is both stdout and stderr
    del child
    assert stderr is None  # Send to stdout instead
    for line in stdout.split("\n"):
        if line.strip() == "Report the current EMBOSS version number":
            # e.g.
            # $ embossversion
            # Report the current EMBOSS version number
            # 6.5.7.0
            pass
        elif line.strip() == "Reports the current EMBOSS version number":
            # e.g.
            # $ embossversion
            # Reports the current EMBOSS version number
            # 6.3.1
            pass
        elif line.startswith("Writes the current EMBOSS version number"):
            pass
        elif line.count(".") == 2:
            return tuple(int(v) for v in line.strip().split("."))
        elif line.count(".") == 3:
            # e.g. I installed mEMBOSS-6.2.0.1-setup.exe
            # which reports 6.2.0.1 - for this return (6,2,0)
            return tuple(int(v) for v in line.strip().split("."))[:3]
        else:
            # Either we can't understand the output, or this is really
            # an error message not caught earlier (e.g. not in English)
            raise MissingExternalDependencyError(
                f"Install EMBOSS if you want to use Bio.Emboss ({line})."
            )
    # In case there was no output at all...
    raise MissingExternalDependencyError("Could not get EMBOSS version") |
Run seqret, returns records (as a generator). | def emboss_piped_SeqIO_convert(records, old_format, new_format):
    """Run seqret, returns records (as a generator)."""
    # Setup, this assumes for all the format names used
    # Biopython and EMBOSS names are consistent!
    cline = SeqretCommandline(
        exes["seqret"],
        sformat=old_format,
        osformat=new_format,
        auto=True,  # no prompting
        filter=True,
    )
    # Run the tool,
    child = subprocess.Popen(
        str(cline),
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        universal_newlines=True,
        shell=(sys.platform != "win32"),
    )
    SeqIO.write(records, child.stdin, old_format)
    child.stdin.close()
    child.stderr.close()
    records = SeqIO.parse(child.stdout, new_format)
    yield from records
    child.stdout.close() |
Run seqret, returns alignments (as a generator). | def emboss_piped_AlignIO_convert(alignments, old_format, new_format):
    """Run seqret, returns alignments (as a generator)."""
    # Setup, this assumes for all the format names used
    # Biopython and EMBOSS names are consistent!
    cline = SeqretCommandline(
        exes["seqret"],
        sformat=old_format,
        osformat=new_format,
        auto=True,  # no prompting
        filter=True,
    )
    # Run the tool,
    with subprocess.Popen(
        str(cline),
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        universal_newlines=True,
        shell=(sys.platform != "win32"),
    ) as child:
        AlignIO.write(alignments, child.stdin, old_format)
        child.stdin.close()
        aligns = list(AlignIO.parse(child.stdout, new_format))
    return aligns |
Fallback clean up method to remove temp files. | def clean_up():
    """Fallback clean up method to remove temp files."""
    for filename in os.listdir("Emboss"):
        if filename.startswith("temp_"):
            try:
                os.remove(filename)
            except Exception:  # TODO - Which exceptions?
                pass |
Delete test files (to be used as tearDown() function in test fixtures). | def clean_up():
    """Delete test files (to be used as tearDown() function in test fixtures)."""
    for filename in ["test_file", "Phylip/opuntia.phy", "Phylip/hedgehog.phy"]:
        if os.path.isfile(filename):
            os.remove(filename) |
Parse trees.
Helper function until we have Bio.Phylo on trunk. | def parse_trees(filename):
    """Parse trees.

    Helper function until we have Bio.Phylo on trunk.
    """
    # TODO - Can this be removed now?
    with open("test_file") as handle:
        data = handle.read()
    for tree_str in data.split(";\n"):
        if tree_str:
            yield Trees.Tree(tree_str + ";") |
Convert a parsed URL back to string but only include scheme, netloc, and path, omitting query. | def get_base_url(parsed):
    """Convert a parsed URL back to string but only include scheme, netloc, and path, omitting query."""
    return parsed.scheme + "://" + parsed.netloc + parsed.path |
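For example (assuming get_base_url above is in scope):

    from urllib.parse import urlparse

    parsed = urlparse("https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?db=pubmed&term=biopython")
    print(get_base_url(parsed))  # https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi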
Create a mocked version of a response object returned by urlopen().
:param int code: Value of "code" attribute.
:param str content_type: Used to set the "Content-Type" header in the "headers" attribute. This
is checked in Entrez._open() to determine if the response data is plain text. | def mock_httpresponse(code=200, content_type="/xml"):
    """Create a mocked version of a response object returned by urlopen().

    :param int code: Value of "code" attribute.
    :param str content_type: Used to set the "Content-Type" header in the "headers" attribute. This
        is checked in Entrez._open() to determine if the response data is plain text.
    """
    resp = mock.NonCallableMock()
    resp.code = code
    resp.headers = HTTPMessage()
    resp.headers.add_header("Content-Type", content_type + "; charset=UTF-8")
    return resp |
Create a context manager which replaces Bio.Entrez.urlopen with a mocked version.
Within the decorated function, Bio.Entrez.urlopen will be replaced with a unittest.mock.Mock
object which when called simply records the arguments passed to it and returns a mocked response
object. The actual urlopen function will not be called so no request will actually be made. | def patch_urlopen(**kwargs):
    """Create a context manager which replaces Bio.Entrez.urlopen with a mocked version.

    Within the decorated function, Bio.Entrez.urlopen will be replaced with a unittest.mock.Mock
    object which when called simply records the arguments passed to it and returns a mocked response
    object. The actual urlopen function will not be called so no request will actually be made.
    """
    response = mock_httpresponse(**kwargs)
    return unittest.mock.patch("Bio.Entrez.urlopen", return_value=response) |
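A sketch of how these helpers combine in a test; the query and email address are hypothetical, and no real request is sent because urlopen is mocked:

    from Bio import Entrez

    Entrez.email = "me@example.org"  # hypothetical address; always set your own
    with patch_urlopen() as mocked:
        Entrez.efetch(db="nucleotide", id="AF158246", rettype="gb")
        request = get_patched_request(mocked)
        base_url, params = deconstruct_request(request)
        print(base_url, params["id"])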
Get the Request object passed to the patched urlopen() function.
Expects that the patched function should have been called a single time with a Request instance
as the only positional argument and no keyword arguments.
:param patched_urlopen: value returned when entering the context manager created by patch_urlopen.
:type patched_urlopen: unittest.mock.Mock
:param testcase: Test case currently being run, which is used to make asserts.
:type testcase: unittest.TestCase
:rtype: urllib.request.Request | def get_patched_request(patched_urlopen, testcase=None):
    """Get the Request object passed to the patched urlopen() function.

    Expects that the patched function should have been called a single time with a Request instance
    as the only positional argument and no keyword arguments.

    :param patched_urlopen: value returned when entering the context manager created by patch_urlopen.
    :type patched_urlopen: unittest.mock.Mock
    :param testcase: Test case currently being run, which is used to make asserts.
    :type testcase: unittest.TestCase
    :rtype: urllib.request.Request
    """
    args, kwargs = patched_urlopen.call_args
    if testcase is not None:
        testcase.assertEqual(patched_urlopen.call_count, 1)
        testcase.assertEqual(len(args), 1)
        testcase.assertEqual(len(kwargs), 0)
        testcase.assertIsInstance(args[0], Request)
    return args[0] |
Get the base URL and parsed parameters of a Request object.
Method may be either GET or POST; POST data should be encoded query params.
:param request: Request object passed to urlopen().
:type request: urllib.request.Request
:param testcase: Test case currently being run, which is used to make asserts.
:type testcase: unittest.TestCase
:returns: (base_url, params) tuple. | def deconstruct_request(request, testcase=None):
    """Get the base URL and parsed parameters of a Request object.

    Method may be either GET or POST; POST data should be encoded query params.

    :param request: Request object passed to urlopen().
    :type request: urllib.request.Request
    :param testcase: Test case currently being run, which is used to make asserts.
    :type testcase: unittest.TestCase
    :returns: (base_url, params) tuple.
    """
    parsed = urlparse(request.full_url)
    if request.method == "GET":
        params = parse_qs(parsed.query)
    elif request.method == "POST":
        data = request.data.decode("utf8")
        params = parse_qs(data)
    else:
        raise ValueError(
            "Expected method to be either GET or POST, got %r" % request.method
        )
    return get_base_url(parsed), params |
Check that the constructed request parameters contain the correct IDs.
:param testcase: Test case currently being run, which is used to make asserts.
:type testcase: unittest.TestCase
:param params: Parsed parameter dictionary returned by `deconstruct_request`.
:type params: dict
:param expected: Expected set of IDs, as collection of strings. | def check_request_ids(testcase, params, expected):
    """Check that the constructed request parameters contain the correct IDs.

    :param testcase: Test case currently being run, which is used to make asserts.
    :type testcase: unittest.TestCase
    :param params: Parsed parameter dictionary returned by `deconstruct_request`.
    :type params: dict
    :param expected: Expected set of IDs, as collection of strings.
    """
    testcase.assertEqual(len(params["id"]), 1)
    ids_str = params["id"][0]
    # Compare up to ordering
    testcase.assertCountEqual(ids_str.split(","), expected) |
Return fill and border colors given a base color. | def fill_and_border(base_color, alpha=0.5):
    """Return fill and border colors given a base color."""
    try:
        c = base_color.clone()
        c.alpha = alpha
        return c, base_color
    except AttributeError:
        # Old ReportLab, no transparency and/or no clone
        return base_color, base_color |
Apply function to windows of the given sequence.
Returns a list of (position, value) tuples for fragments of the passed
sequence of length window_size (stepped by step), calculated by the passed
function. Returned positions are the midpoint of each window.
- sequence - Bio.Seq.Seq object.
- window_size - an integer describing the length of sequence to consider.
- step - an integer describing the step to take between windows
(default = window_size//2).
- function - Method or function that accepts a Bio.Seq.Seq object
as its sole argument and returns a single value. | def apply_to_window(sequence, window_size, function, step=None):
    """Apply function to windows of the given sequence.

    Returns a list of (position, value) tuples for fragments of the passed
    sequence of length window_size (stepped by step), calculated by the passed
    function. Returned positions are the midpoint of each window.

     - sequence - Bio.Seq.Seq object.
     - window_size - an integer describing the length of sequence to consider.
     - step - an integer describing the step to take between windows
       (default = window_size//2).
     - function - Method or function that accepts a Bio.Seq.Seq object
       as its sole argument and returns a single value.

    """
    seqlen = len(sequence)  # Total length of sequence to be used
    if step is None:  # No step specified, so use half window-width or 1 if larger
        step = max(window_size // 2, 1)
    else:  # Use specified step, or 1 if greater
        step = max(step, 1)
    results = []  # Holds (position, value) results
    # Perform the passed function on as many windows as possible, short of
    # overrunning the sequence
    pos = 0
    while pos < seqlen - window_size + 1:
        # Obtain sequence fragment
        start, middle, end = pos, (pos + window_size + pos) // 2, pos + window_size
        fragment = sequence[start:end]
        # Apply function to the sequence fragment
        value = function(fragment)
        results.append((middle, value))  # Add results to list
        # Advance to next fragment
        pos += step
    # Use the last available window on the sequence, even if it means
    # re-covering old ground
    if pos != seqlen - window_size:
        # Obtain sequence fragment
        pos = seqlen - window_size
        start, middle, end = pos, (pos + window_size + pos) // 2, pos + window_size
        fragment = sequence[start:end]
        # Apply function to sequence fragment
        value = function(fragment)
        results.append((middle, value))  # Add results to list
    return results |
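For example, windowed GC fraction over a short sequence, using the calc_gc_content helper defined just below (a plain string works too, since only len(), slicing and count() are used):

    seq = "ATGCGCGCATATATGCGC"
    for midpoint, gc in apply_to_window(seq, window_size=6, function=calc_gc_content, step=3):
        print(midpoint, round(gc, 2))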
Return the % G+C content in a passed sequence.
Arguments:
- sequence - a Bio.Seq.Seq object.
calc_gc_content(sequence) | def calc_gc_content(sequence):
    """Return the % G+C content in a passed sequence.

    Arguments:
     - sequence - a Bio.Seq.Seq object.

    calc_gc_content(sequence)
    """
    d = {}
    for nt in ["A", "T", "G", "C"]:
        d[nt] = sequence.count(nt) + sequence.count(nt.lower())
    gc = d.get("G", 0) + d.get("C", 0)
    if gc == 0:
        return 0
    return gc / (d["A"] + d["T"] + gc) |
Return the % A+T content in a passed sequence.
Arguments:
- sequence - a Bio.Seq.Seq object.
calc_at_content(sequence) | def calc_at_content(sequence):
    """Return the % A+T content in a passed sequence.

    Arguments:
     - sequence - a Bio.Seq.Seq object.

    calc_at_content(sequence)
    """
    d = {}
    for nt in ["A", "T", "G", "C"]:
        d[nt] = sequence.count(nt) + sequence.count(nt.lower())
    at = d.get("A", 0) + d.get("T", 0)
    if at == 0:
        return 0
    # Denominator must add the G and C counts (mirroring calc_gc_content);
    # the original summed d["G"] twice.
    return at / (d["G"] + d["C"] + at) |
Return the (G-C)/(G+C) GC skew in a passed sequence.
Arguments:
- sequence - a Bio.Seq.Seq object.
calc_gc_skew(sequence) | def calc_gc_skew(sequence):
    """Return the (G-C)/(G+C) GC skew in a passed sequence.

    Arguments:
     - sequence - a Bio.Seq.Seq object.

    calc_gc_skew(sequence)
    """
    g = sequence.count("G") + sequence.count("g")
    c = sequence.count("C") + sequence.count("c")
    if g + c == 0:
        return 0.0  # TODO - return NaN or None here?
    else:
        return (g - c) / (g + c) |
Return the (A-T)/(A+T) AT skew in a passed sequence.
Arguments:
- sequence - a Bio.Seq.Seq object.
calc_at_skew(sequence) | def calc_at_skew(sequence):
    """Return the (A-T)/(A+T) AT skew in a passed sequence.

    Arguments:
     - sequence - a Bio.Seq.Seq object.

    calc_at_skew(sequence)
    """
    a = sequence.count("A") + sequence.count("a")
    t = sequence.count("T") + sequence.count("t")
    if a + t == 0:
        return 0.0  # TODO - return NaN or None here?
    else:
        return (a - t) / (a + t) |
Return the total count of di-nucleotides repeats (e.g. "AA", "CC").
This is purely for the sake of generating some non-random sequence
based score for plotting, with no expected biological meaning.
NOTE - Only considers same case pairs.
NOTE - "AA" scores 1, "AAA" scores 2, "AAAA" scores 3 etc. | def calc_dinucleotide_counts(sequence):
    """Return the total count of di-nucleotides repeats (e.g. "AA", "CC").

    This is purely for the sake of generating some non-random sequence
    based score for plotting, with no expected biological meaning.

    NOTE - Only considers same case pairs.
    NOTE - "AA" scores 1, "AAA" scores 2, "AAAA" scores 3 etc.
    """
    total = 0
    for letter in "ACTGUactgu":
        total += sequence.count(letter + letter)
    return total |
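Two quick checks of the scoring rule (assuming calc_dinucleotide_counts above is in scope; note that count() is non-overlapping, so the docstring's scores for runs longer than two letters would only hold with an overlapping count):

    print(calc_dinucleotide_counts("AATT"))  # 2 - one "AA" plus one "TT"
    print(calc_dinucleotide_counts("AaTt"))  # 0 - mixed-case pairs are ignored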
Load a chromosome and all of its segments. | def load_chromosome(chr_name):
    """Load a chromosome and all of its segments."""
    cur_chromosome = BasicChromosome.Chromosome(chr_name)
    chr_segment_info = all_chr_info[chr_name]
    for seg_info_num in range(len(chr_segment_info)):
        label, fill_color, scale = chr_segment_info[seg_info_num]
        # make the top and bottom telomeres
        if seg_info_num == 0:
            cur_segment = BasicChromosome.TelomereSegment()
        elif seg_info_num == len(chr_segment_info) - 1:
            cur_segment = BasicChromosome.TelomereSegment(1)
        # otherwise, they are just regular segments
        else:
            cur_segment = BasicChromosome.ChromosomeSegment()
        if label != "":
            cur_segment.label = label
        if fill_color is not None:
            cur_segment.fill_color = fill_color
        cur_segment.scale = scale
        cur_chromosome.add(cur_segment)
    # scale by the size of chromosome 2
    cur_chromosome.scale_num = 19
    return cur_chromosome |
Generate a random id number. | def get_random_id():
    """Generate a random id number."""
    id = ""
    for n in range(6):
        letter = random.choice(letter_choices)
        id += letter
    return id |
Generate a chromosome with random information about it. | def load_random_chromosome(chr_name):
    """Generate a chromosome with random information about it."""
    cur_chromosome = BasicChromosome.Chromosome(chr_name)
    num_segments = random.randrange(num_possible_segments)
    for seg in range(num_segments):
        # make the top and bottom telomeres
        if seg == 0:
            cur_segment = BasicChromosome.TelomereSegment()
        elif seg == num_segments - 1:
            cur_segment = BasicChromosome.TelomereSegment(1)
        # otherwise, they are just regular segments
        else:
            cur_segment = BasicChromosome.ChromosomeSegment()
        color_chance = random.random()
        if color_chance <= color_prob:
            fill_color = random.choice(color_choices)
            cur_segment.fill_color = fill_color
        id_chance = random.random()
        if id_chance <= id_prob:
            id = get_random_id()
            cur_segment.label = id
        cur_chromosome.add(cur_segment)
    return cur_chromosome, num_segments |
Create a series of random distribution information. | def random_distribution(min=-5.0, max=5.0, total_items=50):
    """Create a series of random distribution information."""
    num_items = random.randrange(5, total_items)
    all_info = []
    for item in range(num_items):
        new_item = random.uniform(min, max)
        all_info.append(new_item)
    return all_info |
Generate a bunch of rolls corresponding to the casino probabilities.
Returns:
- The generated roll sequence
- The state sequence that generated the roll. | def generate_rolls(num_rolls):
    """Generate a bunch of rolls corresponding to the casino probabilities.

    Returns:
     - The generated roll sequence
     - The state sequence that generated the roll.

    """
    # start off in the fair state
    cur_state = "F"
    roll_seq = []
    state_seq = []
    loaded_weights = [0.1, 0.1, 0.1, 0.1, 0.1, 0.5]
    # generate the sequence
    for roll in range(num_rolls):
        state_seq.append(cur_state)
        # add on a new roll to the sequence
        if cur_state == "F":
            new_rolls = random.choices(dice_roll_alphabet)
        elif cur_state == "L":
            new_rolls = random.choices(dice_roll_alphabet, weights=loaded_weights)
        new_roll = new_rolls[0]
        roll_seq.append(new_roll)
        # now give us a chance to switch to a new state
        chance_num = random.random()
        if cur_state == "F":
            if chance_num <= 0.05:
                cur_state = "L"
        elif cur_state == "L":
            if chance_num <= 0.1:
                cur_state = "F"
    return roll_seq, state_seq |
No action callback function, used when training the model. | def show_progress(iteration, loglikelihood):
    """No action callback function, used when training the model.""" |
Perform randomized tests and compare to pslMap.
Run this function to perform 8 x n mappings for alignments of randomly
generated sequences, get the alignment in PSL format, and compare the
result to that of pslMap. | def perform_randomized_tests(n=1000):
    """Perform randomized tests and compare to pslMap.

    Run this function to perform 8 x n mappings for alignments of randomly
    generated sequences, get the alignment in PSL format, and compare the
    result to that of pslMap.
    """
    aligner = PairwiseAligner()
    aligner.internal_open_gap_score = -1
    aligner.internal_extend_gap_score = -0.0
    aligner.match_score = +1
    aligner.mismatch_score = -1
    aligner.mode = "local"
    for i in range(n):
        nBlocks1 = random.randint(1, 10)
        nBlocks2 = random.randint(1, 10)
        test_random(aligner, nBlocks1, nBlocks2, "+", "+")
        test_random(aligner, nBlocks1, nBlocks2, "+", "-")
        test_random(aligner, nBlocks1, nBlocks2, "-", "+")
        test_random(aligner, nBlocks1, nBlocks2, "-", "-")
        test_random_sequences(aligner, "+", "+")
        test_random_sequences(aligner, "+", "-")
        test_random_sequences(aligner, "-", "+")
        test_random_sequences(aligner, "-", "-") |
Test if a file is an executable. | def is_exe(filepath):
    """Test if a file is an executable."""
    return os.path.exists(filepath) and os.access(filepath, os.X_OK) |
Find the path to an executable. | def which(program):
    """Find the path to an executable."""
    filepath, filename = os.path.split(program)
    os_path = os.environ["PATH"].split(os.pathsep)
    if sys.platform == "win32":
        try:
            # This can vary depending on the Windows language.
            prog_files = os.environ["PROGRAMFILES"]
        except KeyError:
            prog_files = r"C:\Program Files"
        # For Windows, the user is instructed to move the programs to a folder
        # and then to add the folder to the system path. Just in case they didn't
        # do that, we can check for it in Program Files.
        likely_dirs = [
            "",  # Current dir
            prog_files,
            os.path.join(prog_files, "paml41"),
            os.path.join(prog_files, "paml43"),
            os.path.join(prog_files, "paml44"),
            os.path.join(prog_files, "paml45"),
        ] + sys.path
        os_path.extend(likely_dirs)
    for path in os.environ["PATH"].split(os.pathsep):
        exe_file = os.path.join(path, program)
        if is_exe(exe_file):
            return exe_file
    return None |
Parse the DSSP version into a tuple from the tool output. | def parse_dssp_version(version_string):
    """Parse the DSSP version into a tuple from the tool output."""
    match = re.search(r"\s*([\d.]+)", version_string)
    if match:
        version = match.group(1)
        return tuple(map(int, version.split("."))) |
Convert the input into a float if it is a number.
If the input is a string, the output does not change. | def will_it_float(s):  # well played, whoever this was :)
    """Convert the input into a float if it is a number.

    If the input is a string, the output does not change.
    """
    try:
        return float(s)
    except ValueError:
        return s |
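For example (assuming will_it_float above is in scope):

    print(will_it_float("3.14"))  # 3.14 (a float)
    print(will_it_float("N/A"))   # 'N/A' (string unchanged)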
Return full residue identifier for thoroughly comparing residues. | def res_full_id(res: Residue):
    """Return full residue identifier for thoroughly comparing residues."""
    return (res.get_resname(), *res.get_id()) |
Generate a test method for read()ing the given source.
The generated function reads an example file to produce a phyloXML object,
then tests for existence of the root node, and counts the number of
phylogenies under the root. | def _test_read_factory(source, count):
    """Generate a test method for read()ing the given source.

    The generated function reads an example file to produce a phyloXML object,
    then tests for existence of the root node, and counts the number of
    phylogenies under the root.
    """
    fname = os.path.basename(source)

    def test_read(self):
        phx = PhyloXMLIO.read(source)
        self.assertTrue(phx)
        self.assertEqual(len(phx), count[0])
        self.assertEqual(len(phx.other), count[1])

    test_read.__doc__ = f"Read {fname} to produce a phyloXML object."
    return test_read |
Generate a test method for parse()ing the given source.
The generated function extracts each phylogenetic tree using the parse()
function and counts the total number of trees extracted. | def _test_parse_factory(source, count):
    """Generate a test method for parse()ing the given source.

    The generated function extracts each phylogenetic tree using the parse()
    function and counts the total number of trees extracted.
    """
    fname = os.path.basename(source)

    def test_parse(self):
        trees = PhyloXMLIO.parse(source)
        self.assertEqual(len(list(trees)), count)

    test_parse.__doc__ = f"Parse the phylogenies in {fname}."
    return test_parse |
Generate a test method for checking tree shapes.
Counts the branches at each level of branching in a phylogenetic tree, 3
clades deep. | def _test_shape_factory(source, shapes):
    """Generate a test method for checking tree shapes.

    Counts the branches at each level of branching in a phylogenetic tree, 3
    clades deep.
    """
    fname = os.path.basename(source)

    def test_shape(self):
        trees = PhyloXMLIO.parse(source)
        for tree, shape_expect in zip(trees, shapes):
            self.assertEqual(len(tree.clade), len(shape_expect))
            for clade, sub_expect in zip(tree.clade, shape_expect):
                self.assertEqual(len(clade), sub_expect[0])
                for subclade, len_expect in zip(clade, sub_expect[1]):
                    self.assertEqual(len(subclade), len_expect)

    test_shape.__doc__ = f"Check the branching structure of {fname}."
    return test_shape |
Generate a test method for parse()ing the given source.
The generated function extracts each phylogenetic tree using the parse()
function. | def _test_parse_factory(source):
    """Generate a test method for parse()ing the given source.

    The generated function extracts each phylogenetic tree using the parse()
    function.
    """
    filename = os.path.join("CDAO/", source)

    def test_parse(self):
        trees = list(bp._io.parse(filename, "cdao"))

    test_parse.__doc__ = f"Parse the phylogenies in {source}."
    return test_parse |
Test for serialization of objects to CDAO format.
Modifies the globally defined filenames in order to run the other parser
tests on files (re)generated by CDAOIO's own writer. | def _test_write_factory(source):
    """Test for serialization of objects to CDAO format.

    Modifies the globally defined filenames in order to run the other parser
    tests on files (re)generated by CDAOIO's own writer.
    """
    filename = os.path.join("CDAO/", source)

    def test_write(self):
        """Parse, rewrite and retest an example file."""
        with open(filename) as infile:
            t1 = next(CDAOIO.Parser(infile).parse())
        with open(DUMMY, "w") as outfile:
            CDAOIO.write([t1], outfile)
        with open(DUMMY) as infile:
            t2 = next(CDAOIO.Parser(infile).parse())
        for prop_name in ("name", "branch_length", "confidence"):
            p1 = [getattr(n, prop_name) for n in t1.get_terminals()]
            p2 = [getattr(n, prop_name) for n in t2.get_terminals()]
            if p1 == p2:
                pass
            else:
                # Can't sort lists with None on Python 3 ...
                self.assertNotIn(None, p1, f"Bad input values for {prop_name}: {p1!r}")
                self.assertNotIn(None, p2, f"Bad output values for {prop_name}: {p2!r}")
                self.assertEqual(sorted(p1), sorted(p2))

    test_write.__doc__ = f"Write and re-parse the phylogenies in {source}."
    return test_write |
Return the path of a test file. | def get_file(filename):
"""Return the path of a test file."""
return os.path.join(TEST_DIR, filename) |
Return a concise summary of an Alignment object as a string. | def alignment_summary(alignment, index=" "):
"""Return a concise summary of an Alignment object as a string."""
answer = []
alignment_len = alignment.get_alignment_length()
rec_count = len(alignment)
for i in range(min(5, alignment_len)):
answer.append(index + col_summary(alignment[:, i]) + " alignment column %i" % i)
if alignment_len > 5:
i = alignment_len - 1
answer.append(index + col_summary("|" * rec_count) + " ...")
answer.append(index + col_summary(alignment[:, i]) + " alignment column %i" % i)
return "\n".join(answer) |
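col_summary is defined elsewhere in the source; a minimal stand-in, assuming it only needs to render one alignment column string compactly, might look like:

def col_summary(col_text):
    """Shorten an alignment column string for display (illustrative stand-in)."""
    return col_text if len(col_text) <= 40 else col_text[:40] + "..."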
Convert a FASTA title line into the id, name, and description.
This is just a quick-n-dirty implementation, and is definitely not meant
to handle every FASTA title line case. | def title_to_ids(title):
"""Convert a FASTA title line into the id, name, and description.
This is just a quick-n-dirty implementation, and is definitely not meant
to handle every FASTA title line case.
"""
# first split the id information from the description
# the first item is the id info block, the rest is the description
all_info = title.split(" ")
id_info = all_info[0]
rest = all_info[1:]
descr = " ".join(rest)
# now extract the ids from the id block
# gi|5690369|gb|AF158246.1|AF158246
id_info_items = id_info.split("|")
if len(id_info_items) >= 5:
    assert id_info_items[2] in ["gb", "emb", "dbj", "pdb"], title
    id = id_info_items[3]  # the id with version info
    name = id_info_items[4]  # the id without version info
else:
# Fallback:
id = id_info_items[0]
name = id_info_items[0]
return id, name, descr |
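Applying it to the NCBI-style title shown in the comment above (the description text here is made up):

id, name, descr = title_to_ids("gi|5690369|gb|AF158246.1|AF158246 example description")
assert id == "AF158246.1" and name == "AF158246" and descr == "example description"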
Crude parser that gets the first record from a FASTA file. | def read_title_and_seq(filename):
"""Crude parser that gets the first record from a FASTA file."""
with open(filename) as handle:
title = handle.readline().rstrip()
assert title.startswith(">")
seq = ""
for line in handle:
if line.startswith(">"):
break
seq += line.strip()
return title[1:], seq |
Test factory for tests reading SEQRES (or similar) records.
This is a factory returning a parameterised superclass for tests reading
sequences from the sequence records of structure files.
Arguments:
extension:
The extension of the files to read from the ``PDB`` directory (e.g.
``pdb`` or ``cif``).
parser:
The name of the SeqIO parser to use (e.g. ``pdb-atom``). | def SeqresTestGenerator(extension, parser):
"""Test factory for tests reading SEQRES (or similar) records.
This is a factory returning a parameterised superclass for tests reading
sequences from the sequence records of structure files.
Arguments:
extension:
The extension of the files to read from the ``PDB`` directory (e.g.
``pdb`` or ``cif``).
parser:
The name of the SeqIO parser to use (e.g. ``pdb-atom``).
"""
class SeqresTests(unittest.TestCase):
"""Use "parser" to parse sequence records from a structure file.
Args:
parser (str): Name of the parser used by SeqIO.
extension (str): Extension of the files to parse.
"""
def test_seqres_parse(self):
"""Parse a multi-chain PDB by SEQRES entries.
Reference:
http://www.rcsb.org/pdb/files/fasta.txt?structureIdList=2BEG
"""
chains = list(SeqIO.parse("PDB/2BEG." + extension, parser))
self.assertEqual(len(chains), 5)
actual_seq = "DAEFRHDSGYEVHHQKLVFFAEDVGSNKGAIIGLMVGGVVIA"
for chain, chn_id in zip(chains, "ABCDE"):
self.assertEqual(chain.id, "2BEG:" + chn_id)
self.assertEqual(chain.annotations["chain"], chn_id)
self.assertEqual(chain.seq, actual_seq)
def test_seqres_read(self):
"""Read a single-chain structure by sequence entries.
Reference:
http://www.rcsb.org/pdb/files/fasta.txt?structureIdList=1A8O
"""
chain = SeqIO.read("PDB/1A8O." + extension, parser)
self.assertEqual(chain.id, "1A8O:A")
self.assertEqual(chain.annotations["chain"], "A")
self.assertEqual(
chain.seq,
"MDIRQGPKEPFRDYVDRFYKTLRAEQASQEVKNWMTETLLVQNANPD"
"CKTILKALGPGATLEEMMTACQG",
)
def test_seqres_missing(self):
"""Parse a PDB with no SEQRES entries."""
chains = list(SeqIO.parse("PDB/a_structure." + extension, parser))
self.assertEqual(len(chains), 0)
return SeqresTests |
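The factory is intended to be subclassed once per file format; a plausible wiring (the class names are illustrative, the parser names are Biopython's pdb-seqres and cif-seqres):

class TestPdbSeqres(SeqresTestGenerator("pdb", "pdb-seqres")):
    """Shared SEQRES tests against the plain-PDB parser."""

class TestCifSeqres(SeqresTestGenerator("cif", "cif-seqres")):
    """Shared SEQRES tests against the mmCIF parser."""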
Test factory for tests reading ATOM (or similar) records.
See SeqresTestGenerator for more information. | def AtomTestGenerator(extension, parser):
"""Test factory for tests reading ATOM (or similar) records.
See SeqresTestGenerator for more information.
"""
class AtomTests(unittest.TestCase):
def test_atom_parse(self):
"""Parse a multi-chain structure by ATOM entries.
Reference:
http://www.rcsb.org/pdb/files/fasta.txt?structureIdList=2BEG
"""
chains = list(SeqIO.parse("PDB/2BEG." + extension, parser))
self.assertEqual(len(chains), 5)
actual_seq = "LVFFAEDVGSNKGAIIGLMVGGVVIA"
for chain, chn_id in zip(chains, "ABCDE"):
self.assertEqual(chain.id, "2BEG:" + chn_id)
self.assertEqual(chain.annotations["chain"], chn_id)
self.assertEqual(chain.annotations["model"], 0)
self.assertEqual(chain.seq, actual_seq)
with warnings.catch_warnings():
warnings.simplefilter("ignore", PDBConstructionWarning)
chains = list(SeqIO.parse("PDB/2XHE." + extension, parser))
actual_seq = (
"DRLSRLRQMAAENQXXXXXXXXXXXXXXXXXXXXXXXPEPFMADFFNRVK"
"RIRDNIEDIEQAIEQVAQLHTESLVAVSKEDRDRLNEKLQDTMARISALG"
"NKIRADLKQIEKENKRAQQEGTFEDGTVSTDLRIRQSQHSSLSRKFVKVM"
"TRYNDVQAENKRRYGENVARQCRVVEPSLSDDAIQKVIEHGXXXXXXXXX"
"XXXXXXXXNEIRDRHKDIQQLERSLLELHEMFTDMSTLVASQGEMIDRIE"
"FSVEQSHNYV"
)
self.assertEqual(chains[1].seq, actual_seq)
def test_atom_read(self):
"""Read a single-chain structure by ATOM entries.
Reference:
http://www.rcsb.org/pdb/files/fasta.txt?structureIdList=1A8O
"""
chain = SeqIO.read("PDB/1A8O." + extension, parser)
self.assertEqual(chain.id, "1A8O:A")
self.assertEqual(chain.annotations["chain"], "A")
self.assertEqual(chain.annotations["model"], 0)
self.assertEqual(
chain.seq,
"MDIRQGPKEPFRDYVDRFYKTLRAEQASQEVKNWMTETLLVQNANPDCKTIL"
"KALGPGATLEEMMTACQG",
)
return AtomTests |
Scan RST file and pull out marked doctests as strings.
This is a generator, yielding one tuple per doctest. | def extract_doctests(rst_filename):
"""Scan RST file and pull out marked doctests as strings.
This is a generator, yielding one tuple per doctest.
"""
base_name = os.path.splitext(os.path.basename(rst_filename))[0]
name = None
deps = ""
folder = ""
with open(rst_filename, encoding="utf8") as handle:
line_number = 0
lines = []
while True:
line = handle.readline()
line_number += 1
if not line:
# End of file
break
elif line.lstrip().startswith(".. cont-doctest"):
x = _extract(handle)
lines.extend(x)
line_number += len(x) + 2
elif line.lstrip().startswith(".. doctest"):
if lines:
if not lines[0].lstrip().startswith(">>> "):
raise ValueError(
f"Should start with '>>> ' (indented), not {lines[0]!r}"
)
yield name, "".join(lines), folder, deps
lines = []
deps = [x.strip() for x in line.split()[2:]]
if deps:
folder = deps[0]
deps = deps[1:]
else:
folder = ""
name = "test_%s_line_%05i" % (base_name, line_number)
x = _extract(handle)
lines.extend(x)
line_number += len(x) + 2
if lines:
if not name:
raise ValueError(f"Unanchored doctest in {rst_filename}: {lines}")
if not lines[0].lstrip().startswith(">>> "):
raise ValueError(f"Should start '>>> ' not {lines[0]!r}")
yield name, "".join(lines), folder, deps |
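A sketch of a driver for the yielded tuples, assuming check_deps() below and the standard doctest module; the RST path is illustrative:

import doctest

for name, example, folder, deps in extract_doctests("Doc/Tutorial/chapter_seqio.rst"):
    missing = check_deps(deps)
    if missing:
        print(f"Skipping {name}; unmet dependencies: {missing}")
        continue
    test = doctest.DocTestParser().get_doctest(example, {"__name__": name}, name, folder or ".", 0)
    doctest.DocTestRunner(verbose=False).run(test)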
Check 'lib:XXX' and 'internet' dependencies are met. | def check_deps(dependencies):
"""Check 'lib:XXX' and 'internet' dependencies are met."""
missing = []
for dep in dependencies:
if dep == "internet":
if not online:
missing.append("internet")
else:
assert dep.startswith("lib:"), dep
lib = dep[4:]
try:
tmp = __import__(lib)
del tmp
except ImportError:
missing.append(lib)
return missing |
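For example, a doctest declared with dependencies ["lib:numpy", "internet"] runs only if NumPy imports and the module-level online flag (set elsewhere in the source) is true:

missing = check_deps(["lib:numpy", "internet"])
if missing:
    print("Skipping doctest; unmet dependencies:", missing)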
Get Python package dependencies from requirements.txt. | def get_requirements() -> List[str]:
"""Get Python package dependencies from requirements.txt."""
with open(get_path("requirements.txt")) as f:
requirements = f.read().strip().split("\n")
return requirements |
Extract version information from the given filepath.
Adapted from https://github.com/ray-project/ray/blob/0b190ee1160eeca9796bc091e07eaebf4c85b511/python/setup.py | def find_version(filepath: str) -> str:
"""Extract version information from the given filepath.
Adapted from https://github.com/ray-project/ray/blob/0b190ee1160eeca9796bc091e07eaebf4c85b511/python/setup.py
"""
with open(filepath) as fp:
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", fp.read(), re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.") |
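Typical use in a setup.py, with an assumed package layout:

version = find_version(os.path.join("vllm", "__init__.py"))  # package path is an assumption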
Get the CUDA version from nvcc.
Adapted from https://github.com/NVIDIA/apex/blob/8b7a1ff183741dd8f9b87e7bafd04cfde99cea28/setup.py | def get_nvcc_cuda_version():
"""Get the CUDA version from nvcc.
Adapted from https://github.com/NVIDIA/apex/blob/8b7a1ff183741dd8f9b87e7bafd04cfde99cea28/setup.py
"""
nvcc_output = subprocess.check_output(["nvcc", "-V"], universal_newlines=True)
output = nvcc_output.split()
release_idx = output.index("release") + 1
nvcc_cuda_version = LooseVersion(output[release_idx].split(",")[0])
return nvcc_cuda_version |
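A typical guard built on the result (the minimum version here is an assumption):

nvcc_cuda_version = get_nvcc_cuda_version()
if nvcc_cuda_version < LooseVersion("11.0"):
    raise RuntimeError("CUDA 11.0 or higher is required to build this package.")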
Read the README file if present. | def read_readme() -> str:
"""Read the README file if present."""
p = get_path("README.md")
if os.path.isfile(p):
    with open(p, encoding="utf-8") as f:
        return f.read()
return ""
Downloads and extracts the specified version of LLVM for the given platform.
Args:
version (str): The version of LLVM to download.
is_aarch64 (bool): True if the target platform is aarch64, False otherwise.
extract_path (str): The directory path where the archive will be extracted.
Returns:
str: The path where the LLVM archive was extracted. | def download_and_extract_llvm(version, is_aarch64=False, extract_path="3rdparty"):
"""
Downloads and extracts the specified version of LLVM for the given platform.
Args:
version (str): The version of LLVM to download.
is_aarch64 (bool): True if the target platform is aarch64, False otherwise.
extract_path (str): The directory path where the archive will be extracted.
Returns:
str: The path where the LLVM archive was extracted.
"""
# Pick the Ubuntu build matching the LLVM major version. Compare numerically;
# lexicographic string comparison would misorder e.g. "9.0.0" and "16.0.0".
major_version = int(version.split(".")[0])
ubuntu_version = "16.04"
if major_version >= 16:
    ubuntu_version = "20.04"
elif major_version >= 13:
    ubuntu_version = "18.04"
base_url = f"https://github.com/llvm/llvm-project/releases/download/llvmorg-{version}"
file_name = f"clang+llvm-{version}-{'aarch64-linux-gnu' if is_aarch64 else f'x86_64-linux-gnu-ubuntu-{ubuntu_version}'}.tar.xz"
download_url = f"{base_url}/{file_name}"
# Download the file
print(f"Downloading {file_name} from {download_url}")
# urlopen raises HTTPError for most non-2xx responses; this check is a fallback.
with urllib.request.urlopen(download_url) as response:
    if response.status != 200:
        raise RuntimeError(f"Download failed with status code {response.status}")
    file_content = response.read()
# Ensure the extract path exists
os.makedirs(extract_path, exist_ok=True)
# if the file already exists, remove it
if os.path.exists(os.path.join(extract_path, file_name)):
os.remove(os.path.join(extract_path, file_name))
# Extract the file
print(f"Extracting {file_name} to {extract_path}")
with tarfile.open(fileobj=BytesIO(file_content), mode="r:xz") as tar:
tar.extractall(path=extract_path)
print("Download and extraction completed successfully.")
return os.path.abspath(os.path.join(extract_path, file_name.replace(".tar.xz", ""))) |
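Example invocation (the version choice is illustrative): fetch an x86_64 build and locate its clang binary.

llvm_path = download_and_extract_llvm("16.0.0", is_aarch64=False, extract_path="3rdparty")
clang_path = os.path.join(llvm_path, "bin", "clang")
print("Using clang at:", clang_path)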
Updates git submodules. | def update_submodules():
"""Updates git submodules."""
try:
subprocess.check_call(["git", "submodule", "update", "--init", "--recursive"])
except subprocess.CalledProcessError as error:
raise RuntimeError("Failed to update submodules") from error |