repository_name (stringlengths 5-67) | func_path_in_repository (stringlengths 4-234) | func_name (stringlengths 0-314) | whole_func_string (stringlengths 52-3.87M) | language (stringclasses, 6 values) | func_code_string (stringlengths 39-1.84M) | func_code_tokens (listlengths 15-672k) | func_documentation_string (stringlengths 1-47.2k) | func_documentation_tokens (listlengths 1-3.92k) | split_name (stringclasses, 1 value) | func_code_url (stringlengths 85-339) |
---|---|---|---|---|---|---|---|---|---|---|
esheldon/fitsio | fitsio/header.py | FITSHDR.get | def get(self, item, default_value=None):
"""
Get the requested header entry by keyword name
"""
found, name = self._contains_and_name(item)
if found:
return self._record_map[name]['value']
else:
return default_value | python | def get(self, item, default_value=None):
found, name = self._contains_and_name(item)
if found:
return self._record_map[name]['value']
else:
return default_value | [
"def",
"get",
"(",
"self",
",",
"item",
",",
"default_value",
"=",
"None",
")",
":",
"found",
",",
"name",
"=",
"self",
".",
"_contains_and_name",
"(",
"item",
")",
"if",
"found",
":",
"return",
"self",
".",
"_record_map",
"[",
"name",
"]",
"[",
"'value'",
"]",
"else",
":",
"return",
"default_value"
]
| Get the requested header entry by keyword name | [
"Get",
"the",
"requested",
"header",
"entry",
"by",
"keyword",
"name"
]
| train | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/header.py#L290-L299 |
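The row above documents `FITSHDR.get`, which returns a header keyword's value or a caller-supplied default. A minimal usage sketch, assuming the `fitsio` package is installed; the file name and keywords are hypothetical, and `fitsio.read_header` is the usual way to obtain a `FITSHDR`:

```python
import fitsio  # assumes the fitsio package is installed

# Read the primary header of a (hypothetical) FITS file into a FITSHDR.
header = fitsio.read_header("example.fits")

# get() returns the keyword's value, or the default when the keyword is absent.
exptime = header.get("EXPTIME", default_value=0.0)
naxis = header.get("NAXIS")  # None if the keyword is missing
print(exptime, naxis)
```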
esheldon/fitsio | fitsio/header.py | FITSHDR.next | def next(self):
"""
for iteration over the header entries
"""
if self._current < len(self._record_list):
rec = self._record_list[self._current]
key = rec['name']
self._current += 1
return key
else:
raise StopIteration | python | def next(self):
if self._current < len(self._record_list):
rec = self._record_list[self._current]
key = rec['name']
self._current += 1
return key
else:
raise StopIteration | [
"def",
"next",
"(",
"self",
")",
":",
"if",
"self",
".",
"_current",
"<",
"len",
"(",
"self",
".",
"_record_list",
")",
":",
"rec",
"=",
"self",
".",
"_record_list",
"[",
"self",
".",
"_current",
"]",
"key",
"=",
"rec",
"[",
"'name'",
"]",
"self",
".",
"_current",
"+=",
"1",
"return",
"key",
"else",
":",
"raise",
"StopIteration"
]
| for iteration over the header entries | [
"for",
"iteration",
"over",
"the",
"header",
"entries"
]
| train | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/header.py#L359-L369 |
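`FITSHDR.next` above backs iteration over the header, yielding one keyword name per step. A short sketch, assuming `fitsio` is installed and that a header can be built in memory; the keywords are illustrative:

```python
import fitsio  # assumes the fitsio package is installed

# Build a header in memory rather than reading one from disk.
hdr = fitsio.FITSHDR()
hdr["DBL"] = 23.299843
hdr["KEYSC"] = "hello"

# Iterating the header yields keyword names, driven by next() above.
for key in hdr:
    print(key, "->", hdr[key])
```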
esheldon/fitsio | fitsio/header.py | FITSHDR._record2card | def _record2card(self, record):
"""
when we add new records they don't have a card,
this sort of fakes it up similar to what cfitsio
does, just for display purposes. e.g.
DBL = 23.299843
LNG = 3423432
KEYSNC = 'hello '
KEYSC = 'hello ' / a comment for string
KEYDC = 3.14159265358979 / a comment for pi
KEYLC = 323423432 / a comment for long
basically,
- 8 chars, left aligned, for the keyword name
- a space
- 20 chars for value, left aligned for strings, right aligned for
numbers
- if there is a comment, one space followed by / then another space
then the comment out to 80 chars
"""
name = record['name']
value = record['value']
v_isstring = isstring(value)
if name == 'COMMENT':
# card = 'COMMENT %s' % value
card = 'COMMENT %s' % value
elif name == 'CONTINUE':
card = 'CONTINUE %s' % value
elif name == 'HISTORY':
card = 'HISTORY %s' % value
else:
if len(name) > 8:
card = 'HIERARCH %s= ' % name
else:
card = '%-8s= ' % name[0:8]
# these may be string representations of data, or actual strings
if v_isstring:
value = str(value)
if len(value) > 0:
if value[0] != "'":
# this is a string representing a string header field
# make it look like it will look in the header
value = "'" + value + "'"
vstr = '%-20s' % value
else:
vstr = "%20s" % value
else:
vstr = "''"
else:
vstr = '%20s' % value
card += vstr
if 'comment' in record:
card += ' / %s' % record['comment']
if v_isstring and len(card) > 80:
card = card[0:79] + "'"
else:
card = card[0:80]
return card | python | def _record2card(self, record):
name = record['name']
value = record['value']
v_isstring = isstring(value)
if name == 'COMMENT':
card = 'COMMENT %s' % value
elif name == 'CONTINUE':
card = 'CONTINUE %s' % value
elif name == 'HISTORY':
card = 'HISTORY %s' % value
else:
if len(name) > 8:
card = 'HIERARCH %s= ' % name
else:
card = '%-8s= ' % name[0:8]
if v_isstring:
value = str(value)
if len(value) > 0:
if value[0] != "'":
value = "'" + value + "'"
vstr = '%-20s' % value
else:
vstr = "%20s" % value
else:
vstr = "''"
else:
vstr = '%20s' % value
card += vstr
if 'comment' in record:
card += ' / %s' % record['comment']
if v_isstring and len(card) > 80:
card = card[0:79] + "'"
else:
card = card[0:80]
return card | [
"def",
"_record2card",
"(",
"self",
",",
"record",
")",
":",
"name",
"=",
"record",
"[",
"'name'",
"]",
"value",
"=",
"record",
"[",
"'value'",
"]",
"v_isstring",
"=",
"isstring",
"(",
"value",
")",
"if",
"name",
"==",
"'COMMENT'",
":",
"# card = 'COMMENT %s' % value",
"card",
"=",
"'COMMENT %s'",
"%",
"value",
"elif",
"name",
"==",
"'CONTINUE'",
":",
"card",
"=",
"'CONTINUE %s'",
"%",
"value",
"elif",
"name",
"==",
"'HISTORY'",
":",
"card",
"=",
"'HISTORY %s'",
"%",
"value",
"else",
":",
"if",
"len",
"(",
"name",
")",
">",
"8",
":",
"card",
"=",
"'HIERARCH %s= '",
"%",
"name",
"else",
":",
"card",
"=",
"'%-8s= '",
"%",
"name",
"[",
"0",
":",
"8",
"]",
"# these may be string representations of data, or actual strings",
"if",
"v_isstring",
":",
"value",
"=",
"str",
"(",
"value",
")",
"if",
"len",
"(",
"value",
")",
">",
"0",
":",
"if",
"value",
"[",
"0",
"]",
"!=",
"\"'\"",
":",
"# this is a string representing a string header field",
"# make it look like it will look in the header",
"value",
"=",
"\"'\"",
"+",
"value",
"+",
"\"'\"",
"vstr",
"=",
"'%-20s'",
"%",
"value",
"else",
":",
"vstr",
"=",
"\"%20s\"",
"%",
"value",
"else",
":",
"vstr",
"=",
"\"''\"",
"else",
":",
"vstr",
"=",
"'%20s'",
"%",
"value",
"card",
"+=",
"vstr",
"if",
"'comment'",
"in",
"record",
":",
"card",
"+=",
"' / %s'",
"%",
"record",
"[",
"'comment'",
"]",
"if",
"v_isstring",
"and",
"len",
"(",
"card",
")",
">",
"80",
":",
"card",
"=",
"card",
"[",
"0",
":",
"79",
"]",
"+",
"\"'\"",
"else",
":",
"card",
"=",
"card",
"[",
"0",
":",
"80",
"]",
"return",
"card"
]
| when we add new records they don't have a card,
this sort of fakes it up similar to what cfitsio
does, just for display purposes. e.g.
DBL = 23.299843
LNG = 3423432
KEYSNC = 'hello '
KEYSC = 'hello ' / a comment for string
KEYDC = 3.14159265358979 / a comment for pi
KEYLC = 323423432 / a comment for long
basically,
- 8 chars, left aligned, for the keyword name
- a space
- 20 chars for value, left aligned for strings, right aligned for
numbers
- if there is a comment, one space followed by / then another space
then the comment out to 80 chars | [
"when",
"we",
"add",
"new",
"records",
"they",
"don",
"t",
"have",
"a",
"card",
"this",
"sort",
"of",
"fakes",
"it",
"up",
"similar",
"to",
"what",
"cfitsio",
"does",
"just",
"for",
"display",
"purposes",
".",
"e",
".",
"g",
"."
]
| train | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/header.py#L372-L438 |
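The `_record2card` docstring above spells out the card layout: an 8-character left-aligned keyword, `'= '`, a 20-character value field (quoted and left-aligned for strings, right-aligned for numbers), an optional `' / comment'`, clipped to 80 columns. A standalone sketch of that layout, not the library's own code:

```python
def format_card(name, value, comment=None):
    """Rough re-creation of the 80-column card layout described above."""
    if isinstance(value, str):
        vstr = "%-20s" % ("'" + value + "'")   # strings: quoted, left aligned
    else:
        vstr = "%20s" % value                  # numbers: right aligned
    card = "%-8s= %s" % (name[:8], vstr)
    if comment is not None:
        card += " / %s" % comment
    return card[:80]

print(format_card("KEYDC", 3.14159265358979, "a comment for pi"))
print(format_card("KEYSC", "hello", "a comment for string"))
```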
esheldon/fitsio | fitsio/header.py | FITSRecord.set_record | def set_record(self, record, **kw):
"""
check the record is valid and set keys in the dict
parameters
----------
record: string
Dict representing a record or a string representing a FITS header
card
"""
if isstring(record):
card = FITSCard(record)
self.update(card)
self.verify()
else:
if isinstance(record, FITSRecord):
self.update(record)
elif isinstance(record, dict):
if 'name' in record and 'value' in record:
self.update(record)
elif 'card_string' in record:
self.set_record(record['card_string'])
else:
raise ValueError('record must have name,value fields '
'or a card_string field')
else:
raise ValueError("record must be a string card or "
"dictionary or FITSRecord") | python | def set_record(self, record, **kw):
if isstring(record):
card = FITSCard(record)
self.update(card)
self.verify()
else:
if isinstance(record, FITSRecord):
self.update(record)
elif isinstance(record, dict):
if 'name' in record and 'value' in record:
self.update(record)
elif 'card_string' in record:
self.set_record(record['card_string'])
else:
raise ValueError('record must have name,value fields '
'or a card_string field')
else:
raise ValueError("record must be a string card or "
"dictionary or FITSRecord") | [
"def",
"set_record",
"(",
"self",
",",
"record",
",",
"*",
"*",
"kw",
")",
":",
"if",
"isstring",
"(",
"record",
")",
":",
"card",
"=",
"FITSCard",
"(",
"record",
")",
"self",
".",
"update",
"(",
"card",
")",
"self",
".",
"verify",
"(",
")",
"else",
":",
"if",
"isinstance",
"(",
"record",
",",
"FITSRecord",
")",
":",
"self",
".",
"update",
"(",
"record",
")",
"elif",
"isinstance",
"(",
"record",
",",
"dict",
")",
":",
"if",
"'name'",
"in",
"record",
"and",
"'value'",
"in",
"record",
":",
"self",
".",
"update",
"(",
"record",
")",
"elif",
"'card_string'",
"in",
"record",
":",
"self",
".",
"set_record",
"(",
"record",
"[",
"'card_string'",
"]",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'record must have name,value fields '",
"'or a card_string field'",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"record must be a string card or \"",
"\"dictionary or FITSRecord\"",
")"
]
| check the record is valid and set keys in the dict
parameters
----------
record: string
Dict representing a record or a string representing a FITS header
card | [
"check",
"the",
"record",
"is",
"valid",
"and",
"set",
"keys",
"in",
"the",
"dict"
]
| train | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/header.py#L477-L510 |
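`FITSRecord.set_record` above accepts an 80-column card string, another `FITSRecord`, or a dict carrying either `name`/`value` keys or a `card_string` key. A dependency-free sketch of the two dict shapes it validates; the keyword and values are made up:

```python
# The two dict forms accepted by set_record(), per the checks above.
rec_by_fields = {"name": "EXPTIME", "value": 30.0, "comment": "exposure time"}
rec_by_card = {"card_string": "EXPTIME =                 30.0 / exposure time"}

for rec in (rec_by_fields, rec_by_card):
    ok = ("name" in rec and "value" in rec) or "card_string" in rec
    print(rec, "valid:", ok)
```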
esheldon/fitsio | fitsio/header.py | FITSCard._check_equals | def _check_equals(self):
"""
check for = in position 8, set attribute _has_equals
"""
card_string = self['card_string']
if len(card_string) < 9:
self._has_equals = False
elif card_string[8] == '=':
self._has_equals = True
else:
self._has_equals = False | python | def _check_equals(self):
card_string = self['card_string']
if len(card_string) < 9:
self._has_equals = False
elif card_string[8] == '=':
self._has_equals = True
else:
self._has_equals = False | [
"def",
"_check_equals",
"(",
"self",
")",
":",
"card_string",
"=",
"self",
"[",
"'card_string'",
"]",
"if",
"len",
"(",
"card_string",
")",
"<",
"9",
":",
"self",
".",
"_has_equals",
"=",
"False",
"elif",
"card_string",
"[",
"8",
"]",
"==",
"'='",
":",
"self",
".",
"_has_equals",
"=",
"True",
"else",
":",
"self",
".",
"_has_equals",
"=",
"False"
]
| check for = in position 8, set attribute _has_equals | [
"check",
"for",
"=",
"in",
"position",
"8",
"set",
"attribute",
"_has_equals"
]
| train | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/header.py#L588-L598 |
esheldon/fitsio | fitsio/header.py | FITSCard._convert_value | def _convert_value(self, value_orig):
"""
things like 6 and 1.25 are converted with ast.literal_value
Things like 'hello' are stripped of quotes
"""
import ast
if value_orig is None:
return value_orig
if value_orig.startswith("'") and value_orig.endswith("'"):
value = value_orig[1:-1]
else:
try:
avalue = ast.parse(value_orig).body[0].value
if isinstance(avalue, ast.BinOp):
# this is probably a string that happens to look like
# a binary operation, e.g. '25-3'
value = value_orig
else:
value = ast.literal_eval(value_orig)
except Exception:
value = self._convert_string(value_orig)
if isinstance(value, int) and '_' in value_orig:
value = value_orig
return value | python | def _convert_value(self, value_orig):
import ast
if value_orig is None:
return value_orig
if value_orig.startswith("'") and value_orig.endswith("'"):
value = value_orig[1:-1]
else:
try:
avalue = ast.parse(value_orig).body[0].value
if isinstance(avalue, ast.BinOp):
value = value_orig
else:
value = ast.literal_eval(value_orig)
except Exception:
value = self._convert_string(value_orig)
if isinstance(value, int) and '_' in value_orig:
value = value_orig
return value | [
"def",
"_convert_value",
"(",
"self",
",",
"value_orig",
")",
":",
"import",
"ast",
"if",
"value_orig",
"is",
"None",
":",
"return",
"value_orig",
"if",
"value_orig",
".",
"startswith",
"(",
"\"'\"",
")",
"and",
"value_orig",
".",
"endswith",
"(",
"\"'\"",
")",
":",
"value",
"=",
"value_orig",
"[",
"1",
":",
"-",
"1",
"]",
"else",
":",
"try",
":",
"avalue",
"=",
"ast",
".",
"parse",
"(",
"value_orig",
")",
".",
"body",
"[",
"0",
"]",
".",
"value",
"if",
"isinstance",
"(",
"avalue",
",",
"ast",
".",
"BinOp",
")",
":",
"# this is probably a string that happens to look like",
"# a binary operation, e.g. '25-3'",
"value",
"=",
"value_orig",
"else",
":",
"value",
"=",
"ast",
".",
"literal_eval",
"(",
"value_orig",
")",
"except",
"Exception",
":",
"value",
"=",
"self",
".",
"_convert_string",
"(",
"value_orig",
")",
"if",
"isinstance",
"(",
"value",
",",
"int",
")",
"and",
"'_'",
"in",
"value_orig",
":",
"value",
"=",
"value_orig",
"return",
"value"
]
| things like 6 and 1.25 are converted with ast.literal_value
Things like 'hello' are stripped of quotes | [
"things",
"like",
"6",
"and",
"1",
".",
"25",
"are",
"converted",
"with",
"ast",
".",
"literal_value"
]
| train | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/header.py#L641-L669 |
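`_convert_value` above leans on `ast.literal_eval` to turn raw header value strings into Python objects, while treating inputs like `'25-3'` (which parse to a `BinOp`) as plain strings. A self-contained sketch of the same idea, not the library function itself:

```python
import ast

def parse_value(text):
    """Convert a raw header value string to a Python object where possible."""
    if text.startswith("'") and text.endswith("'"):
        return text[1:-1]                  # quoted string: strip the quotes
    try:
        node = ast.parse(text).body[0].value
        if isinstance(node, ast.BinOp):    # e.g. '25-3' is really a string
            return text
        return ast.literal_eval(text)
    except Exception:
        return text                        # fall back to the raw string

print(parse_value("6"), parse_value("1.25"), parse_value("'hello'"), parse_value("25-3"))
```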
sanger-pathogens/ariba | ariba/cluster.py | Cluster._update_threads | def _update_threads(self):
"""Update available thread count post-construction.
To be called any number of times from run() method"""
if self.remaining_clusters is not None:
self.threads = max(1,self.threads_total//self.remaining_clusters.value)
#otherwise just keep the current (initial) value
print("{} detected {} threads available to it".format(self.name,self.threads), file = self.log_fh) | python | def _update_threads(self):
if self.remaining_clusters is not None:
self.threads = max(1,self.threads_total//self.remaining_clusters.value)
print("{} detected {} threads available to it".format(self.name,self.threads), file = self.log_fh) | [
"def",
"_update_threads",
"(",
"self",
")",
":",
"if",
"self",
".",
"remaining_clusters",
"is",
"not",
"None",
":",
"self",
".",
"threads",
"=",
"max",
"(",
"1",
",",
"self",
".",
"threads_total",
"//",
"self",
".",
"remaining_clusters",
".",
"value",
")",
"#otherwise just keep the current (initial) value",
"print",
"(",
"\"{} detected {} threads available to it\"",
".",
"format",
"(",
"self",
".",
"name",
",",
"self",
".",
"threads",
")",
",",
"file",
"=",
"self",
".",
"log_fh",
")"
]
| Update available thread count post-construction.
To be called any number of times from run() method | [
"Update",
"available",
"thread",
"count",
"post",
"-",
"construction",
".",
"To",
"be",
"called",
"any",
"number",
"of",
"times",
"from",
"run",
"()",
"method"
]
| train | https://github.com/sanger-pathogens/ariba/blob/16a0b1916ce0e886bd22550ba2d648542977001b/ariba/cluster.py#L146-L152 |
sanger-pathogens/ariba | ariba/cluster.py | Cluster._report_completion | def _report_completion(self):
"""Update shared counters to signal that we are done with this cluster.
Call just before exiting run() method (in a finally clause)"""
rem_clust = self.remaining_clusters
if rem_clust is not None:
# -= is non-atomic, need to acquire a lock
with self.remaining_clusters_lock:
rem_clust.value -= 1
# we do not need this object anymore
self.remaining_clusters = None
print("{} reported completion".format(self.name), file=self.log_fh) | python | def _report_completion(self):
rem_clust = self.remaining_clusters
if rem_clust is not None:
with self.remaining_clusters_lock:
rem_clust.value -= 1
self.remaining_clusters = None
print("{} reported completion".format(self.name), file=self.log_fh) | [
"def",
"_report_completion",
"(",
"self",
")",
":",
"rem_clust",
"=",
"self",
".",
"remaining_clusters",
"if",
"rem_clust",
"is",
"not",
"None",
":",
"# -= is non-atomic, need to acquire a lock",
"with",
"self",
".",
"remaining_clusters_lock",
":",
"rem_clust",
".",
"value",
"-=",
"1",
"# we do not need this object anymore",
"self",
".",
"remaining_clusters",
"=",
"None",
"print",
"(",
"\"{} reported completion\"",
".",
"format",
"(",
"self",
".",
"name",
")",
",",
"file",
"=",
"self",
".",
"log_fh",
")"
]
| Update shared counters to signal that we are done with this cluster.
Call just before exiting run() method (in a finally clause) | [
"Update",
"shared",
"counters",
"to",
"signal",
"that",
"we",
"are",
"done",
"with",
"this",
"cluster",
".",
"Call",
"just",
"before",
"exiting",
"run",
"()",
"method",
"(",
"in",
"a",
"finally",
"clause",
")"
]
| train | https://github.com/sanger-pathogens/ariba/blob/16a0b1916ce0e886bd22550ba2d648542977001b/ariba/cluster.py#L154-L164 |
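`_report_completion` above decrements a shared cluster counter under an explicit lock because `-=` on a `multiprocessing.Value` is not atomic. A minimal sketch of that pattern with hypothetical names:

```python
import multiprocessing

remaining_clusters = multiprocessing.Value("i", 3)  # shared integer counter
lock = multiprocessing.Lock()

def report_completion():
    # The -= is non-atomic, so the decrement is guarded by a lock.
    with lock:
        remaining_clusters.value -= 1

report_completion()
print(remaining_clusters.value)  # 2
```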
sanger-pathogens/ariba | ariba/cluster.py | Cluster._make_reads_for_assembly | def _make_reads_for_assembly(number_of_wanted_reads, total_reads, reads_in1, reads_in2, reads_out1, reads_out2, random_seed=None):
'''Makes fastq files that are random subset of input files. Returns total number of reads in output files.
If the number of wanted reads is >= total reads, then just makes symlinks instead of making
new copies of the input files.'''
random.seed(random_seed)
if number_of_wanted_reads < total_reads:
reads_written = 0
percent_wanted = 100 * number_of_wanted_reads / total_reads
file_reader1 = pyfastaq.sequences.file_reader(reads_in1)
file_reader2 = pyfastaq.sequences.file_reader(reads_in2)
out1 = pyfastaq.utils.open_file_write(reads_out1)
out2 = pyfastaq.utils.open_file_write(reads_out2)
for read1 in file_reader1:
try:
read2 = next(file_reader2)
except StopIteration:
pyfastaq.utils.close(out1)
pyfastaq.utils.close(out2)
raise Error('Error subsetting reads. No mate found for read ' + read1.id)
if random.randint(0, 100) <= percent_wanted:
print(read1, file=out1)
print(read2, file=out2)
reads_written += 2
pyfastaq.utils.close(out1)
pyfastaq.utils.close(out2)
return reads_written
else:
os.symlink(reads_in1, reads_out1)
os.symlink(reads_in2, reads_out2)
return total_reads | python | def _make_reads_for_assembly(number_of_wanted_reads, total_reads, reads_in1, reads_in2, reads_out1, reads_out2, random_seed=None):
random.seed(random_seed)
if number_of_wanted_reads < total_reads:
reads_written = 0
percent_wanted = 100 * number_of_wanted_reads / total_reads
file_reader1 = pyfastaq.sequences.file_reader(reads_in1)
file_reader2 = pyfastaq.sequences.file_reader(reads_in2)
out1 = pyfastaq.utils.open_file_write(reads_out1)
out2 = pyfastaq.utils.open_file_write(reads_out2)
for read1 in file_reader1:
try:
read2 = next(file_reader2)
except StopIteration:
pyfastaq.utils.close(out1)
pyfastaq.utils.close(out2)
raise Error('Error subsetting reads. No mate found for read ' + read1.id)
if random.randint(0, 100) <= percent_wanted:
print(read1, file=out1)
print(read2, file=out2)
reads_written += 2
pyfastaq.utils.close(out1)
pyfastaq.utils.close(out2)
return reads_written
else:
os.symlink(reads_in1, reads_out1)
os.symlink(reads_in2, reads_out2)
return total_reads | [
"def",
"_make_reads_for_assembly",
"(",
"number_of_wanted_reads",
",",
"total_reads",
",",
"reads_in1",
",",
"reads_in2",
",",
"reads_out1",
",",
"reads_out2",
",",
"random_seed",
"=",
"None",
")",
":",
"random",
".",
"seed",
"(",
"random_seed",
")",
"if",
"number_of_wanted_reads",
"<",
"total_reads",
":",
"reads_written",
"=",
"0",
"percent_wanted",
"=",
"100",
"*",
"number_of_wanted_reads",
"/",
"total_reads",
"file_reader1",
"=",
"pyfastaq",
".",
"sequences",
".",
"file_reader",
"(",
"reads_in1",
")",
"file_reader2",
"=",
"pyfastaq",
".",
"sequences",
".",
"file_reader",
"(",
"reads_in2",
")",
"out1",
"=",
"pyfastaq",
".",
"utils",
".",
"open_file_write",
"(",
"reads_out1",
")",
"out2",
"=",
"pyfastaq",
".",
"utils",
".",
"open_file_write",
"(",
"reads_out2",
")",
"for",
"read1",
"in",
"file_reader1",
":",
"try",
":",
"read2",
"=",
"next",
"(",
"file_reader2",
")",
"except",
"StopIteration",
":",
"pyfastaq",
".",
"utils",
".",
"close",
"(",
"out1",
")",
"pyfastaq",
".",
"utils",
".",
"close",
"(",
"out2",
")",
"raise",
"Error",
"(",
"'Error subsetting reads. No mate found for read '",
"+",
"read1",
".",
"id",
")",
"if",
"random",
".",
"randint",
"(",
"0",
",",
"100",
")",
"<=",
"percent_wanted",
":",
"print",
"(",
"read1",
",",
"file",
"=",
"out1",
")",
"print",
"(",
"read2",
",",
"file",
"=",
"out2",
")",
"reads_written",
"+=",
"2",
"pyfastaq",
".",
"utils",
".",
"close",
"(",
"out1",
")",
"pyfastaq",
".",
"utils",
".",
"close",
"(",
"out2",
")",
"return",
"reads_written",
"else",
":",
"os",
".",
"symlink",
"(",
"reads_in1",
",",
"reads_out1",
")",
"os",
".",
"symlink",
"(",
"reads_in2",
",",
"reads_out2",
")",
"return",
"total_reads"
]
| Makes fastq files that are random subset of input files. Returns total number of reads in output files.
If the number of wanted reads is >= total reads, then just makes symlinks instead of making
new copies of the input files. | [
"Makes",
"fastq",
"files",
"that",
"are",
"random",
"subset",
"of",
"input",
"files",
".",
"Returns",
"total",
"number",
"of",
"reads",
"in",
"output",
"files",
".",
"If",
"the",
"number",
"of",
"wanted",
"reads",
"is",
">",
"=",
"total",
"reads",
"then",
"just",
"makes",
"symlinks",
"instead",
"of",
"making",
"new",
"copies",
"of",
"the",
"input",
"files",
"."
]
| train | https://github.com/sanger-pathogens/ariba/blob/16a0b1916ce0e886bd22550ba2d648542977001b/ariba/cluster.py#L262-L295 |
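The subsampling in `_make_reads_for_assembly` above keeps each read pair when a random integer in [0, 100] falls at or below `100 * wanted / total`. A tiny self-contained illustration of that acceptance test with made-up numbers:

```python
import random

random.seed(42)
wanted, total = 250, 1000
percent_wanted = 100 * wanted / total  # 25.0

kept = sum(1 for _ in range(total) if random.randint(0, 100) <= percent_wanted)
print("kept roughly", kept, "of", total, "read pairs")  # about a quarter of them
```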
sanger-pathogens/ariba | ariba/tb.py | report_to_resistance_dict | def report_to_resistance_dict(infile):
'''Takes final ariba report.tsv file, and extracts
resistance calls, returning a dict of
drug name -> list of mutations.
each "mutation" in the list is a tuple of (gene name, mutation).
Mutation is of the form X42Y, or "incomplete_gene" for katG and
pncA when they are not complete.
This all assumes that the reference data are in the "correct"
form, where the variant descriptions in the var_description column of the
TSV file ends with a comma-separated list of the drug names'''
complete_genes = {'katG': 'Isoniazid', 'pncA': 'Pyrazinamide'}
res_calls = {}
incomplete_genes = set()
with open(infile) as f:
reader = csv.DictReader(f, delimiter='\t')
for d in reader:
if d['ref_name'] in complete_genes and d['gene'] == '1':
f = flag.Flag(int(d['flag']))
if not f.has('complete_gene'):
incomplete_genes.add(d['ref_name'])
if d['has_known_var'] == '1':
if 'Original mutation' in d['var_description']:
drugs = d['var_description'].split(':')[-1].split('.')[0].split()[-1].split(',')
change = d['var_description'].split()[-1]
else:
drugs = d['var_description'].split()[-1].split(',')
change = d['known_var_change']
for drug in drugs:
if drug not in res_calls:
res_calls[drug] = []
res_calls[drug].append((d['ref_name'], change))
for gene in incomplete_genes:
drug = complete_genes[gene]
if drug not in res_calls:
res_calls[drug] = []
res_calls[drug].append((gene, 'Incomplete_gene'))
return res_calls | python | def report_to_resistance_dict(infile):
complete_genes = {'katG': 'Isoniazid', 'pncA': 'Pyrazinamide'}
res_calls = {}
incomplete_genes = set()
with open(infile) as f:
reader = csv.DictReader(f, delimiter='\t')
for d in reader:
if d['ref_name'] in complete_genes and d['gene'] == '1':
f = flag.Flag(int(d['flag']))
if not f.has('complete_gene'):
incomplete_genes.add(d['ref_name'])
if d['has_known_var'] == '1':
if 'Original mutation' in d['var_description']:
drugs = d['var_description'].split(':')[-1].split('.')[0].split()[-1].split(',')
change = d['var_description'].split()[-1]
else:
drugs = d['var_description'].split()[-1].split(',')
change = d['known_var_change']
for drug in drugs:
if drug not in res_calls:
res_calls[drug] = []
res_calls[drug].append((d['ref_name'], change))
for gene in incomplete_genes:
drug = complete_genes[gene]
if drug not in res_calls:
res_calls[drug] = []
res_calls[drug].append((gene, 'Incomplete_gene'))
return res_calls | [
"def",
"report_to_resistance_dict",
"(",
"infile",
")",
":",
"complete_genes",
"=",
"{",
"'katG'",
":",
"'Isoniazid'",
",",
"'pncA'",
":",
"'Pyrazinamide'",
"}",
"res_calls",
"=",
"{",
"}",
"incomplete_genes",
"=",
"set",
"(",
")",
"with",
"open",
"(",
"infile",
")",
"as",
"f",
":",
"reader",
"=",
"csv",
".",
"DictReader",
"(",
"f",
",",
"delimiter",
"=",
"'\\t'",
")",
"for",
"d",
"in",
"reader",
":",
"if",
"d",
"[",
"'ref_name'",
"]",
"in",
"complete_genes",
"and",
"d",
"[",
"'gene'",
"]",
"==",
"'1'",
":",
"f",
"=",
"flag",
".",
"Flag",
"(",
"int",
"(",
"d",
"[",
"'flag'",
"]",
")",
")",
"if",
"not",
"f",
".",
"has",
"(",
"'complete_gene'",
")",
":",
"incomplete_genes",
".",
"add",
"(",
"d",
"[",
"'ref_name'",
"]",
")",
"if",
"d",
"[",
"'has_known_var'",
"]",
"==",
"'1'",
":",
"if",
"'Original mutation'",
"in",
"d",
"[",
"'var_description'",
"]",
":",
"drugs",
"=",
"d",
"[",
"'var_description'",
"]",
".",
"split",
"(",
"':'",
")",
"[",
"-",
"1",
"]",
".",
"split",
"(",
"'.'",
")",
"[",
"0",
"]",
".",
"split",
"(",
")",
"[",
"-",
"1",
"]",
".",
"split",
"(",
"','",
")",
"change",
"=",
"d",
"[",
"'var_description'",
"]",
".",
"split",
"(",
")",
"[",
"-",
"1",
"]",
"else",
":",
"drugs",
"=",
"d",
"[",
"'var_description'",
"]",
".",
"split",
"(",
")",
"[",
"-",
"1",
"]",
".",
"split",
"(",
"','",
")",
"change",
"=",
"d",
"[",
"'known_var_change'",
"]",
"for",
"drug",
"in",
"drugs",
":",
"if",
"drug",
"not",
"in",
"res_calls",
":",
"res_calls",
"[",
"drug",
"]",
"=",
"[",
"]",
"res_calls",
"[",
"drug",
"]",
".",
"append",
"(",
"(",
"d",
"[",
"'ref_name'",
"]",
",",
"change",
")",
")",
"for",
"gene",
"in",
"incomplete_genes",
":",
"drug",
"=",
"complete_genes",
"[",
"gene",
"]",
"if",
"drug",
"not",
"in",
"res_calls",
":",
"res_calls",
"[",
"drug",
"]",
"=",
"[",
"]",
"res_calls",
"[",
"drug",
"]",
".",
"append",
"(",
"(",
"gene",
",",
"'Incomplete_gene'",
")",
")",
"return",
"res_calls"
]
| Takes final ariba report.tsv file, and extracts
resistance calls, returning a dict of
drug name -> list of mutations.
each "mutation" in the list is a tuple of (gene name, mutation).
Mutation is of the form X42Y, or "incomplete_gene" for katG and
pncA when they are not complete.
This all assumes that the reference data are in the "correct"
form, where the variant descriptions in the var_description column of the
TSV file ends with a comma-separated list of the drug names | [
"Takes",
"final",
"ariba",
"report",
".",
"tsv",
"file",
"and",
"extracts",
"resistance",
"calls",
"returning",
"a",
"dict",
"of",
"drug",
"name",
"-",
">",
"list",
"of",
"mutations",
".",
"each",
"mutation",
"in",
"the",
"list",
"is",
"a",
"tuple",
"of",
"(",
"gene",
"name",
"mutation",
")",
".",
"Mutation",
"is",
"of",
"the",
"form",
"X42Y",
"or",
"incomplete_gene",
"for",
"katG",
"and",
"pncA",
"when",
"they",
"are",
"not",
"complete",
".",
"This",
"all",
"assumes",
"that",
"the",
"reference",
"data",
"are",
"in",
"the",
"correct",
"form",
"where",
"the",
"variant",
"descriptions",
"in",
"the",
"var_description",
"column",
"of",
"the",
"TSV",
"file",
"ends",
"with",
"a",
"comma",
"-",
"separated",
"list",
"of",
"the",
"drug",
"names"
]
| train | https://github.com/sanger-pathogens/ariba/blob/16a0b1916ce0e886bd22550ba2d648542977001b/ariba/tb.py#L17-L56 |
sanger-pathogens/ariba | ariba/tb.py | genbank_to_gene_coords | def genbank_to_gene_coords(infile, genes):
'''Input file in genbank format. genes = list of gene names to find.
Returns dict of gene name -> {start: x, end: y}, where x and y are
zero-based. x<y iff gene is on forwards strand'''
coords = {}
for seq_record in SeqIO.parse(infile, "genbank"):
for feature in seq_record.features:
if feature.type == 'gene':
gene_name = feature.qualifiers.get('gene', [None])[0]
if gene_name not in genes:
continue
if feature.location.strand == 1:
coords[gene_name] = {'start': int(feature.location.start), 'end': int(feature.location.end) - 1}
else:
coords[gene_name] = {'end': int(feature.location.start), 'start': int(feature.location.end) - 1}
return coords | python | def genbank_to_gene_coords(infile, genes):
coords = {}
for seq_record in SeqIO.parse(infile, "genbank"):
for feature in seq_record.features:
if feature.type == 'gene':
gene_name = feature.qualifiers.get('gene', [None])[0]
if gene_name not in genes:
continue
if feature.location.strand == 1:
coords[gene_name] = {'start': int(feature.location.start), 'end': int(feature.location.end) - 1}
else:
coords[gene_name] = {'end': int(feature.location.start), 'start': int(feature.location.end) - 1}
return coords | [
"def",
"genbank_to_gene_coords",
"(",
"infile",
",",
"genes",
")",
":",
"coords",
"=",
"{",
"}",
"for",
"seq_record",
"in",
"SeqIO",
".",
"parse",
"(",
"infile",
",",
"\"genbank\"",
")",
":",
"for",
"feature",
"in",
"seq_record",
".",
"features",
":",
"if",
"feature",
".",
"type",
"==",
"'gene'",
":",
"gene_name",
"=",
"feature",
".",
"qualifiers",
".",
"get",
"(",
"'gene'",
",",
"[",
"None",
"]",
")",
"[",
"0",
"]",
"if",
"gene_name",
"not",
"in",
"genes",
":",
"continue",
"if",
"feature",
".",
"location",
".",
"strand",
"==",
"1",
":",
"coords",
"[",
"gene_name",
"]",
"=",
"{",
"'start'",
":",
"int",
"(",
"feature",
".",
"location",
".",
"start",
")",
",",
"'end'",
":",
"int",
"(",
"feature",
".",
"location",
".",
"end",
")",
"-",
"1",
"}",
"else",
":",
"coords",
"[",
"gene_name",
"]",
"=",
"{",
"'end'",
":",
"int",
"(",
"feature",
".",
"location",
".",
"start",
")",
",",
"'start'",
":",
"int",
"(",
"feature",
".",
"location",
".",
"end",
")",
"-",
"1",
"}",
"return",
"coords"
]
| Input file in genbank format. genes = list of gene names to find.
Returns dict of gene name -> {start: x, end: y}, where x and y are
zero-based. x<y iff gene is on forwards strand | [
"Input",
"file",
"in",
"genbank",
"format",
".",
"genes",
"=",
"list",
"of",
"gene",
"names",
"to",
"find",
".",
"Returns",
"dict",
"of",
"gene",
"name",
"-",
">",
"{",
"start",
":",
"x",
"end",
":",
"y",
"}",
"where",
"x",
"and",
"y",
"are",
"zero",
"-",
"based",
".",
"x<y",
"iff",
"gene",
"is",
"on",
"forwards",
"strand"
]
| train | https://github.com/sanger-pathogens/ariba/blob/16a0b1916ce0e886bd22550ba2d648542977001b/ariba/tb.py#L59-L77 |
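`genbank_to_gene_coords` above walks Biopython GenBank records and stores zero-based coordinates, swapping start and end for reverse-strand genes. A sketch of the same traversal, assuming Biopython is installed; `annotation.gbk` and the gene names are hypothetical:

```python
from Bio import SeqIO  # assumes Biopython is installed

def gene_coords(genbank_file, wanted_genes):
    coords = {}
    for record in SeqIO.parse(genbank_file, "genbank"):
        for feature in record.features:
            if feature.type != "gene":
                continue
            name = feature.qualifiers.get("gene", [None])[0]
            if name not in wanted_genes:
                continue
            start, end = int(feature.location.start), int(feature.location.end) - 1
            if feature.location.strand == 1:
                coords[name] = {"start": start, "end": end}
            else:
                coords[name] = {"start": end, "end": start}  # start > end on reverse strand
    return coords

# print(gene_coords("annotation.gbk", {"katG", "pncA"}))  # hypothetical file and genes
```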
sanger-pathogens/ariba | ariba/tb.py | load_mutations | def load_mutations(gene_coords, mutation_to_drug_json, variants_txt, upstream_before=100):
'''Load mutations from "mykrobe-style" files. mutation_to_drug_json is json file
of mutation -> list of drugs. variants_txt is text file of variants used my mykrobe's
make probes. gene_coords should be dict of gene coords made by the function
genbank_to_gene_coords'''
with open(mutation_to_drug_json) as f:
drug_data = json.load(f)
mutations = []
genes_with_indels = set()
genes_need_upstream = set()
genes_non_upstream = set()
with open(variants_txt) as f:
for line in f:
gene, variant, d_or_p = line.rstrip().split('\t')
coding = 0 if gene == 'rrs' else 1
d = {'gene': gene, 'var': variant, 'coding': coding, 'upstream': False}
drug_data_key = d['gene'] + '_' + d['var']
if drug_data_key not in drug_data:
print('KEY', drug_data_key, 'NOT FOUND', file=sys.stderr)
else:
d['drugs'] = ','.join(sorted(drug_data[drug_data_key]))
if d_or_p == 'DNA' and gene != 'rrs':
assert gene != 'rrs'
re_match = re.match('([ACGT]+)(-?[0-9]+)([ACGTX]+)', d['var'])
try:
ref, pos, alt = re_match.groups()
except:
print('regex error:', d['var'], file=sys.stderr)
continue
pos = int(pos)
if len(ref) != len(alt):
genes_with_indels.add(d['gene'])
continue
elif pos > 0:
#print('ignoring synonymous change (not implemented):', d['gene'], d['var'], d['drugs'], file=sys.stderr)
continue
elif pos < 0:
this_gene_coords = gene_coords[d['gene']]
d['upstream'] = True
if this_gene_coords['start'] < this_gene_coords['end']:
variant_pos_in_output_seq = upstream_before + pos + 1
else:
variant_pos_in_output_seq = upstream_before + pos + 1
assert variant_pos_in_output_seq > 0
d['var'] = ref + str(variant_pos_in_output_seq) + alt
d['original_mutation'] = variant
genes_need_upstream.add(d['gene'])
elif pos == 0:
print('Zero coord!', d, file=sys.stderr)
continue
else:
print('deal with?', d, file=sys.stderr)
continue
mutations.append(d)
if not d['upstream']:
genes_non_upstream.add(d['gene'])
return mutations, genes_with_indels, genes_need_upstream, genes_non_upstream | python | def load_mutations(gene_coords, mutation_to_drug_json, variants_txt, upstream_before=100):
with open(mutation_to_drug_json) as f:
drug_data = json.load(f)
mutations = []
genes_with_indels = set()
genes_need_upstream = set()
genes_non_upstream = set()
with open(variants_txt) as f:
for line in f:
gene, variant, d_or_p = line.rstrip().split('\t')
coding = 0 if gene == 'rrs' else 1
d = {'gene': gene, 'var': variant, 'coding': coding, 'upstream': False}
drug_data_key = d['gene'] + '_' + d['var']
if drug_data_key not in drug_data:
print('KEY', drug_data_key, 'NOT FOUND', file=sys.stderr)
else:
d['drugs'] = ','.join(sorted(drug_data[drug_data_key]))
if d_or_p == 'DNA' and gene != 'rrs':
assert gene != 'rrs'
re_match = re.match('([ACGT]+)(-?[0-9]+)([ACGTX]+)', d['var'])
try:
ref, pos, alt = re_match.groups()
except:
print('regex error:', d['var'], file=sys.stderr)
continue
pos = int(pos)
if len(ref) != len(alt):
genes_with_indels.add(d['gene'])
continue
elif pos > 0:
continue
elif pos < 0:
this_gene_coords = gene_coords[d['gene']]
d['upstream'] = True
if this_gene_coords['start'] < this_gene_coords['end']:
variant_pos_in_output_seq = upstream_before + pos + 1
else:
variant_pos_in_output_seq = upstream_before + pos + 1
assert variant_pos_in_output_seq > 0
d['var'] = ref + str(variant_pos_in_output_seq) + alt
d['original_mutation'] = variant
genes_need_upstream.add(d['gene'])
elif pos == 0:
print('Zero coord!', d, file=sys.stderr)
continue
else:
print('deal with?', d, file=sys.stderr)
continue
mutations.append(d)
if not d['upstream']:
genes_non_upstream.add(d['gene'])
return mutations, genes_with_indels, genes_need_upstream, genes_non_upstream | [
"def",
"load_mutations",
"(",
"gene_coords",
",",
"mutation_to_drug_json",
",",
"variants_txt",
",",
"upstream_before",
"=",
"100",
")",
":",
"with",
"open",
"(",
"mutation_to_drug_json",
")",
"as",
"f",
":",
"drug_data",
"=",
"json",
".",
"load",
"(",
"f",
")",
"mutations",
"=",
"[",
"]",
"genes_with_indels",
"=",
"set",
"(",
")",
"genes_need_upstream",
"=",
"set",
"(",
")",
"genes_non_upstream",
"=",
"set",
"(",
")",
"with",
"open",
"(",
"variants_txt",
")",
"as",
"f",
":",
"for",
"line",
"in",
"f",
":",
"gene",
",",
"variant",
",",
"d_or_p",
"=",
"line",
".",
"rstrip",
"(",
")",
".",
"split",
"(",
"'\\t'",
")",
"coding",
"=",
"0",
"if",
"gene",
"==",
"'rrs'",
"else",
"1",
"d",
"=",
"{",
"'gene'",
":",
"gene",
",",
"'var'",
":",
"variant",
",",
"'coding'",
":",
"coding",
",",
"'upstream'",
":",
"False",
"}",
"drug_data_key",
"=",
"d",
"[",
"'gene'",
"]",
"+",
"'_'",
"+",
"d",
"[",
"'var'",
"]",
"if",
"drug_data_key",
"not",
"in",
"drug_data",
":",
"print",
"(",
"'KEY'",
",",
"drug_data_key",
",",
"'NOT FOUND'",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"else",
":",
"d",
"[",
"'drugs'",
"]",
"=",
"','",
".",
"join",
"(",
"sorted",
"(",
"drug_data",
"[",
"drug_data_key",
"]",
")",
")",
"if",
"d_or_p",
"==",
"'DNA'",
"and",
"gene",
"!=",
"'rrs'",
":",
"assert",
"gene",
"!=",
"'rrs'",
"re_match",
"=",
"re",
".",
"match",
"(",
"'([ACGT]+)(-?[0-9]+)([ACGTX]+)'",
",",
"d",
"[",
"'var'",
"]",
")",
"try",
":",
"ref",
",",
"pos",
",",
"alt",
"=",
"re_match",
".",
"groups",
"(",
")",
"except",
":",
"print",
"(",
"'regex error:'",
",",
"d",
"[",
"'var'",
"]",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"continue",
"pos",
"=",
"int",
"(",
"pos",
")",
"if",
"len",
"(",
"ref",
")",
"!=",
"len",
"(",
"alt",
")",
":",
"genes_with_indels",
".",
"add",
"(",
"d",
"[",
"'gene'",
"]",
")",
"continue",
"elif",
"pos",
">",
"0",
":",
"#print('ignoring synonymous change (not implemented):', d['gene'], d['var'], d['drugs'], file=sys.stderr)",
"continue",
"elif",
"pos",
"<",
"0",
":",
"this_gene_coords",
"=",
"gene_coords",
"[",
"d",
"[",
"'gene'",
"]",
"]",
"d",
"[",
"'upstream'",
"]",
"=",
"True",
"if",
"this_gene_coords",
"[",
"'start'",
"]",
"<",
"this_gene_coords",
"[",
"'end'",
"]",
":",
"variant_pos_in_output_seq",
"=",
"upstream_before",
"+",
"pos",
"+",
"1",
"else",
":",
"variant_pos_in_output_seq",
"=",
"upstream_before",
"+",
"pos",
"+",
"1",
"assert",
"variant_pos_in_output_seq",
">",
"0",
"d",
"[",
"'var'",
"]",
"=",
"ref",
"+",
"str",
"(",
"variant_pos_in_output_seq",
")",
"+",
"alt",
"d",
"[",
"'original_mutation'",
"]",
"=",
"variant",
"genes_need_upstream",
".",
"add",
"(",
"d",
"[",
"'gene'",
"]",
")",
"elif",
"pos",
"==",
"0",
":",
"print",
"(",
"'Zero coord!'",
",",
"d",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"continue",
"else",
":",
"print",
"(",
"'deal with?'",
",",
"d",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"continue",
"mutations",
".",
"append",
"(",
"d",
")",
"if",
"not",
"d",
"[",
"'upstream'",
"]",
":",
"genes_non_upstream",
".",
"add",
"(",
"d",
"[",
"'gene'",
"]",
")",
"return",
"mutations",
",",
"genes_with_indels",
",",
"genes_need_upstream",
",",
"genes_non_upstream"
]
| Load mutations from "mykrobe-style" files. mutation_to_drug_json is json file
of mutation -> list of drugs. variants_txt is text file of variants used my mykrobe's
make probes. gene_coords should be dict of gene coords made by the function
genbank_to_gene_coords | [
"Load",
"mutations",
"from",
"mykrobe",
"-",
"style",
"files",
".",
"mutation_to_drug_json",
"is",
"json",
"file",
"of",
"mutation",
"-",
">",
"list",
"of",
"drugs",
".",
"variants_txt",
"is",
"text",
"file",
"of",
"variants",
"used",
"my",
"mykrobe",
"s",
"make",
"probes",
".",
"gene_coords",
"should",
"be",
"dict",
"of",
"gene",
"coords",
"made",
"by",
"the",
"function",
"genbank_to_gene_coords"
]
| train | https://github.com/sanger-pathogens/ariba/blob/16a0b1916ce0e886bd22550ba2d648542977001b/ariba/tb.py#L80-L142 |
sanger-pathogens/ariba | ariba/tb.py | write_prepareref_fasta_file | def write_prepareref_fasta_file(outfile, gene_coords, genes_need_upstream, genes_non_upstream, upstream_before=100, upstream_after=100):
'''Writes fasta file to be used with -f option of prepareref'''
tmp_dict = {}
fasta_in = os.path.join(data_dir, 'NC_000962.3.fa.gz')
pyfastaq.tasks.file_to_dict(fasta_in, tmp_dict)
ref_seq = tmp_dict['NC_000962.3']
with open(outfile, 'w') as f:
for gene in genes_non_upstream:
start = gene_coords[gene]['start']
end = gene_coords[gene]['end']
if start < end:
gene_fa = pyfastaq.sequences.Fasta(gene, ref_seq[start:end+1])
else:
gene_fa = pyfastaq.sequences.Fasta(gene, ref_seq[end:start+1])
gene_fa.revcomp()
print(gene_fa, file=f)
for gene in genes_need_upstream:
start = gene_coords[gene]['start']
end = gene_coords[gene]['end']
if start < end:
gene_fa = pyfastaq.sequences.Fasta(gene, ref_seq[start - upstream_before:start + upstream_after])
else:
gene_fa = pyfastaq.sequences.Fasta(gene, ref_seq[start - upstream_after + 1:start + upstream_before + 1])
gene_fa.revcomp()
gene_fa.id += '_upstream'
print(gene_fa, file=f) | python | def write_prepareref_fasta_file(outfile, gene_coords, genes_need_upstream, genes_non_upstream, upstream_before=100, upstream_after=100):
tmp_dict = {}
fasta_in = os.path.join(data_dir, 'NC_000962.3.fa.gz')
pyfastaq.tasks.file_to_dict(fasta_in, tmp_dict)
ref_seq = tmp_dict['NC_000962.3']
with open(outfile, 'w') as f:
for gene in genes_non_upstream:
start = gene_coords[gene]['start']
end = gene_coords[gene]['end']
if start < end:
gene_fa = pyfastaq.sequences.Fasta(gene, ref_seq[start:end+1])
else:
gene_fa = pyfastaq.sequences.Fasta(gene, ref_seq[end:start+1])
gene_fa.revcomp()
print(gene_fa, file=f)
for gene in genes_need_upstream:
start = gene_coords[gene]['start']
end = gene_coords[gene]['end']
if start < end:
gene_fa = pyfastaq.sequences.Fasta(gene, ref_seq[start - upstream_before:start + upstream_after])
else:
gene_fa = pyfastaq.sequences.Fasta(gene, ref_seq[start - upstream_after + 1:start + upstream_before + 1])
gene_fa.revcomp()
gene_fa.id += '_upstream'
print(gene_fa, file=f) | [
"def",
"write_prepareref_fasta_file",
"(",
"outfile",
",",
"gene_coords",
",",
"genes_need_upstream",
",",
"genes_non_upstream",
",",
"upstream_before",
"=",
"100",
",",
"upstream_after",
"=",
"100",
")",
":",
"tmp_dict",
"=",
"{",
"}",
"fasta_in",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data_dir",
",",
"'NC_000962.3.fa.gz'",
")",
"pyfastaq",
".",
"tasks",
".",
"file_to_dict",
"(",
"fasta_in",
",",
"tmp_dict",
")",
"ref_seq",
"=",
"tmp_dict",
"[",
"'NC_000962.3'",
"]",
"with",
"open",
"(",
"outfile",
",",
"'w'",
")",
"as",
"f",
":",
"for",
"gene",
"in",
"genes_non_upstream",
":",
"start",
"=",
"gene_coords",
"[",
"gene",
"]",
"[",
"'start'",
"]",
"end",
"=",
"gene_coords",
"[",
"gene",
"]",
"[",
"'end'",
"]",
"if",
"start",
"<",
"end",
":",
"gene_fa",
"=",
"pyfastaq",
".",
"sequences",
".",
"Fasta",
"(",
"gene",
",",
"ref_seq",
"[",
"start",
":",
"end",
"+",
"1",
"]",
")",
"else",
":",
"gene_fa",
"=",
"pyfastaq",
".",
"sequences",
".",
"Fasta",
"(",
"gene",
",",
"ref_seq",
"[",
"end",
":",
"start",
"+",
"1",
"]",
")",
"gene_fa",
".",
"revcomp",
"(",
")",
"print",
"(",
"gene_fa",
",",
"file",
"=",
"f",
")",
"for",
"gene",
"in",
"genes_need_upstream",
":",
"start",
"=",
"gene_coords",
"[",
"gene",
"]",
"[",
"'start'",
"]",
"end",
"=",
"gene_coords",
"[",
"gene",
"]",
"[",
"'end'",
"]",
"if",
"start",
"<",
"end",
":",
"gene_fa",
"=",
"pyfastaq",
".",
"sequences",
".",
"Fasta",
"(",
"gene",
",",
"ref_seq",
"[",
"start",
"-",
"upstream_before",
":",
"start",
"+",
"upstream_after",
"]",
")",
"else",
":",
"gene_fa",
"=",
"pyfastaq",
".",
"sequences",
".",
"Fasta",
"(",
"gene",
",",
"ref_seq",
"[",
"start",
"-",
"upstream_after",
"+",
"1",
":",
"start",
"+",
"upstream_before",
"+",
"1",
"]",
")",
"gene_fa",
".",
"revcomp",
"(",
")",
"gene_fa",
".",
"id",
"+=",
"'_upstream'",
"print",
"(",
"gene_fa",
",",
"file",
"=",
"f",
")"
]
| Writes fasta file to be used with -f option of prepareref | [
"Writes",
"fasta",
"file",
"to",
"be",
"used",
"with",
"-",
"f",
"option",
"of",
"prepareref"
]
| train | https://github.com/sanger-pathogens/ariba/blob/16a0b1916ce0e886bd22550ba2d648542977001b/ariba/tb.py#L145-L174 |
sanger-pathogens/ariba | ariba/summary_cluster.py | SummaryCluster._get_known_noncoding_het_snp | def _get_known_noncoding_het_snp(data_dict):
'''If ref is coding, return None. If the data dict has a known snp, and
samtools made a call, then return the string ref_name_change and the
% of reads supporting the variant type. If noncoding, but no
samtools call, then return None'''
if data_dict['gene'] == '1':
return None
if data_dict['known_var'] == '1' and data_dict['ref_ctg_effect'] == 'SNP' \
and data_dict['smtls_nts'] != '.' and ';' not in data_dict['smtls_nts']:
nucleotides = data_dict['smtls_nts'].split(',')
depths = data_dict['smtls_nts_depth'].split(',')
if len(nucleotides) != len(depths):
raise Error('Mismatch in number of inferred nucleotides from ctg_nt, smtls_nts, smtls_nts_depth columns. Cannot continue\n' + str(data_dict))
try:
var_nucleotide = data_dict['known_var_change'][-1]
depths = [int(x) for x in depths]
nuc_to_depth = dict(zip(nucleotides, depths))
total_depth = sum(depths)
var_depth = nuc_to_depth.get(var_nucleotide, 0)
percent_depth = round(100 * var_depth / total_depth, 1)
except:
return None
return data_dict['known_var_change'], percent_depth
else:
return None | python | def _get_known_noncoding_het_snp(data_dict):
if data_dict['gene'] == '1':
return None
if data_dict['known_var'] == '1' and data_dict['ref_ctg_effect'] == 'SNP' \
and data_dict['smtls_nts'] != '.' and ';' not in data_dict['smtls_nts']:
nucleotides = data_dict['smtls_nts'].split(',')
depths = data_dict['smtls_nts_depth'].split(',')
if len(nucleotides) != len(depths):
raise Error('Mismatch in number of inferred nucleotides from ctg_nt, smtls_nts, smtls_nts_depth columns. Cannot continue\n' + str(data_dict))
try:
var_nucleotide = data_dict['known_var_change'][-1]
depths = [int(x) for x in depths]
nuc_to_depth = dict(zip(nucleotides, depths))
total_depth = sum(depths)
var_depth = nuc_to_depth.get(var_nucleotide, 0)
percent_depth = round(100 * var_depth / total_depth, 1)
except:
return None
return data_dict['known_var_change'], percent_depth
else:
return None | [
"def",
"_get_known_noncoding_het_snp",
"(",
"data_dict",
")",
":",
"if",
"data_dict",
"[",
"'gene'",
"]",
"==",
"'1'",
":",
"return",
"None",
"if",
"data_dict",
"[",
"'known_var'",
"]",
"==",
"'1'",
"and",
"data_dict",
"[",
"'ref_ctg_effect'",
"]",
"==",
"'SNP'",
"and",
"data_dict",
"[",
"'smtls_nts'",
"]",
"!=",
"'.'",
"and",
"';'",
"not",
"in",
"data_dict",
"[",
"'smtls_nts'",
"]",
":",
"nucleotides",
"=",
"data_dict",
"[",
"'smtls_nts'",
"]",
".",
"split",
"(",
"','",
")",
"depths",
"=",
"data_dict",
"[",
"'smtls_nts_depth'",
"]",
".",
"split",
"(",
"','",
")",
"if",
"len",
"(",
"nucleotides",
")",
"!=",
"len",
"(",
"depths",
")",
":",
"raise",
"Error",
"(",
"'Mismatch in number of inferred nucleotides from ctg_nt, smtls_nts, smtls_nts_depth columns. Cannot continue\\n'",
"+",
"str",
"(",
"data_dict",
")",
")",
"try",
":",
"var_nucleotide",
"=",
"data_dict",
"[",
"'known_var_change'",
"]",
"[",
"-",
"1",
"]",
"depths",
"=",
"[",
"int",
"(",
"x",
")",
"for",
"x",
"in",
"depths",
"]",
"nuc_to_depth",
"=",
"dict",
"(",
"zip",
"(",
"nucleotides",
",",
"depths",
")",
")",
"total_depth",
"=",
"sum",
"(",
"depths",
")",
"var_depth",
"=",
"nuc_to_depth",
".",
"get",
"(",
"var_nucleotide",
",",
"0",
")",
"percent_depth",
"=",
"round",
"(",
"100",
"*",
"var_depth",
"/",
"total_depth",
",",
"1",
")",
"except",
":",
"return",
"None",
"return",
"data_dict",
"[",
"'known_var_change'",
"]",
",",
"percent_depth",
"else",
":",
"return",
"None"
]
| If ref is coding, return None. If the data dict has a known snp, and
samtools made a call, then return the string ref_name_change and the
% of reads supporting the variant type. If noncoding, but no
samtools call, then return None | [
"If",
"ref",
"is",
"coding",
"return",
"None",
".",
"If",
"the",
"data",
"dict",
"has",
"a",
"known",
"snp",
"and",
"samtools",
"made",
"a",
"call",
"then",
"return",
"the",
"string",
"ref_name_change",
"and",
"the",
"%",
"of",
"reads",
"supporting",
"the",
"variant",
"type",
".",
"If",
"noncoding",
"but",
"no",
"samtools",
"call",
"then",
"return",
"None"
]
| train | https://github.com/sanger-pathogens/ariba/blob/16a0b1916ce0e886bd22550ba2d648542977001b/ariba/summary_cluster.py#L236-L264 |
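The heterozygous-SNP check above zips the samtools nucleotide calls with their depths and reports the percentage of reads supporting the known variant's nucleotide. The core arithmetic in isolation, with illustrative values:

```python
# Illustrative values for smtls_nts, smtls_nts_depth and a known variant 'A42G'.
nucleotides = "A,G".split(",")
depths = [int(x) for x in "30,10".split(",")]
var_nucleotide = "G"  # last character of the known_var_change string

nuc_to_depth = dict(zip(nucleotides, depths))
total_depth = sum(depths)
var_depth = nuc_to_depth.get(var_nucleotide, 0)
percent_depth = round(100 * var_depth / total_depth, 1)
print(percent_depth)  # 25.0
```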
sanger-pathogens/ariba | ariba/summary_cluster.py | SummaryCluster._get_nonsynonymous_var | def _get_nonsynonymous_var(data_dict):
'''if data_dict has a non synonymous variant, return string:
ref_name.change. Otherwise return None'''
has_nonsyn = SummaryCluster._has_nonsynonymous(data_dict)
if has_nonsyn == 'no':
return None
elif data_dict['known_var_change'] == data_dict['ref_ctg_change'] == '.' == data_dict['ref_ctg_effect']:
raise Error('Unexpected data in ariba summary... \n' + str(data_dict) + '\n... known_var_change, ref_ctg_change, ref_ctg_effect all equal to ".", but has a non synonymous change. Something is inconsistent. Cannot continue')
else:
if '.' not in [data_dict['known_var_change'], data_dict['ref_ctg_change']] and \
data_dict['known_var_change'] != data_dict['ref_ctg_change']:
raise Error('Unexpected data in ariba summary... \n' + str(data_dict) + '\n... known_var_change != ref_ctg_change. Cannot continue')
var_group = 'novel', None
if data_dict['known_var'] == '1' and data_dict['known_var_change'] != '.':
var_change = data_dict['known_var_change']
if data_dict['var_group'] == '.':
var_group = 'ungrouped', None
else:
var_group = 'grouped', data_dict['var_group']
elif data_dict['ref_ctg_change'] != '.':
var_change = data_dict['ref_ctg_change']
else:
var_change = data_dict['ref_ctg_effect']
return (data_dict['ref_name'], var_change) + var_group | python | def _get_nonsynonymous_var(data_dict):
has_nonsyn = SummaryCluster._has_nonsynonymous(data_dict)
if has_nonsyn == 'no':
return None
elif data_dict['known_var_change'] == data_dict['ref_ctg_change'] == '.' == data_dict['ref_ctg_effect']:
raise Error('Unexpected data in ariba summary... \n' + str(data_dict) + '\n... known_var_change, ref_ctg_change, ref_ctg_effect all equal to ".", but has a non synonymous change. Something is inconsistent. Cannot continue')
else:
if '.' not in [data_dict['known_var_change'], data_dict['ref_ctg_change']] and \
data_dict['known_var_change'] != data_dict['ref_ctg_change']:
raise Error('Unexpected data in ariba summary... \n' + str(data_dict) + '\n... known_var_change != ref_ctg_change. Cannot continue')
var_group = 'novel', None
if data_dict['known_var'] == '1' and data_dict['known_var_change'] != '.':
var_change = data_dict['known_var_change']
if data_dict['var_group'] == '.':
var_group = 'ungrouped', None
else:
var_group = 'grouped', data_dict['var_group']
elif data_dict['ref_ctg_change'] != '.':
var_change = data_dict['ref_ctg_change']
else:
var_change = data_dict['ref_ctg_effect']
return (data_dict['ref_name'], var_change) + var_group | [
"def",
"_get_nonsynonymous_var",
"(",
"data_dict",
")",
":",
"has_nonsyn",
"=",
"SummaryCluster",
".",
"_has_nonsynonymous",
"(",
"data_dict",
")",
"if",
"has_nonsyn",
"==",
"'no'",
":",
"return",
"None",
"elif",
"data_dict",
"[",
"'known_var_change'",
"]",
"==",
"data_dict",
"[",
"'ref_ctg_change'",
"]",
"==",
"'.'",
"==",
"data_dict",
"[",
"'ref_ctg_effect'",
"]",
":",
"raise",
"Error",
"(",
"'Unexpected data in ariba summary... \\n'",
"+",
"str",
"(",
"data_dict",
")",
"+",
"'\\n... known_var_change, ref_ctg_change, ref_ctg_effect all equal to \".\", but has a non synonymous change. Something is inconsistent. Cannot continue'",
")",
"else",
":",
"if",
"'.'",
"not",
"in",
"[",
"data_dict",
"[",
"'known_var_change'",
"]",
",",
"data_dict",
"[",
"'ref_ctg_change'",
"]",
"]",
"and",
"data_dict",
"[",
"'known_var_change'",
"]",
"!=",
"data_dict",
"[",
"'ref_ctg_change'",
"]",
":",
"raise",
"Error",
"(",
"'Unexpected data in ariba summary... \\n'",
"+",
"str",
"(",
"data_dict",
")",
"+",
"'\\n... known_var_change != ref_ctg_change. Cannot continue'",
")",
"var_group",
"=",
"'novel'",
",",
"None",
"if",
"data_dict",
"[",
"'known_var'",
"]",
"==",
"'1'",
"and",
"data_dict",
"[",
"'known_var_change'",
"]",
"!=",
"'.'",
":",
"var_change",
"=",
"data_dict",
"[",
"'known_var_change'",
"]",
"if",
"data_dict",
"[",
"'var_group'",
"]",
"==",
"'.'",
":",
"var_group",
"=",
"'ungrouped'",
",",
"None",
"else",
":",
"var_group",
"=",
"'grouped'",
",",
"data_dict",
"[",
"'var_group'",
"]",
"elif",
"data_dict",
"[",
"'ref_ctg_change'",
"]",
"!=",
"'.'",
":",
"var_change",
"=",
"data_dict",
"[",
"'ref_ctg_change'",
"]",
"else",
":",
"var_change",
"=",
"data_dict",
"[",
"'ref_ctg_effect'",
"]",
"return",
"(",
"data_dict",
"[",
"'ref_name'",
"]",
",",
"var_change",
")",
"+",
"var_group"
]
| if data_dict has a non synonymous variant, return string:
ref_name.change. Otherwise return None | [
"if",
"data_dict",
"has",
"a",
"non",
"synonymous",
"variant",
"return",
"string",
":",
"ref_name",
".",
"change",
".",
"Otherwise",
"return",
"None"
]
| train | https://github.com/sanger-pathogens/ariba/blob/16a0b1916ce0e886bd22550ba2d648542977001b/ariba/summary_cluster.py#L268-L295 |
sanger-pathogens/ariba | ariba/summary_cluster.py | SummaryCluster._has_match | def _has_match(self, assembled_summary):
'''assembled_summary should be output of _to_cluster_summary_assembled'''
if assembled_summary.startswith('yes'):
if self.data[0]['var_only'] == '0' or self._to_cluster_summary_has_known_nonsynonymous(assembled_summary) == 'yes':
return 'yes'
else:
return 'no'
else:
return 'no' | python | def _has_match(self, assembled_summary):
if assembled_summary.startswith('yes'):
if self.data[0]['var_only'] == '0' or self._to_cluster_summary_has_known_nonsynonymous(assembled_summary) == 'yes':
return 'yes'
else:
return 'no'
else:
return 'no' | [
"def",
"_has_match",
"(",
"self",
",",
"assembled_summary",
")",
":",
"if",
"assembled_summary",
".",
"startswith",
"(",
"'yes'",
")",
":",
"if",
"self",
".",
"data",
"[",
"0",
"]",
"[",
"'var_only'",
"]",
"==",
"'0'",
"or",
"self",
".",
"_to_cluster_summary_has_known_nonsynonymous",
"(",
"assembled_summary",
")",
"==",
"'yes'",
":",
"return",
"'yes'",
"else",
":",
"return",
"'no'",
"else",
":",
"return",
"'no'"
]
| assembled_summary should be output of _to_cluster_summary_assembled | [
"assembled_summary",
"should",
"be",
"output",
"of",
"_to_cluster_summary_assembled"
]
| train | https://github.com/sanger-pathogens/ariba/blob/16a0b1916ce0e886bd22550ba2d648542977001b/ariba/summary_cluster.py#L298-L306 |
sanger-pathogens/ariba | ariba/summary_cluster.py | SummaryCluster.has_var_groups | def has_var_groups(self):
'''Returns a set of the variant group ids that this cluster has'''
ids = set()
for d in self.data:
if self._has_known_variant(d) != 'no' and d['var_group'] != '.':
ids.add(d['var_group'])
return ids | python | def has_var_groups(self):
ids = set()
for d in self.data:
if self._has_known_variant(d) != 'no' and d['var_group'] != '.':
ids.add(d['var_group'])
return ids | [
"def",
"has_var_groups",
"(",
"self",
")",
":",
"ids",
"=",
"set",
"(",
")",
"for",
"d",
"in",
"self",
".",
"data",
":",
"if",
"self",
".",
"_has_known_variant",
"(",
"d",
")",
"!=",
"'no'",
"and",
"d",
"[",
"'var_group'",
"]",
"!=",
"'.'",
":",
"ids",
".",
"add",
"(",
"d",
"[",
"'var_group'",
"]",
")",
"return",
"ids"
]
| Returns a set of the variant group ids that this cluster has | [
"Returns",
"a",
"set",
"of",
"the",
"variant",
"group",
"ids",
"that",
"this",
"cluster",
"has"
]
| train | https://github.com/sanger-pathogens/ariba/blob/16a0b1916ce0e886bd22550ba2d648542977001b/ariba/summary_cluster.py#L309-L315 |
sanger-pathogens/ariba | ariba/summary_cluster.py | SummaryCluster.column_summary_data | def column_summary_data(self):
'''Returns a dictionary of column name -> value, for cluster-level results'''
assembled_summary = self._to_cluster_summary_assembled()
pct_id, read_depth = self._pc_id_and_read_depth_of_longest()
columns = {
'assembled': self._to_cluster_summary_assembled(),
'match': self._has_match(assembled_summary),
'ref_seq': self.ref_name,
'pct_id': str(pct_id),
'ctg_cov': str(read_depth),
'known_var': self._to_cluster_summary_has_known_nonsynonymous(assembled_summary),
'novel_var': self._to_cluster_summary_has_novel_nonsynonymous(assembled_summary)
}
return columns | python | def column_summary_data(self):
assembled_summary = self._to_cluster_summary_assembled()
pct_id, read_depth = self._pc_id_and_read_depth_of_longest()
columns = {
'assembled': self._to_cluster_summary_assembled(),
'match': self._has_match(assembled_summary),
'ref_seq': self.ref_name,
'pct_id': str(pct_id),
'ctg_cov': str(read_depth),
'known_var': self._to_cluster_summary_has_known_nonsynonymous(assembled_summary),
'novel_var': self._to_cluster_summary_has_novel_nonsynonymous(assembled_summary)
}
return columns | [
"def",
"column_summary_data",
"(",
"self",
")",
":",
"assembled_summary",
"=",
"self",
".",
"_to_cluster_summary_assembled",
"(",
")",
"pct_id",
",",
"read_depth",
"=",
"self",
".",
"_pc_id_and_read_depth_of_longest",
"(",
")",
"columns",
"=",
"{",
"'assembled'",
":",
"self",
".",
"_to_cluster_summary_assembled",
"(",
")",
",",
"'match'",
":",
"self",
".",
"_has_match",
"(",
"assembled_summary",
")",
",",
"'ref_seq'",
":",
"self",
".",
"ref_name",
",",
"'pct_id'",
":",
"str",
"(",
"pct_id",
")",
",",
"'ctg_cov'",
":",
"str",
"(",
"read_depth",
")",
",",
"'known_var'",
":",
"self",
".",
"_to_cluster_summary_has_known_nonsynonymous",
"(",
"assembled_summary",
")",
",",
"'novel_var'",
":",
"self",
".",
"_to_cluster_summary_has_novel_nonsynonymous",
"(",
"assembled_summary",
")",
"}",
"return",
"columns"
]
| Returns a dictionary of column name -> value, for cluster-level results | [
"Returns",
"a",
"dictionary",
"of",
"column",
"name",
"-",
">",
"value",
"for",
"cluster",
"-",
"level",
"results"
]
| train | https://github.com/sanger-pathogens/ariba/blob/16a0b1916ce0e886bd22550ba2d648542977001b/ariba/summary_cluster.py#L318-L333 |
sanger-pathogens/ariba | ariba/common.py | cat_files | def cat_files(infiles, outfile):
'''Cats all files in list infiles into outfile'''
f_out = pyfastaq.utils.open_file_write(outfile)
for filename in infiles:
if os.path.exists(filename):
f_in = pyfastaq.utils.open_file_read(filename)
for line in f_in:
print(line, end='', file=f_out)
pyfastaq.utils.close(f_in)
pyfastaq.utils.close(f_out) | python | def cat_files(infiles, outfile):
f_out = pyfastaq.utils.open_file_write(outfile)
for filename in infiles:
if os.path.exists(filename):
f_in = pyfastaq.utils.open_file_read(filename)
for line in f_in:
print(line, end='', file=f_out)
pyfastaq.utils.close(f_in)
pyfastaq.utils.close(f_out) | [
"def",
"cat_files",
"(",
"infiles",
",",
"outfile",
")",
":",
"f_out",
"=",
"pyfastaq",
".",
"utils",
".",
"open_file_write",
"(",
"outfile",
")",
"for",
"filename",
"in",
"infiles",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"filename",
")",
":",
"f_in",
"=",
"pyfastaq",
".",
"utils",
".",
"open_file_read",
"(",
"filename",
")",
"for",
"line",
"in",
"f_in",
":",
"print",
"(",
"line",
",",
"end",
"=",
"''",
",",
"file",
"=",
"f_out",
")",
"pyfastaq",
".",
"utils",
".",
"close",
"(",
"f_in",
")",
"pyfastaq",
".",
"utils",
".",
"close",
"(",
"f_out",
")"
]
| Cats all files in list infiles into outfile | [
"Cats",
"all",
"files",
"in",
"list",
"infiles",
"into",
"outfile"
]
| train | https://github.com/sanger-pathogens/ariba/blob/16a0b1916ce0e886bd22550ba2d648542977001b/ariba/common.py#L45-L56 |
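The cat_files entry above streams every existing input file into one output through pyfastaq's open_file_read/open_file_write wrappers, which also handle gzipped files. A minimal standard-library sketch of the same concatenation behaviour, with placeholder file names that are not part of the row above:

import os

def cat_files_plain(infiles, outfile):
    # Append the contents of every input file that exists to a single output file.
    with open(outfile, 'w') as f_out:
        for filename in infiles:
            if os.path.exists(filename):
                with open(filename) as f_in:
                    for line in f_in:
                        f_out.write(line)

# Example: cat_files_plain(['reads_1.log', 'reads_2.log'], 'combined.log')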
sanger-pathogens/ariba | ariba/assembly.py | Assembly._check_spades_log_file | def _check_spades_log_file(logfile):
'''SPAdes can fail with a strange error. Stop everything if this happens'''
f = pyfastaq.utils.open_file_read(logfile)
for line in f:
if line.startswith('== Error == system call for:') and line.rstrip().endswith('finished abnormally, err code: -7'):
pyfastaq.utils.close(f)
print('Error running SPAdes. Cannot continue. This is the error from the log file', logfile, '...', file=sys.stderr)
print(line, file=sys.stderr)
raise Error('Fatal error ("err code: -7") running spades. Cannot continue')
pyfastaq.utils.close(f)
return True | python | def _check_spades_log_file(logfile):
f = pyfastaq.utils.open_file_read(logfile)
for line in f:
if line.startswith('== Error == system call for:') and line.rstrip().endswith('finished abnormally, err code: -7'):
pyfastaq.utils.close(f)
print('Error running SPAdes. Cannot continue. This is the error from the log file', logfile, '...', file=sys.stderr)
print(line, file=sys.stderr)
raise Error('Fatal error ("err code: -7") running spades. Cannot continue')
pyfastaq.utils.close(f)
return True | [
"def",
"_check_spades_log_file",
"(",
"logfile",
")",
":",
"f",
"=",
"pyfastaq",
".",
"utils",
".",
"open_file_read",
"(",
"logfile",
")",
"for",
"line",
"in",
"f",
":",
"if",
"line",
".",
"startswith",
"(",
"'== Error == system call for:'",
")",
"and",
"line",
".",
"rstrip",
"(",
")",
".",
"endswith",
"(",
"'finished abnormally, err code: -7'",
")",
":",
"pyfastaq",
".",
"utils",
".",
"close",
"(",
"f",
")",
"print",
"(",
"'Error running SPAdes. Cannot continue. This is the error from the log file'",
",",
"logfile",
",",
"'...'",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"print",
"(",
"line",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"raise",
"Error",
"(",
"'Fatal error (\"err code: -7\") running spades. Cannot continue'",
")",
"pyfastaq",
".",
"utils",
".",
"close",
"(",
"f",
")",
"return",
"True"
]
| SPAdes can fail with a strange error. Stop everything if this happens | [
"SPAdes",
"can",
"fail",
"with",
"a",
"strange",
"error",
".",
"Stop",
"everything",
"if",
"this",
"happens"
]
| train | https://github.com/sanger-pathogens/ariba/blob/16a0b1916ce0e886bd22550ba2d648542977001b/ariba/assembly.py#L104-L116 |
sanger-pathogens/ariba | ariba/assembly.py | Assembly._fix_contig_orientation | def _fix_contig_orientation(contigs_fa, ref_fa, outfile, min_id=90, min_length=20, breaklen=200):
'''Changes orientation of each contig to match the reference, when possible.
Returns a set of names of contigs that had hits in both orientations to the reference'''
if not os.path.exists(contigs_fa):
raise Error('Cannot fix orientation of assembly contigs because file not found: ' + contigs_fa)
tmp_coords = os.path.join(outfile + '.tmp.rename.coords')
pymummer.nucmer.Runner(
ref_fa,
contigs_fa,
tmp_coords,
min_id=min_id,
min_length=min_length,
breaklen=breaklen,
maxmatch=True,
).run()
to_revcomp = set()
not_revcomp = set()
file_reader = pymummer.coords_file.reader(tmp_coords)
for hit in file_reader:
if hit.on_same_strand():
not_revcomp.add(hit.qry_name)
else:
to_revcomp.add(hit.qry_name)
os.unlink(tmp_coords)
in_both = to_revcomp.intersection(not_revcomp)
f = pyfastaq.utils.open_file_write(outfile)
seq_reader = pyfastaq.sequences.file_reader(contigs_fa)
for seq in seq_reader:
if seq.id in to_revcomp and seq.id not in in_both:
seq.revcomp()
print(seq, file=f)
pyfastaq.utils.close(f)
return in_both | python | def _fix_contig_orientation(contigs_fa, ref_fa, outfile, min_id=90, min_length=20, breaklen=200):
if not os.path.exists(contigs_fa):
raise Error('Cannot fix orientation of assembly contigs because file not found: ' + contigs_fa)
tmp_coords = os.path.join(outfile + '.tmp.rename.coords')
pymummer.nucmer.Runner(
ref_fa,
contigs_fa,
tmp_coords,
min_id=min_id,
min_length=min_length,
breaklen=breaklen,
maxmatch=True,
).run()
to_revcomp = set()
not_revcomp = set()
file_reader = pymummer.coords_file.reader(tmp_coords)
for hit in file_reader:
if hit.on_same_strand():
not_revcomp.add(hit.qry_name)
else:
to_revcomp.add(hit.qry_name)
os.unlink(tmp_coords)
in_both = to_revcomp.intersection(not_revcomp)
f = pyfastaq.utils.open_file_write(outfile)
seq_reader = pyfastaq.sequences.file_reader(contigs_fa)
for seq in seq_reader:
if seq.id in to_revcomp and seq.id not in in_both:
seq.revcomp()
print(seq, file=f)
pyfastaq.utils.close(f)
return in_both | [
"def",
"_fix_contig_orientation",
"(",
"contigs_fa",
",",
"ref_fa",
",",
"outfile",
",",
"min_id",
"=",
"90",
",",
"min_length",
"=",
"20",
",",
"breaklen",
"=",
"200",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"contigs_fa",
")",
":",
"raise",
"Error",
"(",
"'Cannot fix orientation of assembly contigs because file not found: '",
"+",
"contigs_fa",
")",
"tmp_coords",
"=",
"os",
".",
"path",
".",
"join",
"(",
"outfile",
"+",
"'.tmp.rename.coords'",
")",
"pymummer",
".",
"nucmer",
".",
"Runner",
"(",
"ref_fa",
",",
"contigs_fa",
",",
"tmp_coords",
",",
"min_id",
"=",
"min_id",
",",
"min_length",
"=",
"min_length",
",",
"breaklen",
"=",
"breaklen",
",",
"maxmatch",
"=",
"True",
",",
")",
".",
"run",
"(",
")",
"to_revcomp",
"=",
"set",
"(",
")",
"not_revcomp",
"=",
"set",
"(",
")",
"file_reader",
"=",
"pymummer",
".",
"coords_file",
".",
"reader",
"(",
"tmp_coords",
")",
"for",
"hit",
"in",
"file_reader",
":",
"if",
"hit",
".",
"on_same_strand",
"(",
")",
":",
"not_revcomp",
".",
"add",
"(",
"hit",
".",
"qry_name",
")",
"else",
":",
"to_revcomp",
".",
"add",
"(",
"hit",
".",
"qry_name",
")",
"os",
".",
"unlink",
"(",
"tmp_coords",
")",
"in_both",
"=",
"to_revcomp",
".",
"intersection",
"(",
"not_revcomp",
")",
"f",
"=",
"pyfastaq",
".",
"utils",
".",
"open_file_write",
"(",
"outfile",
")",
"seq_reader",
"=",
"pyfastaq",
".",
"sequences",
".",
"file_reader",
"(",
"contigs_fa",
")",
"for",
"seq",
"in",
"seq_reader",
":",
"if",
"seq",
".",
"id",
"in",
"to_revcomp",
"and",
"seq",
".",
"id",
"not",
"in",
"in_both",
":",
"seq",
".",
"revcomp",
"(",
")",
"print",
"(",
"seq",
",",
"file",
"=",
"f",
")",
"pyfastaq",
".",
"utils",
".",
"close",
"(",
"f",
")",
"return",
"in_both"
]
| Changes orientation of each contig to match the reference, when possible.
Returns a set of names of contigs that had hits in both orientations to the reference | [
"Changes",
"orientation",
"of",
"each",
"contig",
"to",
"match",
"the",
"reference",
"when",
"possible",
".",
"Returns",
"a",
"set",
"of",
"names",
"of",
"contigs",
"that",
"had",
"hits",
"in",
"both",
"orientations",
"to",
"the",
"reference"
]
| train | https://github.com/sanger-pathogens/ariba/blob/16a0b1916ce0e886bd22550ba2d648542977001b/ariba/assembly.py#L205-L242 |
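The decision at the heart of _fix_contig_orientation is per contig: flip it if it only matched the reference on the reverse strand, leave it (and report it) if it matched in both orientations. A rough sketch of that bookkeeping, with invented contig names that are not from the row above:

to_revcomp = {'ctg1', 'ctg3'}        # only matched the reference reverse-complemented
not_revcomp = {'ctg2', 'ctg3'}       # matched in the forward orientation
in_both = to_revcomp & not_revcomp   # ambiguous contigs are returned, not flipped

for name in sorted(to_revcomp | not_revcomp):
    if name in to_revcomp and name not in in_both:
        print(name, '-> would be reverse-complemented')
    else:
        print(name, '-> written unchanged')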
sanger-pathogens/ariba | ariba/assembly_compare.py | AssemblyCompare._parse_nucmer_coords_file | def _parse_nucmer_coords_file(coords_file, ref_name):
'''Input is coords file made by self._run_nucmer. Reference should have one sequence only.
ref_name is name fo the reference sequence, to sanity check the coords file.
Returns dictionary. Key = assembly contig name. Value = list of nucmer hits to that contig'''
file_reader = pymummer.coords_file.reader(coords_file)
nucmer_hits = {}
for hit in file_reader:
assert hit.ref_name == ref_name
contig = hit.qry_name
if contig not in nucmer_hits:
nucmer_hits[contig] = []
nucmer_hits[contig].append(copy.copy(hit))
return nucmer_hits | python | def _parse_nucmer_coords_file(coords_file, ref_name):
file_reader = pymummer.coords_file.reader(coords_file)
nucmer_hits = {}
for hit in file_reader:
assert hit.ref_name == ref_name
contig = hit.qry_name
if contig not in nucmer_hits:
nucmer_hits[contig] = []
nucmer_hits[contig].append(copy.copy(hit))
return nucmer_hits | [
"def",
"_parse_nucmer_coords_file",
"(",
"coords_file",
",",
"ref_name",
")",
":",
"file_reader",
"=",
"pymummer",
".",
"coords_file",
".",
"reader",
"(",
"coords_file",
")",
"nucmer_hits",
"=",
"{",
"}",
"for",
"hit",
"in",
"file_reader",
":",
"assert",
"hit",
".",
"ref_name",
"==",
"ref_name",
"contig",
"=",
"hit",
".",
"qry_name",
"if",
"contig",
"not",
"in",
"nucmer_hits",
":",
"nucmer_hits",
"[",
"contig",
"]",
"=",
"[",
"]",
"nucmer_hits",
"[",
"contig",
"]",
".",
"append",
"(",
"copy",
".",
"copy",
"(",
"hit",
")",
")",
"return",
"nucmer_hits"
]
| Input is coords file made by self._run_nucmer. Reference should have one sequence only.
ref_name is name fo the reference sequence, to sanity check the coords file.
Returns dictionary. Key = assembly contig name. Value = list of nucmer hits to that contig | [
"Input",
"is",
"coords",
"file",
"made",
"by",
"self",
".",
"_run_nucmer",
".",
"Reference",
"should",
"have",
"one",
"sequence",
"only",
".",
"ref_name",
"is",
"name",
"fo",
"the",
"reference",
"sequence",
"to",
"sanity",
"check",
"the",
"coords",
"file",
".",
"Returns",
"dictionary",
".",
"Key",
"=",
"assembly",
"contig",
"name",
".",
"Value",
"=",
"list",
"of",
"nucmer",
"hits",
"to",
"that",
"contig"
]
| train | https://github.com/sanger-pathogens/ariba/blob/16a0b1916ce0e886bd22550ba2d648542977001b/ariba/assembly_compare.py#L61-L74 |
sanger-pathogens/ariba | ariba/assembly_compare.py | AssemblyCompare._nucmer_hits_to_percent_identity | def _nucmer_hits_to_percent_identity(nucmer_hits):
'''Input is hits made by self._parse_nucmer_coords_file.
Returns dictionary. key = contig name. Value = percent identity of hits to that contig'''
percent_identities = {}
max_lengths = {}
for contig in nucmer_hits:
max_length = -1
percent_identity = 0
for hit in nucmer_hits[contig]:
if hit.hit_length_qry > max_length:
max_length = hit.hit_length_qry
percent_identity = hit.percent_identity
percent_identities[contig] = percent_identity
return percent_identities | python | def _nucmer_hits_to_percent_identity(nucmer_hits):
percent_identities = {}
max_lengths = {}
for contig in nucmer_hits:
max_length = -1
percent_identity = 0
for hit in nucmer_hits[contig]:
if hit.hit_length_qry > max_length:
max_length = hit.hit_length_qry
percent_identity = hit.percent_identity
percent_identities[contig] = percent_identity
return percent_identities | [
"def",
"_nucmer_hits_to_percent_identity",
"(",
"nucmer_hits",
")",
":",
"percent_identities",
"=",
"{",
"}",
"max_lengths",
"=",
"{",
"}",
"for",
"contig",
"in",
"nucmer_hits",
":",
"max_length",
"=",
"-",
"1",
"percent_identity",
"=",
"0",
"for",
"hit",
"in",
"nucmer_hits",
"[",
"contig",
"]",
":",
"if",
"hit",
".",
"hit_length_qry",
">",
"max_length",
":",
"max_length",
"=",
"hit",
".",
"hit_length_qry",
"percent_identity",
"=",
"hit",
".",
"percent_identity",
"percent_identities",
"[",
"contig",
"]",
"=",
"percent_identity",
"return",
"percent_identities"
]
| Input is hits made by self._parse_nucmer_coords_file.
Returns dictionary. key = contig name. Value = percent identity of hits to that contig | [
"Input",
"is",
"hits",
"made",
"by",
"self",
".",
"_parse_nucmer_coords_file",
".",
"Returns",
"dictionary",
".",
"key",
"=",
"contig",
"name",
".",
"Value",
"=",
"percent",
"identity",
"of",
"hits",
"to",
"that",
"contig"
]
| train | https://github.com/sanger-pathogens/ariba/blob/16a0b1916ce0e886bd22550ba2d648542977001b/ariba/assembly_compare.py#L78-L93 |
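Note that the function above records the percent identity of the longest hit per contig, not the best identity. With plain tuples standing in for nucmer hit objects (invented numbers, for illustration only):

# (hit_length_qry, percent_identity) for the hits of one contig
hits = [(120, 98.5), (400, 95.0), (90, 100.0)]

max_length = -1
percent_identity = 0
for length, pid in hits:
    if length > max_length:
        max_length = length
        percent_identity = pid

print(percent_identity)   # 95.0 - taken from the 400 bp hit, not from the 100.0 one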
sanger-pathogens/ariba | ariba/assembly_compare.py | AssemblyCompare._nucmer_hits_to_assembly_coords | def _nucmer_hits_to_assembly_coords(nucmer_hits):
'''Input is hits made by self._parse_nucmer_coords_file.
Returns dictionary. key = contig name. Value = list of coords that match
to the reference gene'''
coords = {}
for l in nucmer_hits.values():
for hit in l:
if hit.qry_name not in coords:
coords[hit.qry_name] = []
coords[hit.qry_name].append(hit.qry_coords())
for scaff in coords:
pyfastaq.intervals.merge_overlapping_in_list(coords[scaff])
return coords | python | def _nucmer_hits_to_assembly_coords(nucmer_hits):
coords = {}
for l in nucmer_hits.values():
for hit in l:
if hit.qry_name not in coords:
coords[hit.qry_name] = []
coords[hit.qry_name].append(hit.qry_coords())
for scaff in coords:
pyfastaq.intervals.merge_overlapping_in_list(coords[scaff])
return coords | [
"def",
"_nucmer_hits_to_assembly_coords",
"(",
"nucmer_hits",
")",
":",
"coords",
"=",
"{",
"}",
"for",
"l",
"in",
"nucmer_hits",
".",
"values",
"(",
")",
":",
"for",
"hit",
"in",
"l",
":",
"if",
"hit",
".",
"qry_name",
"not",
"in",
"coords",
":",
"coords",
"[",
"hit",
".",
"qry_name",
"]",
"=",
"[",
"]",
"coords",
"[",
"hit",
".",
"qry_name",
"]",
".",
"append",
"(",
"hit",
".",
"qry_coords",
"(",
")",
")",
"for",
"scaff",
"in",
"coords",
":",
"pyfastaq",
".",
"intervals",
".",
"merge_overlapping_in_list",
"(",
"coords",
"[",
"scaff",
"]",
")",
"return",
"coords"
]
| Input is hits made by self._parse_nucmer_coords_file.
Returns dictionary. key = contig name. Value = list of coords that match
to the reference gene | [
"Input",
"is",
"hits",
"made",
"by",
"self",
".",
"_parse_nucmer_coords_file",
".",
"Returns",
"dictionary",
".",
"key",
"=",
"contig",
"name",
".",
"Value",
"=",
"list",
"of",
"coords",
"that",
"match",
"to",
"the",
"reference",
"gene"
]
| train | https://github.com/sanger-pathogens/ariba/blob/16a0b1916ce0e886bd22550ba2d648542977001b/ariba/assembly_compare.py#L97-L111 |
sanger-pathogens/ariba | ariba/assembly_compare.py | AssemblyCompare.nucmer_hits_to_ref_coords | def nucmer_hits_to_ref_coords(cls, nucmer_hits, contig=None):
'''Input is hits made by self._parse_nucmer_coords_file.
Returns dictionary. Key = contig name. Value = list of coords in the
reference sequence for that contig.
if contig=contig_name, then just gets the ref coords from that contig,
instead of using all the contigs'''
coords = []
if contig is None:
coords = {key: [] for key in nucmer_hits.keys()}
else:
coords = {contig: []}
for key in coords:
coords[key] = [hit.ref_coords() for hit in nucmer_hits[key]]
pyfastaq.intervals.merge_overlapping_in_list(coords[key])
return coords | python | def nucmer_hits_to_ref_coords(cls, nucmer_hits, contig=None):
coords = []
if contig is None:
coords = {key: [] for key in nucmer_hits.keys()}
else:
coords = {contig: []}
for key in coords:
coords[key] = [hit.ref_coords() for hit in nucmer_hits[key]]
pyfastaq.intervals.merge_overlapping_in_list(coords[key])
return coords | [
"def",
"nucmer_hits_to_ref_coords",
"(",
"cls",
",",
"nucmer_hits",
",",
"contig",
"=",
"None",
")",
":",
"coords",
"=",
"[",
"]",
"if",
"contig",
"is",
"None",
":",
"coords",
"=",
"{",
"key",
":",
"[",
"]",
"for",
"key",
"in",
"nucmer_hits",
".",
"keys",
"(",
")",
"}",
"else",
":",
"coords",
"=",
"{",
"contig",
":",
"[",
"]",
"}",
"for",
"key",
"in",
"coords",
":",
"coords",
"[",
"key",
"]",
"=",
"[",
"hit",
".",
"ref_coords",
"(",
")",
"for",
"hit",
"in",
"nucmer_hits",
"[",
"key",
"]",
"]",
"pyfastaq",
".",
"intervals",
".",
"merge_overlapping_in_list",
"(",
"coords",
"[",
"key",
"]",
")",
"return",
"coords"
]
| Input is hits made by self._parse_nucmer_coords_file.
Returns dictionary. Key = contig name. Value = list of coords in the
reference sequence for that contig.
if contig=contig_name, then just gets the ref coords from that contig,
instead of using all the contigs | [
"Input",
"is",
"hits",
"made",
"by",
"self",
".",
"_parse_nucmer_coords_file",
".",
"Returns",
"dictionary",
".",
"Key",
"=",
"contig",
"name",
".",
"Value",
"=",
"list",
"of",
"coords",
"in",
"the",
"reference",
"sequence",
"for",
"that",
"contig",
".",
"if",
"contig",
"=",
"contig_name",
"then",
"just",
"gets",
"the",
"ref",
"coords",
"from",
"that",
"contig",
"instead",
"of",
"using",
"all",
"the",
"contigs"
]
| train | https://github.com/sanger-pathogens/ariba/blob/16a0b1916ce0e886bd22550ba2d648542977001b/ariba/assembly_compare.py#L119-L135 |
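This entry and its neighbours rely on pyfastaq's interval helpers to merge overlapping reference coordinates in place. A small sketch of that step, assuming pyfastaq is installed and using only the calls already shown in the rows above (the coordinates are invented):

import pyfastaq.intervals

coords = [
    pyfastaq.intervals.Interval(0, 99),
    pyfastaq.intervals.Interval(50, 149),
    pyfastaq.intervals.Interval(300, 399),
]
pyfastaq.intervals.merge_overlapping_in_list(coords)   # merges in place
for i in coords:
    print(i.start, i.end)   # 0 149, then 300 399 - the first two intervals collapse into one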
sanger-pathogens/ariba | ariba/assembly_compare.py | AssemblyCompare.nucmer_hits_to_ref_and_qry_coords | def nucmer_hits_to_ref_and_qry_coords(cls, nucmer_hits, contig=None):
'''Same as nucmer_hits_to_ref_coords, except removes containing hits first,
and returns ref and qry coords lists'''
if contig is None:
ctg_coords = {key: [] for key in nucmer_hits.keys()}
else:
ctg_coords = {contig: []}
ref_coords = {}
for key in ctg_coords:
hits = copy.copy(nucmer_hits[key])
hits.sort(key=lambda x: len(x.ref_coords()))
if len(hits) > 1:
i = 0
while i < len(hits) - 1:
c1 = hits[i].ref_coords()
c2 = hits[i+1].ref_coords()
if c2.contains(c1):
hits.pop(i)
else:
i += 1
ref_coords[key] = [hit.ref_coords() for hit in hits]
ctg_coords[key] = [hit.qry_coords() for hit in hits]
pyfastaq.intervals.merge_overlapping_in_list(ref_coords[key])
pyfastaq.intervals.merge_overlapping_in_list(ctg_coords[key])
return ctg_coords, ref_coords | python | def nucmer_hits_to_ref_and_qry_coords(cls, nucmer_hits, contig=None):
if contig is None:
ctg_coords = {key: [] for key in nucmer_hits.keys()}
else:
ctg_coords = {contig: []}
ref_coords = {}
for key in ctg_coords:
hits = copy.copy(nucmer_hits[key])
hits.sort(key=lambda x: len(x.ref_coords()))
if len(hits) > 1:
i = 0
while i < len(hits) - 1:
c1 = hits[i].ref_coords()
c2 = hits[i+1].ref_coords()
if c2.contains(c1):
hits.pop(i)
else:
i += 1
ref_coords[key] = [hit.ref_coords() for hit in hits]
ctg_coords[key] = [hit.qry_coords() for hit in hits]
pyfastaq.intervals.merge_overlapping_in_list(ref_coords[key])
pyfastaq.intervals.merge_overlapping_in_list(ctg_coords[key])
return ctg_coords, ref_coords | [
"def",
"nucmer_hits_to_ref_and_qry_coords",
"(",
"cls",
",",
"nucmer_hits",
",",
"contig",
"=",
"None",
")",
":",
"if",
"contig",
"is",
"None",
":",
"ctg_coords",
"=",
"{",
"key",
":",
"[",
"]",
"for",
"key",
"in",
"nucmer_hits",
".",
"keys",
"(",
")",
"}",
"else",
":",
"ctg_coords",
"=",
"{",
"contig",
":",
"[",
"]",
"}",
"ref_coords",
"=",
"{",
"}",
"for",
"key",
"in",
"ctg_coords",
":",
"hits",
"=",
"copy",
".",
"copy",
"(",
"nucmer_hits",
"[",
"key",
"]",
")",
"hits",
".",
"sort",
"(",
"key",
"=",
"lambda",
"x",
":",
"len",
"(",
"x",
".",
"ref_coords",
"(",
")",
")",
")",
"if",
"len",
"(",
"hits",
")",
">",
"1",
":",
"i",
"=",
"0",
"while",
"i",
"<",
"len",
"(",
"hits",
")",
"-",
"1",
":",
"c1",
"=",
"hits",
"[",
"i",
"]",
".",
"ref_coords",
"(",
")",
"c2",
"=",
"hits",
"[",
"i",
"+",
"1",
"]",
".",
"ref_coords",
"(",
")",
"if",
"c2",
".",
"contains",
"(",
"c1",
")",
":",
"hits",
".",
"pop",
"(",
"i",
")",
"else",
":",
"i",
"+=",
"1",
"ref_coords",
"[",
"key",
"]",
"=",
"[",
"hit",
".",
"ref_coords",
"(",
")",
"for",
"hit",
"in",
"hits",
"]",
"ctg_coords",
"[",
"key",
"]",
"=",
"[",
"hit",
".",
"qry_coords",
"(",
")",
"for",
"hit",
"in",
"hits",
"]",
"pyfastaq",
".",
"intervals",
".",
"merge_overlapping_in_list",
"(",
"ref_coords",
"[",
"key",
"]",
")",
"pyfastaq",
".",
"intervals",
".",
"merge_overlapping_in_list",
"(",
"ctg_coords",
"[",
"key",
"]",
")",
"return",
"ctg_coords",
",",
"ref_coords"
]
| Same as nucmer_hits_to_ref_coords, except removes containing hits first,
and returns ref and qry coords lists | [
"Same",
"as",
"nucmer_hits_to_ref_coords",
"except",
"removes",
"containing",
"hits",
"first",
"and",
"returns",
"ref",
"and",
"qry",
"coords",
"lists"
]
| train | https://github.com/sanger-pathogens/ariba/blob/16a0b1916ce0e886bd22550ba2d648542977001b/ariba/assembly_compare.py#L139-L168 |
sanger-pathogens/ariba | ariba/assembly_compare.py | AssemblyCompare.ref_cov_per_contig | def ref_cov_per_contig(nucmer_hits):
'''Input is hits made by self._parse_nucmer_coords_file.
Returns dictionary. key = contig name. Value = number of bases that
match to the reference sequence.'''
coords = AssemblyCompare.nucmer_hits_to_ref_coords(nucmer_hits)
return {x: pyfastaq.intervals.length_sum_from_list(coords[x]) for x in coords} | python | def ref_cov_per_contig(nucmer_hits):
coords = AssemblyCompare.nucmer_hits_to_ref_coords(nucmer_hits)
return {x: pyfastaq.intervals.length_sum_from_list(coords[x]) for x in coords} | [
"def",
"ref_cov_per_contig",
"(",
"nucmer_hits",
")",
":",
"coords",
"=",
"AssemblyCompare",
".",
"nucmer_hits_to_ref_coords",
"(",
"nucmer_hits",
")",
"return",
"{",
"x",
":",
"pyfastaq",
".",
"intervals",
".",
"length_sum_from_list",
"(",
"coords",
"[",
"x",
"]",
")",
"for",
"x",
"in",
"coords",
"}"
]
| Input is hits made by self._parse_nucmer_coords_file.
Returns dictionary. key = contig name. Value = number of bases that
match to the reference sequence. | [
"Input",
"is",
"hits",
"made",
"by",
"self",
".",
"_parse_nucmer_coords_file",
".",
"Returns",
"dictionary",
".",
"key",
"=",
"contig",
"name",
".",
"Value",
"=",
"number",
"of",
"bases",
"that",
"match",
"to",
"the",
"reference",
"sequence",
"."
]
| train | https://github.com/sanger-pathogens/ariba/blob/16a0b1916ce0e886bd22550ba2d648542977001b/ariba/assembly_compare.py#L172-L177 |
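The per-contig coverage above is just the summed length of the merged intervals. Continuing the interval sketch with invented coordinates:

import pyfastaq.intervals

merged = [pyfastaq.intervals.Interval(0, 149), pyfastaq.intervals.Interval(300, 399)]
print(pyfastaq.intervals.length_sum_from_list(merged))   # 250, since both interval ends are inclusive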
sanger-pathogens/ariba | ariba/assembly_compare.py | AssemblyCompare._get_assembled_reference_sequences | def _get_assembled_reference_sequences(nucmer_hits, ref_sequence, assembly):
'''nucmer_hits = hits made by self._parse_nucmer_coords_file.
ref_gene = reference sequence (pyfastaq.sequences.Fasta object)
assembly = dictionary of contig name -> contig.
Makes a set of Fasta objects of each piece of assembly that
corresponds to the reference sequeunce.'''
sequences = {}
for contig in sorted(nucmer_hits):
for hit in nucmer_hits[contig]:
qry_coords = hit.qry_coords()
fa = assembly[hit.qry_name].subseq(qry_coords.start, qry_coords.end + 1)
if hit.on_same_strand():
strand = '+'
else:
fa.revcomp()
strand = '-'
ref_coords = hit.ref_coords()
fa.id = '.'.join([
ref_sequence.id,
str(ref_coords.start + 1),
str(ref_coords.end + 1),
contig,
str(qry_coords.start + 1),
str(qry_coords.end + 1),
strand
])
if hit.hit_length_ref == hit.ref_length:
fa.id += '.complete'
sequences[fa.id] = fa
return sequences | python | def _get_assembled_reference_sequences(nucmer_hits, ref_sequence, assembly):
sequences = {}
for contig in sorted(nucmer_hits):
for hit in nucmer_hits[contig]:
qry_coords = hit.qry_coords()
fa = assembly[hit.qry_name].subseq(qry_coords.start, qry_coords.end + 1)
if hit.on_same_strand():
strand = '+'
else:
fa.revcomp()
strand = '-'
ref_coords = hit.ref_coords()
fa.id = '.'.join([
ref_sequence.id,
str(ref_coords.start + 1),
str(ref_coords.end + 1),
contig,
str(qry_coords.start + 1),
str(qry_coords.end + 1),
strand
])
if hit.hit_length_ref == hit.ref_length:
fa.id += '.complete'
sequences[fa.id] = fa
return sequences | [
"def",
"_get_assembled_reference_sequences",
"(",
"nucmer_hits",
",",
"ref_sequence",
",",
"assembly",
")",
":",
"sequences",
"=",
"{",
"}",
"for",
"contig",
"in",
"sorted",
"(",
"nucmer_hits",
")",
":",
"for",
"hit",
"in",
"nucmer_hits",
"[",
"contig",
"]",
":",
"qry_coords",
"=",
"hit",
".",
"qry_coords",
"(",
")",
"fa",
"=",
"assembly",
"[",
"hit",
".",
"qry_name",
"]",
".",
"subseq",
"(",
"qry_coords",
".",
"start",
",",
"qry_coords",
".",
"end",
"+",
"1",
")",
"if",
"hit",
".",
"on_same_strand",
"(",
")",
":",
"strand",
"=",
"'+'",
"else",
":",
"fa",
".",
"revcomp",
"(",
")",
"strand",
"=",
"'-'",
"ref_coords",
"=",
"hit",
".",
"ref_coords",
"(",
")",
"fa",
".",
"id",
"=",
"'.'",
".",
"join",
"(",
"[",
"ref_sequence",
".",
"id",
",",
"str",
"(",
"ref_coords",
".",
"start",
"+",
"1",
")",
",",
"str",
"(",
"ref_coords",
".",
"end",
"+",
"1",
")",
",",
"contig",
",",
"str",
"(",
"qry_coords",
".",
"start",
"+",
"1",
")",
",",
"str",
"(",
"qry_coords",
".",
"end",
"+",
"1",
")",
",",
"strand",
"]",
")",
"if",
"hit",
".",
"hit_length_ref",
"==",
"hit",
".",
"ref_length",
":",
"fa",
".",
"id",
"+=",
"'.complete'",
"sequences",
"[",
"fa",
".",
"id",
"]",
"=",
"fa",
"return",
"sequences"
]
| nucmer_hits = hits made by self._parse_nucmer_coords_file.
ref_gene = reference sequence (pyfastaq.sequences.Fasta object)
assembly = dictionary of contig name -> contig.
Makes a set of Fasta objects of each piece of assembly that
corresponds to the reference sequeunce. | [
"nucmer_hits",
"=",
"hits",
"made",
"by",
"self",
".",
"_parse_nucmer_coords_file",
".",
"ref_gene",
"=",
"reference",
"sequence",
"(",
"pyfastaq",
".",
"sequences",
".",
"Fasta",
"object",
")",
"assembly",
"=",
"dictionary",
"of",
"contig",
"name",
"-",
">",
"contig",
".",
"Makes",
"a",
"set",
"of",
"Fasta",
"objects",
"of",
"each",
"piece",
"of",
"assembly",
"that",
"corresponds",
"to",
"the",
"reference",
"sequeunce",
"."
]
| train | https://github.com/sanger-pathogens/ariba/blob/16a0b1916ce0e886bd22550ba2d648542977001b/ariba/assembly_compare.py#L181-L214 |
sanger-pathogens/ariba | ariba/assembly_compare.py | AssemblyCompare._whole_gene_covered_by_nucmer_hits | def _whole_gene_covered_by_nucmer_hits(nucmer_hits, ref_seq, percent_threshold, max_nt_extend):
'''Returns true iff the reference sequence is covered by nucmer hits.
nucmer_hits = hits made by self._parse_nucmer_coords_file.
Counts as covered if (total ref bases covered) / len(ref_seq) >= threshold'''
coords = AssemblyCompare.nucmer_hits_to_ref_coords(nucmer_hits)
covered = []
for coords_list in coords.values():
covered.extend(coords_list)
pyfastaq.intervals.merge_overlapping_in_list(covered)
return (2 * max_nt_extend + pyfastaq.intervals.length_sum_from_list(covered)) / len(ref_seq) >= percent_threshold | python | def _whole_gene_covered_by_nucmer_hits(nucmer_hits, ref_seq, percent_threshold, max_nt_extend):
coords = AssemblyCompare.nucmer_hits_to_ref_coords(nucmer_hits)
covered = []
for coords_list in coords.values():
covered.extend(coords_list)
pyfastaq.intervals.merge_overlapping_in_list(covered)
return (2 * max_nt_extend + pyfastaq.intervals.length_sum_from_list(covered)) / len(ref_seq) >= percent_threshold | [
"def",
"_whole_gene_covered_by_nucmer_hits",
"(",
"nucmer_hits",
",",
"ref_seq",
",",
"percent_threshold",
",",
"max_nt_extend",
")",
":",
"coords",
"=",
"AssemblyCompare",
".",
"nucmer_hits_to_ref_coords",
"(",
"nucmer_hits",
")",
"covered",
"=",
"[",
"]",
"for",
"coords_list",
"in",
"coords",
".",
"values",
"(",
")",
":",
"covered",
".",
"extend",
"(",
"coords_list",
")",
"pyfastaq",
".",
"intervals",
".",
"merge_overlapping_in_list",
"(",
"covered",
")",
"return",
"(",
"2",
"*",
"max_nt_extend",
"+",
"pyfastaq",
".",
"intervals",
".",
"length_sum_from_list",
"(",
"covered",
")",
")",
"/",
"len",
"(",
"ref_seq",
")",
">=",
"percent_threshold"
]
| Returns true iff the reference sequence is covered by nucmer hits.
nucmer_hits = hits made by self._parse_nucmer_coords_file.
Counts as covered if (total ref bases covered) / len(ref_seq) >= threshold | [
"Returns",
"true",
"iff",
"the",
"reference",
"sequence",
"is",
"covered",
"by",
"nucmer",
"hits",
".",
"nucmer_hits",
"=",
"hits",
"made",
"by",
"self",
".",
"_parse_nucmer_coords_file",
".",
"Counts",
"as",
"covered",
"if",
"(",
"total",
"ref",
"bases",
"covered",
")",
"/",
"len",
"(",
"ref_seq",
")",
">",
"=",
"threshold"
]
| train | https://github.com/sanger-pathogens/ariba/blob/16a0b1916ce0e886bd22550ba2d648542977001b/ariba/assembly_compare.py#L218-L227 |
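The pass/fail test above reduces to a single ratio: merged covered bases, padded by 2 * max_nt_extend, divided by the reference length, compared against the threshold (treated as a fraction, matching the comparison in the code). A worked example with invented numbers:

ref_length = 1000
covered_bases = 940        # summed length of the merged nucmer hit intervals
max_nt_extend = 30
percent_threshold = 0.95   # a fraction, not a percentage, in this comparison

print((2 * max_nt_extend + covered_bases) / ref_length >= percent_threshold)   # True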
sanger-pathogens/ariba | ariba/assembly_compare.py | AssemblyCompare._ref_has_region_assembled_twice | def _ref_has_region_assembled_twice(nucmer_hits, ref_seq, threshold):
'''Returns true iff there is a part of the reference that is assembled
more than once (ie covered by >1 nucmer hit).
Needs a minimum proportin of the ref to be assembled more than once,
determined by threshold.
nucmer_hits = hits made by self._parse_nucmer_coords_file.'''
coords = AssemblyCompare.nucmer_hits_to_ref_coords(nucmer_hits)
covered = []
for coords_list in coords.values():
covered.extend(coords_list)
covered.sort()
if len(covered) <= 1:
return False
coverage = {}
for i in covered:
for j in range(i.start, i.end + 1):
coverage[j] = coverage.get(j, 0) + 1
bases_depth_at_least_two = len([1 for x in coverage.values() if x > 1])
return bases_depth_at_least_two / len(ref_seq) >= threshold | python | def _ref_has_region_assembled_twice(nucmer_hits, ref_seq, threshold):
coords = AssemblyCompare.nucmer_hits_to_ref_coords(nucmer_hits)
covered = []
for coords_list in coords.values():
covered.extend(coords_list)
covered.sort()
if len(covered) <= 1:
return False
coverage = {}
for i in covered:
for j in range(i.start, i.end + 1):
coverage[j] = coverage.get(j, 0) + 1
bases_depth_at_least_two = len([1 for x in coverage.values() if x > 1])
return bases_depth_at_least_two / len(ref_seq) >= threshold | [
"def",
"_ref_has_region_assembled_twice",
"(",
"nucmer_hits",
",",
"ref_seq",
",",
"threshold",
")",
":",
"coords",
"=",
"AssemblyCompare",
".",
"nucmer_hits_to_ref_coords",
"(",
"nucmer_hits",
")",
"covered",
"=",
"[",
"]",
"for",
"coords_list",
"in",
"coords",
".",
"values",
"(",
")",
":",
"covered",
".",
"extend",
"(",
"coords_list",
")",
"covered",
".",
"sort",
"(",
")",
"if",
"len",
"(",
"covered",
")",
"<=",
"1",
":",
"return",
"False",
"coverage",
"=",
"{",
"}",
"for",
"i",
"in",
"covered",
":",
"for",
"j",
"in",
"range",
"(",
"i",
".",
"start",
",",
"i",
".",
"end",
"+",
"1",
")",
":",
"coverage",
"[",
"j",
"]",
"=",
"coverage",
".",
"get",
"(",
"j",
",",
"0",
")",
"+",
"1",
"bases_depth_at_least_two",
"=",
"len",
"(",
"[",
"1",
"for",
"x",
"in",
"coverage",
".",
"values",
"(",
")",
"if",
"x",
">",
"1",
"]",
")",
"return",
"bases_depth_at_least_two",
"/",
"len",
"(",
"ref_seq",
")",
">=",
"threshold"
]
| Returns true iff there is a part of the reference that is assembled
more than once (ie covered by >1 nucmer hit).
Needs a minimum proportin of the ref to be assembled more than once,
determined by threshold.
nucmer_hits = hits made by self._parse_nucmer_coords_file. | [
"Returns",
"true",
"iff",
"there",
"is",
"a",
"part",
"of",
"the",
"reference",
"that",
"is",
"assembled",
"more",
"than",
"once",
"(",
"ie",
"covered",
"by",
">",
"1",
"nucmer",
"hit",
")",
".",
"Needs",
"a",
"minimum",
"proportin",
"of",
"the",
"ref",
"to",
"be",
"assembled",
"more",
"than",
"once",
"determined",
"by",
"threshold",
".",
"nucmer_hits",
"=",
"hits",
"made",
"by",
"self",
".",
"_parse_nucmer_coords_file",
"."
]
| train | https://github.com/sanger-pathogens/ariba/blob/16a0b1916ce0e886bd22550ba2d648542977001b/ariba/assembly_compare.py#L231-L252 |
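The check above builds a per-base depth map of the reference and asks what fraction of bases is covered by more than one nucmer hit. A compact sketch with two overlapping, invented intervals:

ref_length = 100
hit_intervals = [(0, 59), (40, 99)]   # two hits overlapping on bases 40-59

coverage = {}
for start, end in hit_intervals:
    for pos in range(start, end + 1):
        coverage[pos] = coverage.get(pos, 0) + 1

assembled_twice = sum(1 for depth in coverage.values() if depth > 1)
print(assembled_twice / ref_length)   # 0.2, which is then compared against the threshold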
sanger-pathogens/ariba | ariba/assembly_compare.py | AssemblyCompare._ref_covered_by_at_least_one_full_length_contig | def _ref_covered_by_at_least_one_full_length_contig(nucmer_hits, percent_threshold, max_nt_extend):
'''Returns true iff there exists a contig that completely
covers the reference sequence
nucmer_hits = hits made by self._parse_nucmer_coords_file.'''
for l in nucmer_hits.values():
for hit in l:
if ( (2 * max_nt_extend) + len(hit.ref_coords()) ) / hit.ref_length >= percent_threshold:
return True
return False | python | def _ref_covered_by_at_least_one_full_length_contig(nucmer_hits, percent_threshold, max_nt_extend):
for l in nucmer_hits.values():
for hit in l:
if ( (2 * max_nt_extend) + len(hit.ref_coords()) ) / hit.ref_length >= percent_threshold:
return True
return False | [
"def",
"_ref_covered_by_at_least_one_full_length_contig",
"(",
"nucmer_hits",
",",
"percent_threshold",
",",
"max_nt_extend",
")",
":",
"for",
"l",
"in",
"nucmer_hits",
".",
"values",
"(",
")",
":",
"for",
"hit",
"in",
"l",
":",
"if",
"(",
"(",
"2",
"*",
"max_nt_extend",
")",
"+",
"len",
"(",
"hit",
".",
"ref_coords",
"(",
")",
")",
")",
"/",
"hit",
".",
"ref_length",
">=",
"percent_threshold",
":",
"return",
"True",
"return",
"False"
]
| Returns true iff there exists a contig that completely
covers the reference sequence
nucmer_hits = hits made by self._parse_nucmer_coords_file. | [
"Returns",
"true",
"iff",
"there",
"exists",
"a",
"contig",
"that",
"completely",
"covers",
"the",
"reference",
"sequence",
"nucmer_hits",
"=",
"hits",
"made",
"by",
"self",
".",
"_parse_nucmer_coords_file",
"."
]
| train | https://github.com/sanger-pathogens/ariba/blob/16a0b1916ce0e886bd22550ba2d648542977001b/ariba/assembly_compare.py#L352-L360 |
sanger-pathogens/ariba | ariba/assembly_compare.py | AssemblyCompare.nucmer_hit_containing_reference_position | def nucmer_hit_containing_reference_position(nucmer_hits, ref_name, ref_position, qry_name=None):
'''Returns the first nucmer match found that contains the given
reference location. nucmer_hits = hits made by self._parse_nucmer_coords_file.
Returns None if no matching hit found'''
for contig_name in nucmer_hits:
for hit in nucmer_hits[contig_name]:
if hit.ref_name == ref_name and (qry_name is None or qry_name == hit.qry_name) and hit.ref_coords().distance_to_point(ref_position) == 0:
return hit
return None | python | def nucmer_hit_containing_reference_position(nucmer_hits, ref_name, ref_position, qry_name=None):
for contig_name in nucmer_hits:
for hit in nucmer_hits[contig_name]:
if hit.ref_name == ref_name and (qry_name is None or qry_name == hit.qry_name) and hit.ref_coords().distance_to_point(ref_position) == 0:
return hit
return None | [
"def",
"nucmer_hit_containing_reference_position",
"(",
"nucmer_hits",
",",
"ref_name",
",",
"ref_position",
",",
"qry_name",
"=",
"None",
")",
":",
"for",
"contig_name",
"in",
"nucmer_hits",
":",
"for",
"hit",
"in",
"nucmer_hits",
"[",
"contig_name",
"]",
":",
"if",
"hit",
".",
"ref_name",
"==",
"ref_name",
"and",
"(",
"qry_name",
"is",
"None",
"or",
"qry_name",
"==",
"hit",
".",
"qry_name",
")",
"and",
"hit",
".",
"ref_coords",
"(",
")",
".",
"distance_to_point",
"(",
"ref_position",
")",
"==",
"0",
":",
"return",
"hit",
"return",
"None"
]
| Returns the first nucmer match found that contains the given
reference location. nucmer_hits = hits made by self._parse_nucmer_coords_file.
Returns None if no matching hit found | [
"Returns",
"the",
"first",
"nucmer",
"match",
"found",
"that",
"contains",
"the",
"given",
"reference",
"location",
".",
"nucmer_hits",
"=",
"hits",
"made",
"by",
"self",
".",
"_parse_nucmer_coords_file",
".",
"Returns",
"None",
"if",
"no",
"matching",
"hit",
"found"
]
| train | https://github.com/sanger-pathogens/ariba/blob/16a0b1916ce0e886bd22550ba2d648542977001b/ariba/assembly_compare.py#L384-L393 |
sanger-pathogens/ariba | ariba/external_progs.py | ExternalProgs._get_exe | def _get_exe(prog):
'''Given a program name, return what we expect its exectuable to be called'''
if prog in prog_to_env_var:
env_var = prog_to_env_var[prog]
if env_var in os.environ:
return os.environ[env_var]
return prog_to_default[prog] | python | def _get_exe(prog):
if prog in prog_to_env_var:
env_var = prog_to_env_var[prog]
if env_var in os.environ:
return os.environ[env_var]
return prog_to_default[prog] | [
"def",
"_get_exe",
"(",
"prog",
")",
":",
"if",
"prog",
"in",
"prog_to_env_var",
":",
"env_var",
"=",
"prog_to_env_var",
"[",
"prog",
"]",
"if",
"env_var",
"in",
"os",
".",
"environ",
":",
"return",
"os",
".",
"environ",
"[",
"env_var",
"]",
"return",
"prog_to_default",
"[",
"prog",
"]"
]
| Given a program name, return what we expect its exectuable to be called | [
"Given",
"a",
"program",
"name",
"return",
"what",
"we",
"expect",
"its",
"exectuable",
"to",
"be",
"called"
]
| train | https://github.com/sanger-pathogens/ariba/blob/16a0b1916ce0e886bd22550ba2d648542977001b/ariba/external_progs.py#L131-L138 |
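The lookup above is a plain environment-variable override: if the program has an associated variable and that variable is set, its value wins, otherwise the default executable name is used. A self-contained sketch of the pattern; the variable and default names here are illustrative rather than a statement of ariba's exact tables:

import os

prog_to_env_var = {'bowtie2': 'ARIBA_BOWTIE2'}
prog_to_default = {'bowtie2': 'bowtie2'}

def get_exe(prog):
    env_var = prog_to_env_var.get(prog)
    if env_var is not None and env_var in os.environ:
        return os.environ[env_var]
    return prog_to_default[prog]

# With ARIBA_BOWTIE2=/opt/bowtie2/bowtie2 exported, get_exe('bowtie2') returns that path.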
sanger-pathogens/ariba | ariba/external_progs.py | ExternalProgs._get_version | def _get_version(prog, path):
'''Given a program name and expected path, tries to determine its version.
Returns tuple (bool, version). First element True iff found version ok.
Second element is version string (if found), otherwise an error message'''
assert prog in prog_to_version_cmd
args, regex = prog_to_version_cmd[prog]
cmd = path + ' ' + args
if prog == 'spades':
cmd_output = subprocess.Popen(['python3', path, args], shell=False, stdout=subprocess.PIPE,
stderr=subprocess.PIPE).communicate()
else:
cmd_output = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
cmd_output = common.decode(cmd_output[0]).split('\n')[:-1] + common.decode(cmd_output[1]).split('\n')[:-1]
for line in cmd_output:
hits = regex.search(line)
if hits:
return True, hits.group(1)
return False, 'I tried to get the version of ' + prog + ' with: "' + cmd + '" and the output didn\'t match this regular expression: "' + regex.pattern + '"' | python | def _get_version(prog, path):
assert prog in prog_to_version_cmd
args, regex = prog_to_version_cmd[prog]
cmd = path + ' ' + args
if prog == 'spades':
cmd_output = subprocess.Popen(['python3', path, args], shell=False, stdout=subprocess.PIPE,
stderr=subprocess.PIPE).communicate()
else:
cmd_output = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
cmd_output = common.decode(cmd_output[0]).split('\n')[:-1] + common.decode(cmd_output[1]).split('\n')[:-1]
for line in cmd_output:
hits = regex.search(line)
if hits:
return True, hits.group(1)
return False, 'I tried to get the version of ' + prog + ' with: "' + cmd + '" and the output didn\'t match this regular expression: "' + regex.pattern + '"' | [
"def",
"_get_version",
"(",
"prog",
",",
"path",
")",
":",
"assert",
"prog",
"in",
"prog_to_version_cmd",
"args",
",",
"regex",
"=",
"prog_to_version_cmd",
"[",
"prog",
"]",
"cmd",
"=",
"path",
"+",
"' '",
"+",
"args",
"if",
"prog",
"==",
"'spades'",
":",
"cmd_output",
"=",
"subprocess",
".",
"Popen",
"(",
"[",
"'python3'",
",",
"path",
",",
"args",
"]",
",",
"shell",
"=",
"False",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"PIPE",
")",
".",
"communicate",
"(",
")",
"else",
":",
"cmd_output",
"=",
"subprocess",
".",
"Popen",
"(",
"cmd",
",",
"shell",
"=",
"True",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"PIPE",
")",
".",
"communicate",
"(",
")",
"cmd_output",
"=",
"common",
".",
"decode",
"(",
"cmd_output",
"[",
"0",
"]",
")",
".",
"split",
"(",
"'\\n'",
")",
"[",
":",
"-",
"1",
"]",
"+",
"common",
".",
"decode",
"(",
"cmd_output",
"[",
"1",
"]",
")",
".",
"split",
"(",
"'\\n'",
")",
"[",
":",
"-",
"1",
"]",
"for",
"line",
"in",
"cmd_output",
":",
"hits",
"=",
"regex",
".",
"search",
"(",
"line",
")",
"if",
"hits",
":",
"return",
"True",
",",
"hits",
".",
"group",
"(",
"1",
")",
"return",
"False",
",",
"'I tried to get the version of '",
"+",
"prog",
"+",
"' with: \"'",
"+",
"cmd",
"+",
"'\" and the output didn\\'t match this regular expression: \"'",
"+",
"regex",
".",
"pattern",
"+",
"'\"'"
]
| Given a program name and expected path, tries to determine its version.
Returns tuple (bool, version). First element True iff found version ok.
Second element is version string (if found), otherwise an error message | [
"Given",
"a",
"program",
"name",
"and",
"expected",
"path",
"tries",
"to",
"determine",
"its",
"version",
".",
"Returns",
"tuple",
"(",
"bool",
"version",
")",
".",
"First",
"element",
"True",
"iff",
"found",
"version",
"ok",
".",
"Second",
"element",
"is",
"version",
"string",
"(",
"if",
"found",
")",
"otherwise",
"an",
"error",
"message"
]
| train | https://github.com/sanger-pathogens/ariba/blob/16a0b1916ce0e886bd22550ba2d648542977001b/ariba/external_progs.py#L142-L162 |
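_get_version runs the tool and then scans its stdout and stderr line by line for a regex whose first capture group is the version string. The scan itself, shown on canned output with an example regex (neither is taken from ariba's own tables):

import re

regex = re.compile(r'version\s+(\d+\.\d+\.\d+)')
cmd_output = ['Program: sometool', 'sometool version 2.3.1 (compiled 2020)']

version = None
for line in cmd_output:
    hits = regex.search(line)
    if hits:
        version = hits.group(1)
        break

print(version)   # 2.3.1, or None if no line matched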
sanger-pathogens/ariba | ariba/samtools_variants.py | SamtoolsVariants._get_read_depths | def _get_read_depths(cls, read_depths_file, sequence_name, position):
'''Returns total read depth and depth of reads supporting alternative (if present)'''
assert os.path.exists(read_depths_file)
assert os.path.exists(read_depths_file + '.tbi')
tbx = pysam.TabixFile(read_depths_file)
try:
rows = [x for x in tbx.fetch(sequence_name, position, position + 1)]
except:
return None
if len(rows) > 1: # which happens with indels, mutiple lines for same base of reference
test_rows = [x for x in rows if x.rstrip().split()[3] != '.']
if len(test_rows) != 1:
rows = [rows[-1]]
else:
rows = test_rows
if len(rows) == 1:
r, p, ref_base, alt_base, ref_counts, alt_counts = rows[0].rstrip().split()
bases = ref_base if alt_base == '.' else ref_base + ',' + alt_base
return bases, int(ref_counts), alt_counts
else:
return None | python | def _get_read_depths(cls, read_depths_file, sequence_name, position):
assert os.path.exists(read_depths_file)
assert os.path.exists(read_depths_file + '.tbi')
tbx = pysam.TabixFile(read_depths_file)
try:
rows = [x for x in tbx.fetch(sequence_name, position, position + 1)]
except:
return None
        if len(rows) > 1: # which happens with indels, mutiple lines for same base of reference
test_rows = [x for x in rows if x.rstrip().split()[3] != '.']
if len(test_rows) != 1:
rows = [rows[-1]]
else:
rows = test_rows
if len(rows) == 1:
r, p, ref_base, alt_base, ref_counts, alt_counts = rows[0].rstrip().split()
bases = ref_base if alt_base == '.' else ref_base + ',' + alt_base
return bases, int(ref_counts), alt_counts
else:
return None | [
"def",
"_get_read_depths",
"(",
"cls",
",",
"read_depths_file",
",",
"sequence_name",
",",
"position",
")",
":",
"assert",
"os",
".",
"path",
".",
"exists",
"(",
"read_depths_file",
")",
"assert",
"os",
".",
"path",
".",
"exists",
"(",
"read_depths_file",
"+",
"'.tbi'",
")",
"tbx",
"=",
"pysam",
".",
"TabixFile",
"(",
"read_depths_file",
")",
"try",
":",
"rows",
"=",
"[",
"x",
"for",
"x",
"in",
"tbx",
".",
"fetch",
"(",
"sequence_name",
",",
"position",
",",
"position",
"+",
"1",
")",
"]",
"except",
":",
"return",
"None",
"if",
"len",
"(",
"rows",
")",
">",
"1",
":",
"# which happens with indels, mutiple lines for same base of reference",
"test_rows",
"=",
"[",
"x",
"for",
"x",
"in",
"rows",
"if",
"x",
".",
"rstrip",
"(",
")",
".",
"split",
"(",
")",
"[",
"3",
"]",
"!=",
"'.'",
"]",
"if",
"len",
"(",
"test_rows",
")",
"!=",
"1",
":",
"rows",
"=",
"[",
"rows",
"[",
"-",
"1",
"]",
"]",
"else",
":",
"rows",
"=",
"test_rows",
"if",
"len",
"(",
"rows",
")",
"==",
"1",
":",
"r",
",",
"p",
",",
"ref_base",
",",
"alt_base",
",",
"ref_counts",
",",
"alt_counts",
"=",
"rows",
"[",
"0",
"]",
".",
"rstrip",
"(",
")",
".",
"split",
"(",
")",
"bases",
"=",
"ref_base",
"if",
"alt_base",
"==",
"'.'",
"else",
"ref_base",
"+",
"','",
"+",
"alt_base",
"return",
"bases",
",",
"int",
"(",
"ref_counts",
")",
",",
"alt_counts",
"else",
":",
"return",
"None"
]
| Returns total read depth and depth of reads supporting alternative (if present) | [
"Returns",
"total",
"read",
"depth",
"and",
"depth",
"of",
"reads",
"supporting",
"alternative",
"(",
"if",
"present",
")"
]
| train | https://github.com/sanger-pathogens/ariba/blob/16a0b1916ce0e886bd22550ba2d648542977001b/ariba/samtools_variants.py#L60-L82 |
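Each tabix row consumed above is a tab-separated record of sequence name, position, reference base, alternative base, and two depth columns, with '.' in the alt column meaning no alternative allele was seen. Parsing one such row in isolation (the row text is invented for illustration):

row = 'cluster_1\t42\tA\tT\t10\t3'

name, pos, ref_base, alt_base, ref_counts, alt_counts = row.rstrip().split()
bases = ref_base if alt_base == '.' else ref_base + ',' + alt_base
print(bases, int(ref_counts), alt_counts)   # A,T 10 3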
sanger-pathogens/ariba | ariba/samtools_variants.py | SamtoolsVariants.variants_in_coords | def variants_in_coords(nucmer_matches, vcf_file):
'''nucmer_matches = made by assembly_compare.assembly_match_coords().
Returns number of variants that lie in nucmer_matches'''
found_variants = {}
f = pyfastaq.utils.open_file_read(vcf_file)
for line in f:
if line.startswith('#'):
continue
data = line.rstrip().split('\t')
scaff = data[0]
if scaff in nucmer_matches:
position = int(data[1]) - 1
i = pyfastaq.intervals.Interval(position, position)
intersects = len([x for x in nucmer_matches[scaff] if x.intersects(i)]) > 0
if intersects:
if scaff not in found_variants:
found_variants[scaff] = set()
found_variants[scaff].add(position)
pyfastaq.utils.close(f)
return found_variants | python | def variants_in_coords(nucmer_matches, vcf_file):
found_variants = {}
f = pyfastaq.utils.open_file_read(vcf_file)
for line in f:
        if line.startswith('#'):
continue
data = line.rstrip().split('\t')
scaff = data[0]
if scaff in nucmer_matches:
position = int(data[1]) - 1
i = pyfastaq.intervals.Interval(position, position)
intersects = len([x for x in nucmer_matches[scaff] if x.intersects(i)]) > 0
if intersects:
if scaff not in found_variants:
found_variants[scaff] = set()
found_variants[scaff].add(position)
pyfastaq.utils.close(f)
return found_variants | [
"def",
"variants_in_coords",
"(",
"nucmer_matches",
",",
"vcf_file",
")",
":",
"found_variants",
"=",
"{",
"}",
"f",
"=",
"pyfastaq",
".",
"utils",
".",
"open_file_read",
"(",
"vcf_file",
")",
"for",
"line",
"in",
"f",
":",
"if",
"line",
".",
"startswith",
"(",
"'#'",
")",
":",
"continue",
"data",
"=",
"line",
".",
"rstrip",
"(",
")",
".",
"split",
"(",
"'\\t'",
")",
"scaff",
"=",
"data",
"[",
"0",
"]",
"if",
"scaff",
"in",
"nucmer_matches",
":",
"position",
"=",
"int",
"(",
"data",
"[",
"1",
"]",
")",
"-",
"1",
"i",
"=",
"pyfastaq",
".",
"intervals",
".",
"Interval",
"(",
"position",
",",
"position",
")",
"intersects",
"=",
"len",
"(",
"[",
"x",
"for",
"x",
"in",
"nucmer_matches",
"[",
"scaff",
"]",
"if",
"x",
".",
"intersects",
"(",
"i",
")",
"]",
")",
">",
"0",
"if",
"intersects",
":",
"if",
"scaff",
"not",
"in",
"found_variants",
":",
"found_variants",
"[",
"scaff",
"]",
"=",
"set",
"(",
")",
"found_variants",
"[",
"scaff",
"]",
".",
"add",
"(",
"position",
")",
"pyfastaq",
".",
"utils",
".",
"close",
"(",
"f",
")",
"return",
"found_variants"
]
| nucmer_matches = made by assembly_compare.assembly_match_coords().
Returns number of variants that lie in nucmer_matches | [
"nucmer_matches",
"=",
"made",
"by",
"assembly_compare",
".",
"assembly_match_coords",
"()",
".",
"Returns",
"number",
"of",
"variants",
"that",
"lie",
"in",
"nucmer_matches"
]
| train | https://github.com/sanger-pathogens/ariba/blob/16a0b1916ce0e886bd22550ba2d648542977001b/ariba/samtools_variants.py#L135-L157 |
sanger-pathogens/ariba | ariba/cdhit.py | Runner.fake_run | def fake_run(self):
'''Doesn't actually run cd-hit. Instead, puts each input sequence into its own cluster. So it's as if cdhit was run, but didn't cluster anything'''
clusters = {}
used_names = set()
seq_reader = pyfastaq.sequences.file_reader(self.infile)
for seq in seq_reader:
if seq.id in used_names:
raise Error('Sequence name "' + seq.id + '" not unique. Cannot continue')
clusters[str(len(clusters) + self.min_cluster_number)] = {seq.id}
used_names.add(seq.id)
return clusters | python | def fake_run(self):
clusters = {}
used_names = set()
seq_reader = pyfastaq.sequences.file_reader(self.infile)
for seq in seq_reader:
if seq.id in used_names:
raise Error('Sequence name "' + seq.id + '" not unique. Cannot continue')
clusters[str(len(clusters) + self.min_cluster_number)] = {seq.id}
used_names.add(seq.id)
return clusters | [
"def",
"fake_run",
"(",
"self",
")",
":",
"clusters",
"=",
"{",
"}",
"used_names",
"=",
"set",
"(",
")",
"seq_reader",
"=",
"pyfastaq",
".",
"sequences",
".",
"file_reader",
"(",
"self",
".",
"infile",
")",
"for",
"seq",
"in",
"seq_reader",
":",
"if",
"seq",
".",
"id",
"in",
"used_names",
":",
"raise",
"Error",
"(",
"'Sequence name \"'",
"+",
"seq",
".",
"id",
"+",
"'\" not unique. Cannot continue'",
")",
"clusters",
"[",
"str",
"(",
"len",
"(",
"clusters",
")",
"+",
"self",
".",
"min_cluster_number",
")",
"]",
"=",
"{",
"seq",
".",
"id",
"}",
"used_names",
".",
"add",
"(",
"seq",
".",
"id",
")",
"return",
"clusters"
]
| Doesn't actually run cd-hit. Instead, puts each input sequence into its own cluster. So it's as if cdhit was run, but didn't cluster anything | [
"Doesn",
"t",
"actually",
"run",
"cd",
"-",
"hit",
".",
"Instead",
"puts",
"each",
"input",
"sequence",
"into",
"its",
"own",
"cluster",
".",
"So",
"it",
"s",
"as",
"if",
"cdhit",
"was",
"run",
"but",
"didn",
"t",
"cluster",
"anything"
]
| train | https://github.com/sanger-pathogens/ariba/blob/16a0b1916ce0e886bd22550ba2d648542977001b/ariba/cdhit.py#L38-L51 |
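fake_run does no clustering at all: each input sequence becomes its own singleton cluster, keyed by an incrementing number starting from min_cluster_number. The resulting structure, sketched with invented sequence names:

seq_names = ['geneA', 'geneB', 'geneC']
min_cluster_number = 0

clusters = {}
for name in seq_names:
    clusters[str(len(clusters) + min_cluster_number)] = {name}

print(clusters)   # {'0': {'geneA'}, '1': {'geneB'}, '2': {'geneC'}}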
sanger-pathogens/ariba | ariba/cdhit.py | Runner.run_get_clusters_from_file | def run_get_clusters_from_file(self, clusters_infile, all_ref_seqs, rename_dict=None):
'''Instead of running cdhit, gets the clusters info from the input file.'''
if rename_dict is None:
rename_dict = {}
# check that every sequence in the clusters file can be
# found in the fasta file
seq_reader = pyfastaq.sequences.file_reader(self.infile)
names_list_from_fasta_file = [seq.id for seq in seq_reader]
names_set_from_fasta_file = set(names_list_from_fasta_file)
clusters = self._load_user_clusters_file(clusters_infile, all_ref_seqs, rename_dict=rename_dict)
if len(names_set_from_fasta_file) != len(names_list_from_fasta_file):
raise Error('At least one duplicate name in fasta file ' + self.infile + '. Cannot continue')
names_from_clusters_file = set()
for new_names in clusters.values():
names_from_clusters_file.update(new_names)
if not names_set_from_fasta_file.issubset(names_from_clusters_file):
raise Error('Some names in fasta file "' + self.infile + '" not given in cluster file. Cannot continue')
return clusters | python | def run_get_clusters_from_file(self, clusters_infile, all_ref_seqs, rename_dict=None):
if rename_dict is None:
rename_dict = {}
seq_reader = pyfastaq.sequences.file_reader(self.infile)
names_list_from_fasta_file = [seq.id for seq in seq_reader]
names_set_from_fasta_file = set(names_list_from_fasta_file)
clusters = self._load_user_clusters_file(clusters_infile, all_ref_seqs, rename_dict=rename_dict)
if len(names_set_from_fasta_file) != len(names_list_from_fasta_file):
raise Error('At least one duplicate name in fasta file ' + self.infile + '. Cannot continue')
names_from_clusters_file = set()
for new_names in clusters.values():
names_from_clusters_file.update(new_names)
if not names_set_from_fasta_file.issubset(names_from_clusters_file):
raise Error('Some names in fasta file "' + self.infile + '" not given in cluster file. Cannot continue')
return clusters | [
"def",
"run_get_clusters_from_file",
"(",
"self",
",",
"clusters_infile",
",",
"all_ref_seqs",
",",
"rename_dict",
"=",
"None",
")",
":",
"if",
"rename_dict",
"is",
"None",
":",
"rename_dict",
"=",
"{",
"}",
"# check that every sequence in the clusters file can be",
"# found in the fasta file",
"seq_reader",
"=",
"pyfastaq",
".",
"sequences",
".",
"file_reader",
"(",
"self",
".",
"infile",
")",
"names_list_from_fasta_file",
"=",
"[",
"seq",
".",
"id",
"for",
"seq",
"in",
"seq_reader",
"]",
"names_set_from_fasta_file",
"=",
"set",
"(",
"names_list_from_fasta_file",
")",
"clusters",
"=",
"self",
".",
"_load_user_clusters_file",
"(",
"clusters_infile",
",",
"all_ref_seqs",
",",
"rename_dict",
"=",
"rename_dict",
")",
"if",
"len",
"(",
"names_set_from_fasta_file",
")",
"!=",
"len",
"(",
"names_list_from_fasta_file",
")",
":",
"raise",
"Error",
"(",
"'At least one duplicate name in fasta file '",
"+",
"self",
".",
"infile",
"+",
"'. Cannot continue'",
")",
"names_from_clusters_file",
"=",
"set",
"(",
")",
"for",
"new_names",
"in",
"clusters",
".",
"values",
"(",
")",
":",
"names_from_clusters_file",
".",
"update",
"(",
"new_names",
")",
"if",
"not",
"names_set_from_fasta_file",
".",
"issubset",
"(",
"names_from_clusters_file",
")",
":",
"raise",
"Error",
"(",
"'Some names in fasta file \"'",
"+",
"self",
".",
"infile",
"+",
"'\" not given in cluster file. Cannot continue'",
")",
"return",
"clusters"
]
| Instead of running cdhit, gets the clusters info from the input file. | [
"Instead",
"of",
"running",
"cdhit",
"gets",
"the",
"clusters",
"info",
"from",
"the",
"input",
"file",
"."
]
| train | https://github.com/sanger-pathogens/ariba/blob/16a0b1916ce0e886bd22550ba2d648542977001b/ariba/cdhit.py#L86-L109 |
sanger-pathogens/ariba | ariba/mapping.py | get_total_alignment_score | def get_total_alignment_score(bam):
'''Returns total of AS: tags in the input BAM'''
sam_reader = pysam.Samfile(bam, "rb")
total = 0
for sam in sam_reader.fetch(until_eof=True):
try:
total += sam.opt('AS')
except:
pass
return total | python | def get_total_alignment_score(bam):
sam_reader = pysam.Samfile(bam, "rb")
total = 0
for sam in sam_reader.fetch(until_eof=True):
try:
total += sam.opt('AS')
except:
pass
return total | [
"def",
"get_total_alignment_score",
"(",
"bam",
")",
":",
"sam_reader",
"=",
"pysam",
".",
"Samfile",
"(",
"bam",
",",
"\"rb\"",
")",
"total",
"=",
"0",
"for",
"sam",
"in",
"sam_reader",
".",
"fetch",
"(",
"until_eof",
"=",
"True",
")",
":",
"try",
":",
"total",
"+=",
"sam",
".",
"opt",
"(",
"'AS'",
")",
"except",
":",
"pass",
"return",
"total"
]
| Returns total of AS: tags in the input BAM | [
"Returns",
"total",
"of",
"AS",
":",
"tags",
"in",
"the",
"input",
"BAM"
]
| train | https://github.com/sanger-pathogens/ariba/blob/16a0b1916ce0e886bd22550ba2d648542977001b/ariba/mapping.py#L123-L132 |
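The helper above totals the AS (alignment score) tags over a whole BAM, silently skipping reads that lack the tag. A hedged equivalent using pysam's current API, assuming pysam is installed and a BAM exists at the placeholder path:

import pysam

total = 0
with pysam.AlignmentFile('mapped_reads.bam', 'rb') as bam:   # placeholder file name
    for read in bam.fetch(until_eof=True):
        if read.has_tag('AS'):
            total += read.get_tag('AS')

print(total)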
sanger-pathogens/ariba | ariba/mapping.py | sam_to_fastq | def sam_to_fastq(sam):
'''Given a pysam alignment, returns the sequence a Fastq object.
Reverse complements as required and add suffix /1 or /2 as appropriate from the flag'''
name = sam.qname
if sam.is_read1:
name += '/1'
elif sam.is_read2:
name += '/2'
else:
raise Error('Read ' + name + ' must be first or second of pair according to flag. Cannot continue')
seq = pyfastaq.sequences.Fastq(name, common.decode(sam.seq), common.decode(sam.qual))
if sam.is_reverse:
seq.revcomp()
return seq | python | def sam_to_fastq(sam):
name = sam.qname
if sam.is_read1:
name += '/1'
elif sam.is_read2:
name += '/2'
else:
raise Error('Read ' + name + ' must be first or second of pair according to flag. Cannot continue')
seq = pyfastaq.sequences.Fastq(name, common.decode(sam.seq), common.decode(sam.qual))
if sam.is_reverse:
seq.revcomp()
return seq | [
"def",
"sam_to_fastq",
"(",
"sam",
")",
":",
"name",
"=",
"sam",
".",
"qname",
"if",
"sam",
".",
"is_read1",
":",
"name",
"+=",
"'/1'",
"elif",
"sam",
".",
"is_read2",
":",
"name",
"+=",
"'/2'",
"else",
":",
"raise",
"Error",
"(",
"'Read '",
"+",
"name",
"+",
"' must be first or second of pair according to flag. Cannot continue'",
")",
"seq",
"=",
"pyfastaq",
".",
"sequences",
".",
"Fastq",
"(",
"name",
",",
"common",
".",
"decode",
"(",
"sam",
".",
"seq",
")",
",",
"common",
".",
"decode",
"(",
"sam",
".",
"qual",
")",
")",
"if",
"sam",
".",
"is_reverse",
":",
"seq",
".",
"revcomp",
"(",
")",
"return",
"seq"
]
| Given a pysam alignment, returns the sequence a Fastq object.
Reverse complements as required and add suffix /1 or /2 as appropriate from the flag | [
"Given",
"a",
"pysam",
"alignment",
"returns",
"the",
"sequence",
"a",
"Fastq",
"object",
".",
"Reverse",
"complements",
"as",
"required",
"and",
"add",
"suffix",
"/",
"1",
"or",
"/",
"2",
"as",
"appropriate",
"from",
"the",
"flag"
]
| train | https://github.com/sanger-pathogens/ariba/blob/16a0b1916ce0e886bd22550ba2d648542977001b/ariba/mapping.py#L135-L150 |
sanger-pathogens/ariba | ariba/mapping.py | sam_pair_to_insert | def sam_pair_to_insert(s1, s2):
'''Returns insert size from pair of sam records, as long as their orientation is "innies".
Otherwise returns None.'''
if s1.is_unmapped or s2.is_unmapped or (s1.tid != s2.tid) or (s1.is_reverse == s2.is_reverse):
return None
# If here, reads are both mapped to the same ref, and in opposite orientations
if s1.is_reverse:
end = s1.reference_end - 1
start = s2.reference_start
else:
end = s2.reference_end - 1
start = s1.reference_start
if start < end:
return end - start + 1
else:
return None | python | def sam_pair_to_insert(s1, s2):
if s1.is_unmapped or s2.is_unmapped or (s1.tid != s2.tid) or (s1.is_reverse == s2.is_reverse):
return None
    # If here, reads are both mapped to the same ref, and in opposite orientations
    if s1.is_reverse:
end = s1.reference_end - 1
start = s2.reference_start
else:
end = s2.reference_end - 1
start = s1.reference_start
if start < end:
return end - start + 1
else:
return None | [
"def",
"sam_pair_to_insert",
"(",
"s1",
",",
"s2",
")",
":",
"if",
"s1",
".",
"is_unmapped",
"or",
"s2",
".",
"is_unmapped",
"or",
"(",
"s1",
".",
"tid",
"!=",
"s2",
".",
"tid",
")",
"or",
"(",
"s1",
".",
"is_reverse",
"==",
"s2",
".",
"is_reverse",
")",
":",
"return",
"None",
"# If here, reads are both mapped to the same ref, and in opposite orientations",
"if",
"s1",
".",
"is_reverse",
":",
"end",
"=",
"s1",
".",
"reference_end",
"-",
"1",
"start",
"=",
"s2",
".",
"reference_start",
"else",
":",
"end",
"=",
"s2",
".",
"reference_end",
"-",
"1",
"start",
"=",
"s1",
".",
"reference_start",
"if",
"start",
"<",
"end",
":",
"return",
"end",
"-",
"start",
"+",
"1",
"else",
":",
"return",
"None"
]
| Returns insert size from pair of sam records, as long as their orientation is "innies".
Otherwise returns None. | [
"Returns",
"insert",
"size",
"from",
"pair",
"of",
"sam",
"records",
"as",
"long",
"as",
"their",
"orientation",
"is",
"innies",
".",
"Otherwise",
"returns",
"None",
"."
]
| train | https://github.com/sanger-pathogens/ariba/blob/16a0b1916ce0e886bd22550ba2d648542977001b/ariba/mapping.py#L153-L170 |
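Once the orientation checks above pass, the insert size is the distance from the outermost start to the outermost end, inclusive. A worked example with invented 0-based coordinates:

s1_reference_start = 100   # leftmost base of the forward read
s2_reference_end = 350     # pysam's half-open end of the reverse read

start = s1_reference_start
end = s2_reference_end - 1   # last covered base, 349
print(end - start + 1)       # insert size of 250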
sanger-pathogens/ariba | ariba/scaffold_graph.py | Graph.update_from_sam | def update_from_sam(self, sam, sam_reader):
'''Updates graph info from a pysam.AlignedSegment object'''
if sam.is_unmapped \
or sam.mate_is_unmapped \
or (sam.reference_id == sam.next_reference_id):
return
new_link = link.Link(sam, sam_reader, self.ref_lengths)
read_name = sam.query_name
if read_name in self.partial_links:
new_link.merge(self.partial_links[read_name])
del self.partial_links[read_name]
key = tuple(sorted((new_link.refnames[0], new_link.refnames[1])))
if key not in self.links:
self.links[key] = []
new_link.sort()
self.links[key].append(new_link)
else:
self.partial_links[read_name] = new_link | python | def update_from_sam(self, sam, sam_reader):
if sam.is_unmapped \
or sam.mate_is_unmapped \
or (sam.reference_id == sam.next_reference_id):
return
new_link = link.Link(sam, sam_reader, self.ref_lengths)
read_name = sam.query_name
if read_name in self.partial_links:
new_link.merge(self.partial_links[read_name])
del self.partial_links[read_name]
key = tuple(sorted((new_link.refnames[0], new_link.refnames[1])))
if key not in self.links:
self.links[key] = []
new_link.sort()
self.links[key].append(new_link)
else:
self.partial_links[read_name] = new_link | [
"def",
"update_from_sam",
"(",
"self",
",",
"sam",
",",
"sam_reader",
")",
":",
"if",
"sam",
".",
"is_unmapped",
"or",
"sam",
".",
"mate_is_unmapped",
"or",
"(",
"sam",
".",
"reference_id",
"==",
"sam",
".",
"next_reference_id",
")",
":",
"return",
"new_link",
"=",
"link",
".",
"Link",
"(",
"sam",
",",
"sam_reader",
",",
"self",
".",
"ref_lengths",
")",
"read_name",
"=",
"sam",
".",
"query_name",
"if",
"read_name",
"in",
"self",
".",
"partial_links",
":",
"new_link",
".",
"merge",
"(",
"self",
".",
"partial_links",
"[",
"read_name",
"]",
")",
"del",
"self",
".",
"partial_links",
"[",
"read_name",
"]",
"key",
"=",
"tuple",
"(",
"sorted",
"(",
"(",
"new_link",
".",
"refnames",
"[",
"0",
"]",
",",
"new_link",
".",
"refnames",
"[",
"1",
"]",
")",
")",
")",
"if",
"key",
"not",
"in",
"self",
".",
"links",
":",
"self",
".",
"links",
"[",
"key",
"]",
"=",
"[",
"]",
"new_link",
".",
"sort",
"(",
")",
"self",
".",
"links",
"[",
"key",
"]",
".",
"append",
"(",
"new_link",
")",
"else",
":",
"self",
".",
"partial_links",
"[",
"read_name",
"]",
"=",
"new_link"
]
| Updates graph info from a pysam.AlignedSegment object | [
"Updates",
"graph",
"info",
"from",
"a",
"pysam",
".",
"AlignedSegment",
"object"
]
| train | https://github.com/sanger-pathogens/ariba/blob/16a0b1916ce0e886bd22550ba2d648542977001b/ariba/scaffold_graph.py#L13-L32 |
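Sketch of driving Graph.update_from_sam with a BAM of reads mapped to an assembly. The Graph constructor is assumed to take a {reference name: length} dict, inferred only from the method's use of self.ref_lengths, so treat the call below as an assumption.

# Assumption: Graph(ref_lengths) is the constructor; check the class before relying on it.
import pysam
from ariba import scaffold_graph

with pysam.AlignmentFile('reads_vs_assembly.bam', 'rb') as bam:   # assumed filename
    ref_lengths = dict(zip(bam.references, bam.lengths))
    graph = scaffold_graph.Graph(ref_lengths)
    for aln in bam.fetch(until_eof=True):
        graph.update_from_sam(aln, bam)   # only pairs whose mates hit two different contigs are stored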
sanger-pathogens/ariba | ariba/scaffold_graph.py | Graph._make_graph | def _make_graph(self, max_insert):
'''helper function to construct graph from current state of object'''
if len(self.partial_links) != 0:
raise Error('Error in _make_graph(). Cannot continue because there are partial links')
self.contig_links = {}
for key in self.links:
for l in self.links[key]:
insert_size = l.insert_size()
if insert_size <= max_insert:
if key not in self.contig_links:
self.contig_links[key] = {}
dirs = ''.join(l.dirs)
self.contig_links[key][dirs] = self.contig_links[key].get(dirs, 0) + 1 | python | def _make_graph(self, max_insert):
if len(self.partial_links) != 0:
raise Error('Error in _make_graph(). Cannot continue because there are partial links')
self.contig_links = {}
for key in self.links:
for l in self.links[key]:
insert_size = l.insert_size()
if insert_size <= max_insert:
if key not in self.contig_links:
self.contig_links[key] = {}
dirs = ''.join(l.dirs)
self.contig_links[key][dirs] = self.contig_links[key].get(dirs, 0) + 1 | [
"def",
"_make_graph",
"(",
"self",
",",
"max_insert",
")",
":",
"if",
"len",
"(",
"self",
".",
"partial_links",
")",
"!=",
"0",
":",
"raise",
"Error",
"(",
"'Error in _make_graph(). Cannot continue because there are partial links'",
")",
"self",
".",
"contig_links",
"=",
"{",
"}",
"for",
"key",
"in",
"self",
".",
"links",
":",
"for",
"l",
"in",
"self",
".",
"links",
"[",
"key",
"]",
":",
"insert_size",
"=",
"l",
".",
"insert_size",
"(",
")",
"if",
"insert_size",
"<=",
"max_insert",
":",
"if",
"key",
"not",
"in",
"self",
".",
"contig_links",
":",
"self",
".",
"contig_links",
"[",
"key",
"]",
"=",
"{",
"}",
"dirs",
"=",
"''",
".",
"join",
"(",
"l",
".",
"dirs",
")",
"self",
".",
"contig_links",
"[",
"key",
"]",
"[",
"dirs",
"]",
"=",
"self",
".",
"contig_links",
"[",
"key",
"]",
".",
"get",
"(",
"dirs",
",",
"0",
")",
"+",
"1"
]
| helper function to construct graph from current state of object | [
"helper",
"function",
"to",
"construct",
"graph",
"from",
"current",
"state",
"of",
"object"
]
| train | https://github.com/sanger-pathogens/ariba/blob/16a0b1916ce0e886bd22550ba2d648542977001b/ariba/scaffold_graph.py#L35-L48 |
sanger-pathogens/ariba | ariba/sequence_variant.py | Variant.nucleotide_range | def nucleotide_range(self):
'''Returns the nucleotide (start, end) positions inclusive of this variant.
        start+2==end if it's an amino acid variant, otherwise start==end'''
if self.variant_type == 'p':
return 3 * self.position, 3 * self.position + 2
else:
return self.position, self.position | python | def nucleotide_range(self):
if self.variant_type == 'p':
return 3 * self.position, 3 * self.position + 2
else:
return self.position, self.position | [
"def",
"nucleotide_range",
"(",
"self",
")",
":",
"if",
"self",
".",
"variant_type",
"==",
"'p'",
":",
"return",
"3",
"*",
"self",
".",
"position",
",",
"3",
"*",
"self",
".",
"position",
"+",
"2",
"else",
":",
"return",
"self",
".",
"position",
",",
"self",
".",
"position"
]
| Returns the nucleotide (start, end) positions inclusive of this variant.
start+2==end if it's an amino acid variant, otherwise start==end | [
"Returns",
"the",
"nucleotide",
"(",
"start",
"end",
")",
"positions",
"inclusive",
"of",
"this",
"variant",
".",
"start",
"==",
"end",
"if",
"it",
"s",
"an",
"amino",
"acid",
"variant",
"otherwise",
"start",
"+",
"2",
"==",
"end"
]
| train | https://github.com/sanger-pathogens/ariba/blob/16a0b1916ce0e886bd22550ba2d648542977001b/ariba/sequence_variant.py#L63-L69 |
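A standalone restatement of the coordinate arithmetic in nucleotide_range, to make the two cases concrete without constructing a Variant object (its constructor is not shown in this record). Positions are 0-based, as in the method.

def nucleotide_range(variant_type, position):
    # Amino acid variant ('p'): the codon spans three nucleotides 3*pos .. 3*pos + 2.
    # Nucleotide variant: a single base, so start == end.
    if variant_type == 'p':
        return 3 * position, 3 * position + 2
    else:
        return position, position

print(nucleotide_range('p', 2))    # (6, 8)
print(nucleotide_range('n', 41))   # (41, 41)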
sanger-pathogens/ariba | ariba/bam_parse.py | Parser._sam_to_soft_clipped | def _sam_to_soft_clipped(self, sam):
'''Returns tuple of whether or not the left and right end of the mapped read in the sam record is soft-clipped'''
if sam.is_unmapped:
raise Error('Cannot get soft clip info from an unmapped read')
if sam.cigar is None or len(sam.cigar) == 0:
return False, False
return (sam.cigar[0][0] == 4, sam.cigar[-1][0] == 4) | python | def _sam_to_soft_clipped(self, sam):
if sam.is_unmapped:
raise Error('Cannot get soft clip info from an unmapped read')
if sam.cigar is None or len(sam.cigar) == 0:
return False, False
return (sam.cigar[0][0] == 4, sam.cigar[-1][0] == 4) | [
"def",
"_sam_to_soft_clipped",
"(",
"self",
",",
"sam",
")",
":",
"if",
"sam",
".",
"is_unmapped",
":",
"raise",
"Error",
"(",
"'Cannot get soft clip info from an unmapped read'",
")",
"if",
"sam",
".",
"cigar",
"is",
"None",
"or",
"len",
"(",
"sam",
".",
"cigar",
")",
"==",
"0",
":",
"return",
"False",
",",
"False",
"return",
"(",
"sam",
".",
"cigar",
"[",
"0",
"]",
"[",
"0",
"]",
"==",
"4",
",",
"sam",
".",
"cigar",
"[",
"-",
"1",
"]",
"[",
"0",
"]",
"==",
"4",
")"
]
| Returns tuple of whether or not the left and right end of the mapped read in the sam record is soft-clipped | [
"Returns",
"tuple",
"of",
"whether",
"or",
"not",
"the",
"left",
"and",
"right",
"end",
"of",
"the",
"mapped",
"read",
"in",
"the",
"sam",
"record",
"is",
"soft",
"-",
"clipped"
]
| train | https://github.com/sanger-pathogens/ariba/blob/16a0b1916ce0e886bd22550ba2d648542977001b/ariba/bam_parse.py#L21-L28 |
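The soft-clip test above checks CIGAR operation code 4 at each end of the alignment. A small sketch of the same check written as a free function on a pysam alignment (cigartuples is pysam's name for the cigar list used above):

def soft_clipped_ends(aln):
    # Returns (left_clipped, right_clipped); CIGAR op code 4 means soft clip.
    if aln.is_unmapped or not aln.cigartuples:
        return False, False
    return aln.cigartuples[0][0] == 4, aln.cigartuples[-1][0] == 4

# A read with CIGAR 5S70M3S would give (True, True); 76M gives (False, False).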
sanger-pathogens/ariba | ariba/report_filter.py | ReportFilter._report_line_to_dict | def _report_line_to_dict(cls, line):
'''Takes report line string as input. Returns a dict of column name -> value in line'''
data = line.split('\t')
if len(data) != len(report.columns):
return None
d = dict(zip(report.columns, data))
for key in report.int_columns:
try:
d[key] = int(d[key])
except:
assert d[key] == '.'
for key in report.float_columns:
try:
d[key] = float(d[key])
except:
assert d[key] == '.'
d['flag'] = flag.Flag(int(d['flag']))
return d | python | def _report_line_to_dict(cls, line):
data = line.split('\t')
if len(data) != len(report.columns):
return None
d = dict(zip(report.columns, data))
for key in report.int_columns:
try:
d[key] = int(d[key])
except:
assert d[key] == '.'
for key in report.float_columns:
try:
d[key] = float(d[key])
except:
assert d[key] == '.'
d['flag'] = flag.Flag(int(d['flag']))
return d | [
"def",
"_report_line_to_dict",
"(",
"cls",
",",
"line",
")",
":",
"data",
"=",
"line",
".",
"split",
"(",
"'\\t'",
")",
"if",
"len",
"(",
"data",
")",
"!=",
"len",
"(",
"report",
".",
"columns",
")",
":",
"return",
"None",
"d",
"=",
"dict",
"(",
"zip",
"(",
"report",
".",
"columns",
",",
"data",
")",
")",
"for",
"key",
"in",
"report",
".",
"int_columns",
":",
"try",
":",
"d",
"[",
"key",
"]",
"=",
"int",
"(",
"d",
"[",
"key",
"]",
")",
"except",
":",
"assert",
"d",
"[",
"key",
"]",
"==",
"'.'",
"for",
"key",
"in",
"report",
".",
"float_columns",
":",
"try",
":",
"d",
"[",
"key",
"]",
"=",
"float",
"(",
"d",
"[",
"key",
"]",
")",
"except",
":",
"assert",
"d",
"[",
"key",
"]",
"==",
"'.'",
"d",
"[",
"'flag'",
"]",
"=",
"flag",
".",
"Flag",
"(",
"int",
"(",
"d",
"[",
"'flag'",
"]",
")",
")",
"return",
"d"
]
| Takes report line string as input. Returns a dict of column name -> value in line | [
"Takes",
"report",
"line",
"string",
"as",
"input",
".",
"Returns",
"a",
"dict",
"of",
"column",
"name",
"-",
">",
"value",
"in",
"line"
]
| train | https://github.com/sanger-pathogens/ariba/blob/16a0b1916ce0e886bd22550ba2d648542977001b/ariba/report_filter.py#L33-L53 |
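The parsing pattern above zips a fixed column list against a tab-split line and coerces selected columns to int or float, keeping '.' as the missing-value marker. A generic illustration with made-up column names (not ariba's real report.columns):

columns = ['ref_name', 'ctg', 'ref_len', 'pc_ident']   # invented for the example
int_columns = {'ref_len'}
float_columns = {'pc_ident'}

line = 'gene1\tcluster1.l15.c17\t1042\t99.81'
d = dict(zip(columns, line.split('\t')))
for key in int_columns:
    d[key] = int(d[key]) if d[key] != '.' else d[key]
for key in float_columns:
    d[key] = float(d[key]) if d[key] != '.' else d[key]
print(d)   # {'ref_name': 'gene1', 'ctg': 'cluster1.l15.c17', 'ref_len': 1042, 'pc_ident': 99.81}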
sanger-pathogens/ariba | ariba/report_filter.py | ReportFilter._dict_to_report_line | def _dict_to_report_line(cls, report_dict):
'''Takes a report_dict as input and returns a report line'''
return '\t'.join([str(report_dict[x]) for x in report.columns]) | python | def _dict_to_report_line(cls, report_dict):
return '\t'.join([str(report_dict[x]) for x in report.columns]) | [
"def",
"_dict_to_report_line",
"(",
"cls",
",",
"report_dict",
")",
":",
"return",
"'\\t'",
".",
"join",
"(",
"[",
"str",
"(",
"report_dict",
"[",
"x",
"]",
")",
"for",
"x",
"in",
"report",
".",
"columns",
"]",
")"
]
| Takes a report_dict as input and returns a report line | [
"Takes",
"a",
"report_dict",
"as",
"input",
"and",
"returns",
"a",
"report",
"line"
]
| train | https://github.com/sanger-pathogens/ariba/blob/16a0b1916ce0e886bd22550ba2d648542977001b/ariba/report_filter.py#L57-L59 |
sanger-pathogens/ariba | ariba/report_filter.py | ReportFilter._load_report | def _load_report(infile):
'''Loads report file into a dictionary. Key=reference name.
        Value = dict of contig name -> list of report lines for that reference'''
report_dict = {}
f = pyfastaq.utils.open_file_read(infile)
first_line = True
for line in f:
line = line.rstrip()
if first_line:
expected_first_line = '#' + '\t'.join(report.columns)
if line != expected_first_line:
pyfastaq.utils.close(f)
raise Error('Error reading report file. Expected first line of file is\n' + expected_first_line + '\nbut got:\n' + line)
first_line = False
else:
line_dict = ReportFilter._report_line_to_dict(line)
if line_dict is None:
pyfastaq.utils.close(f)
raise Error('Error reading report file at this line:\n' + line)
ref_name = line_dict['ref_name']
ctg_name = line_dict['ctg']
if ref_name not in report_dict:
report_dict[ref_name] = {}
if ctg_name not in report_dict[ref_name]:
report_dict[ref_name][ctg_name] = []
report_dict[ref_name][ctg_name].append(line_dict)
pyfastaq.utils.close(f)
return report_dict | python | def _load_report(infile):
report_dict = {}
f = pyfastaq.utils.open_file_read(infile)
first_line = True
for line in f:
line = line.rstrip()
if first_line:
                expected_first_line = '#' + '\t'.join(report.columns)
if line != expected_first_line:
pyfastaq.utils.close(f)
raise Error('Error reading report file. Expected first line of file is\n' + expected_first_line + '\nbut got:\n' + line)
first_line = False
else:
line_dict = ReportFilter._report_line_to_dict(line)
if line_dict is None:
pyfastaq.utils.close(f)
raise Error('Error reading report file at this line:\n' + line)
ref_name = line_dict['ref_name']
ctg_name = line_dict['ctg']
if ref_name not in report_dict:
report_dict[ref_name] = {}
if ctg_name not in report_dict[ref_name]:
report_dict[ref_name][ctg_name] = []
report_dict[ref_name][ctg_name].append(line_dict)
pyfastaq.utils.close(f)
return report_dict | [
"def",
"_load_report",
"(",
"infile",
")",
":",
"report_dict",
"=",
"{",
"}",
"f",
"=",
"pyfastaq",
".",
"utils",
".",
"open_file_read",
"(",
"infile",
")",
"first_line",
"=",
"True",
"for",
"line",
"in",
"f",
":",
"line",
"=",
"line",
".",
"rstrip",
"(",
")",
"if",
"first_line",
":",
"expected_first_line",
"=",
"'#'",
"+",
"'\\t'",
".",
"join",
"(",
"report",
".",
"columns",
")",
"if",
"line",
"!=",
"expected_first_line",
":",
"pyfastaq",
".",
"utils",
".",
"close",
"(",
"f",
")",
"raise",
"Error",
"(",
"'Error reading report file. Expected first line of file is\\n'",
"+",
"expected_first_line",
"+",
"'\\nbut got:\\n'",
"+",
"line",
")",
"first_line",
"=",
"False",
"else",
":",
"line_dict",
"=",
"ReportFilter",
".",
"_report_line_to_dict",
"(",
"line",
")",
"if",
"line_dict",
"is",
"None",
":",
"pyfastaq",
".",
"utils",
".",
"close",
"(",
"f",
")",
"raise",
"Error",
"(",
"'Error reading report file at this line:\\n'",
"+",
"line",
")",
"ref_name",
"=",
"line_dict",
"[",
"'ref_name'",
"]",
"ctg_name",
"=",
"line_dict",
"[",
"'ctg'",
"]",
"if",
"ref_name",
"not",
"in",
"report_dict",
":",
"report_dict",
"[",
"ref_name",
"]",
"=",
"{",
"}",
"if",
"ctg_name",
"not",
"in",
"report_dict",
"[",
"ref_name",
"]",
":",
"report_dict",
"[",
"ref_name",
"]",
"[",
"ctg_name",
"]",
"=",
"[",
"]",
"report_dict",
"[",
"ref_name",
"]",
"[",
"ctg_name",
"]",
".",
"append",
"(",
"line_dict",
")",
"pyfastaq",
".",
"utils",
".",
"close",
"(",
"f",
")",
"return",
"report_dict"
]
| Loads report file into a dictionary. Key=reference name.
Value = dict of contig name -> list of report lines for that reference | [
"Loads",
"report",
"file",
"into",
"a",
"dictionary",
".",
"Key",
"=",
"reference",
"name",
".",
"Value",
"=",
"list",
"of",
"report",
"lines",
"for",
"that",
"reference"
]
| train | https://github.com/sanger-pathogens/ariba/blob/16a0b1916ce0e886bd22550ba2d648542977001b/ariba/report_filter.py#L63-L94 |
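Sketch of consuming the nested structure that _load_report builds, {reference name: {contig name: [report line dict, ...]}}; the report path is an assumption.

from ariba import report_filter

report_dict = report_filter.ReportFilter._load_report('report.tsv')   # assumed path
for ref_name, contigs in sorted(report_dict.items()):
    for ctg_name, rows in sorted(contigs.items()):
        print(ref_name, ctg_name, len(rows), 'report lines')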
sanger-pathogens/ariba | ariba/report_filter.py | ReportFilter._filter_dicts | def _filter_dicts(self):
'''Filters out all the report_dicts that do not pass the cutoffs. If any ref sequence
loses all of its report_dicts, then it is completely removed.'''
keys_to_remove = set()
for ref_name in self.report:
for ctg_name in self.report[ref_name]:
self.report[ref_name][ctg_name] = self._filter_list_of_dicts(self.report[ref_name][ctg_name])
if len(self.report[ref_name][ctg_name]) == 0:
keys_to_remove.add((ref_name, ctg_name))
refs_to_remove = set()
for ref_name, ctg_name in keys_to_remove:
del self.report[ref_name][ctg_name]
if len(self.report[ref_name]) == 0:
refs_to_remove.add(ref_name)
for ref_name in refs_to_remove:
del self.report[ref_name] | python | def _filter_dicts(self):
keys_to_remove = set()
for ref_name in self.report:
for ctg_name in self.report[ref_name]:
self.report[ref_name][ctg_name] = self._filter_list_of_dicts(self.report[ref_name][ctg_name])
if len(self.report[ref_name][ctg_name]) == 0:
keys_to_remove.add((ref_name, ctg_name))
refs_to_remove = set()
for ref_name, ctg_name in keys_to_remove:
del self.report[ref_name][ctg_name]
if len(self.report[ref_name]) == 0:
refs_to_remove.add(ref_name)
for ref_name in refs_to_remove:
del self.report[ref_name] | [
"def",
"_filter_dicts",
"(",
"self",
")",
":",
"keys_to_remove",
"=",
"set",
"(",
")",
"for",
"ref_name",
"in",
"self",
".",
"report",
":",
"for",
"ctg_name",
"in",
"self",
".",
"report",
"[",
"ref_name",
"]",
":",
"self",
".",
"report",
"[",
"ref_name",
"]",
"[",
"ctg_name",
"]",
"=",
"self",
".",
"_filter_list_of_dicts",
"(",
"self",
".",
"report",
"[",
"ref_name",
"]",
"[",
"ctg_name",
"]",
")",
"if",
"len",
"(",
"self",
".",
"report",
"[",
"ref_name",
"]",
"[",
"ctg_name",
"]",
")",
"==",
"0",
":",
"keys_to_remove",
".",
"add",
"(",
"(",
"ref_name",
",",
"ctg_name",
")",
")",
"refs_to_remove",
"=",
"set",
"(",
")",
"for",
"ref_name",
",",
"ctg_name",
"in",
"keys_to_remove",
":",
"del",
"self",
".",
"report",
"[",
"ref_name",
"]",
"[",
"ctg_name",
"]",
"if",
"len",
"(",
"self",
".",
"report",
"[",
"ref_name",
"]",
")",
"==",
"0",
":",
"refs_to_remove",
".",
"add",
"(",
"ref_name",
")",
"for",
"ref_name",
"in",
"refs_to_remove",
":",
"del",
"self",
".",
"report",
"[",
"ref_name",
"]"
]
| Filters out all the report_dicts that do not pass the cutoffs. If any ref sequence
loses all of its report_dicts, then it is completely removed. | [
"Filters",
"out",
"all",
"the",
"report_dicts",
"that",
"do",
"not",
"pass",
"the",
"cutoffs",
".",
"If",
"any",
"ref",
"sequence",
"loses",
"all",
"of",
"its",
"report_dicts",
"then",
"it",
"is",
"completely",
"removed",
"."
]
| train | https://github.com/sanger-pathogens/ariba/blob/16a0b1916ce0e886bd22550ba2d648542977001b/ariba/report_filter.py#L167-L186 |
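The pruning step above can be restated generically: after filtering the inner lists, drop every (reference, contig) entry that became empty, then drop every reference that lost all of its contigs. The data below is invented for the example.

report = {'refA': {'ctg1': [1, 2], 'ctg2': []}, 'refB': {'ctg3': []}}

empty_pairs = {(r, c) for r in report for c in report[r] if not report[r][c]}
for r, c in empty_pairs:
    del report[r][c]
for r in [r for r in report if not report[r]]:
    del report[r]

print(report)   # {'refA': {'ctg1': [1, 2]}}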
sanger-pathogens/ariba | ariba/link.py | Link.merge | def merge(self, other):
'''Merge another link into this one. Expected that each link was created from each mate from a pair. We only know both distances to contig ends when we have read info from both mappings in a BAM file. All other info should be the same.'''
assert self.refnames == other.refnames
assert self.dirs == other.dirs
assert self.lengths == other.lengths
for i in range(2):
if self.pos[i] is None:
if other.pos[i] is None:
raise Error('Error merging these two links:\n' + str(self) + '\n' + str(other))
self.pos[i] = other.pos[i]
else:
if other.pos[i] is not None:
raise Error('Error merging these two links:\n' + str(self) + '\n' + str(other)) | python | def merge(self, other):
assert self.refnames == other.refnames
assert self.dirs == other.dirs
assert self.lengths == other.lengths
for i in range(2):
if self.pos[i] is None:
if other.pos[i] is None:
raise Error('Error merging these two links:\n' + str(self) + '\n' + str(other))
self.pos[i] = other.pos[i]
else:
if other.pos[i] is not None:
raise Error('Error merging these two links:\n' + str(self) + '\n' + str(other)) | [
"def",
"merge",
"(",
"self",
",",
"other",
")",
":",
"assert",
"self",
".",
"refnames",
"==",
"other",
".",
"refnames",
"assert",
"self",
".",
"dirs",
"==",
"other",
".",
"dirs",
"assert",
"self",
".",
"lengths",
"==",
"other",
".",
"lengths",
"for",
"i",
"in",
"range",
"(",
"2",
")",
":",
"if",
"self",
".",
"pos",
"[",
"i",
"]",
"is",
"None",
":",
"if",
"other",
".",
"pos",
"[",
"i",
"]",
"is",
"None",
":",
"raise",
"Error",
"(",
"'Error merging these two links:\\n'",
"+",
"str",
"(",
"self",
")",
"+",
"'\\n'",
"+",
"str",
"(",
"other",
")",
")",
"self",
".",
"pos",
"[",
"i",
"]",
"=",
"other",
".",
"pos",
"[",
"i",
"]",
"else",
":",
"if",
"other",
".",
"pos",
"[",
"i",
"]",
"is",
"not",
"None",
":",
"raise",
"Error",
"(",
"'Error merging these two links:\\n'",
"+",
"str",
"(",
"self",
")",
"+",
"'\\n'",
"+",
"str",
"(",
"other",
")",
")"
]
| Merge another link into this one. Expected that each link was created from each mate from a pair. We only know both distances to contig ends when we have read info from both mappings in a BAM file. All other info should be the same. | [
"Merge",
"another",
"link",
"into",
"this",
"one",
".",
"Expected",
"that",
"each",
"link",
"was",
"created",
"from",
"each",
"mate",
"from",
"a",
"pair",
".",
"We",
"only",
"know",
"both",
"distances",
"to",
"contig",
"ends",
"when",
"we",
"have",
"read",
"info",
"from",
"both",
"mappings",
"in",
"a",
"BAM",
"file",
".",
"All",
"other",
"info",
"should",
"be",
"the",
"same",
"."
]
| train | https://github.com/sanger-pathogens/ariba/blob/16a0b1916ce0e886bd22550ba2d648542977001b/ariba/link.py#L80-L93 |
sanger-pathogens/ariba | ariba/link.py | Link.insert_size | def insert_size(self):
'''Returns insert size, defined as distance from outer edges of reads (and assumes gap length of zero)'''
try:
distances = self._distance_to_contig_ends()
except:
raise Error('Error getting insert size from Link:\n' + str(self))
return sum(distances) | python | def insert_size(self):
try:
distances = self._distance_to_contig_ends()
except:
raise Error('Error getting insert size from Link:\n' + str(self))
return sum(distances) | [
"def",
"insert_size",
"(",
"self",
")",
":",
"try",
":",
"distances",
"=",
"self",
".",
"_distance_to_contig_ends",
"(",
")",
"except",
":",
"raise",
"Error",
"(",
"'Error getting insert size from Link:\\n'",
"+",
"str",
"(",
"self",
")",
")",
"return",
"sum",
"(",
"distances",
")"
]
| Returns insert size, defined as distance from outer edges of reads (and assumes gap length of zero) | [
"Returns",
"insert",
"size",
"defined",
"as",
"distance",
"from",
"outer",
"edges",
"of",
"reads",
"(",
"and",
"assumes",
"gap",
"length",
"of",
"zero",
")"
]
| train | https://github.com/sanger-pathogens/ariba/blob/16a0b1916ce0e886bd22550ba2d648542977001b/ariba/link.py#L100-L107 |
sanger-pathogens/ariba | ariba/summary.py | Summary._load_fofn | def _load_fofn(cls, fofn):
'''Returns dictionary of filename -> short name. Value is None
whenever short name is not provided'''
filenames = {}
f = pyfastaq.utils.open_file_read(fofn)
for line in f:
fields = line.rstrip().split()
if len(fields) == 1:
filenames[fields[0]] = None
elif len(fields) == 2:
filenames[fields[0]] = fields[1]
else:
raise Error('Error at the following line of file ' + fofn + '. Expected at most 2 fields.\n' + line)
pyfastaq.utils.close(f)
return filenames | python | def _load_fofn(cls, fofn):
filenames = {}
f = pyfastaq.utils.open_file_read(fofn)
for line in f:
fields = line.rstrip().split()
if len(fields) == 1:
filenames[fields[0]] = None
elif len(fields) == 2:
filenames[fields[0]] = fields[1]
else:
raise Error('Error at the following line of file ' + fofn + '. Expected at most 2 fields.\n' + line)
pyfastaq.utils.close(f)
return filenames | [
"def",
"_load_fofn",
"(",
"cls",
",",
"fofn",
")",
":",
"filenames",
"=",
"{",
"}",
"f",
"=",
"pyfastaq",
".",
"utils",
".",
"open_file_read",
"(",
"fofn",
")",
"for",
"line",
"in",
"f",
":",
"fields",
"=",
"line",
".",
"rstrip",
"(",
")",
".",
"split",
"(",
")",
"if",
"len",
"(",
"fields",
")",
"==",
"1",
":",
"filenames",
"[",
"fields",
"[",
"0",
"]",
"]",
"=",
"None",
"elif",
"len",
"(",
"fields",
")",
"==",
"2",
":",
"filenames",
"[",
"fields",
"[",
"0",
"]",
"]",
"=",
"fields",
"[",
"1",
"]",
"else",
":",
"raise",
"Error",
"(",
"'Error at the following line of file '",
"+",
"fofn",
"+",
"'. Expected at most 2 fields.\\n'",
"+",
"line",
")",
"pyfastaq",
".",
"utils",
".",
"close",
"(",
"f",
")",
"return",
"filenames"
]
| Returns dictionary of filename -> short name. Value is None
whenever short name is not provided | [
"Returns",
"dictionary",
"of",
"filename",
"-",
">",
"short",
"name",
".",
"Value",
"is",
"None",
"whenever",
"short",
"name",
"is",
"not",
"provided"
]
| train | https://github.com/sanger-pathogens/ariba/blob/16a0b1916ce0e886bd22550ba2d648542977001b/ariba/summary.py#L70-L85 |
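Worked example of the file-of-filenames format parsed above: one or two whitespace-separated fields per line, the optional second field being a short display name. The example writes a temporary file so it is self-contained; _load_fofn is assumed to be callable directly on the Summary class.

import tempfile
from ariba import summary

with tempfile.NamedTemporaryFile('w', suffix='.fofn', delete=False) as f:
    f.write('/data/run1/report.tsv sample1\n')
    f.write('/data/run2/report.tsv\n')
    fofn = f.name

print(summary.Summary._load_fofn(fofn))
# {'/data/run1/report.tsv': 'sample1', '/data/run2/report.tsv': None}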
sanger-pathogens/ariba | ariba/summary.py | Summary._filter_matrix_rows | def _filter_matrix_rows(cls, matrix):
'''matrix = output from _to_matrix'''
indexes_to_keep = []
for i in range(len(matrix)):
keep_row = False
for element in matrix[i]:
if element not in {'NA', 'no'}:
keep_row = True
break
if keep_row:
indexes_to_keep.append(i)
return [matrix[i] for i in indexes_to_keep] | python | def _filter_matrix_rows(cls, matrix):
indexes_to_keep = []
for i in range(len(matrix)):
keep_row = False
for element in matrix[i]:
if element not in {'NA', 'no'}:
keep_row = True
break
if keep_row:
indexes_to_keep.append(i)
return [matrix[i] for i in indexes_to_keep] | [
"def",
"_filter_matrix_rows",
"(",
"cls",
",",
"matrix",
")",
":",
"indexes_to_keep",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"matrix",
")",
")",
":",
"keep_row",
"=",
"False",
"for",
"element",
"in",
"matrix",
"[",
"i",
"]",
":",
"if",
"element",
"not",
"in",
"{",
"'NA'",
",",
"'no'",
"}",
":",
"keep_row",
"=",
"True",
"break",
"if",
"keep_row",
":",
"indexes_to_keep",
".",
"append",
"(",
"i",
")",
"return",
"[",
"matrix",
"[",
"i",
"]",
"for",
"i",
"in",
"indexes_to_keep",
"]"
]
| matrix = output from _to_matrix | [
"matrix",
"=",
"output",
"from",
"_to_matrix"
]
| train | https://github.com/sanger-pathogens/ariba/blob/16a0b1916ce0e886bd22550ba2d648542977001b/ariba/summary.py#L223-L236 |
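Worked example of the row filter above: a row survives if at least one cell is neither 'NA' nor 'no'. Matrix values are invented.

matrix = [
    ['NA', 'no', 'no'],    # dropped
    ['no', 'yes', 'NA'],   # kept because of 'yes'
    ['NA', 'NA', 'NA'],    # dropped
]
kept = [row for row in matrix if any(cell not in {'NA', 'no'} for cell in row)]
print(kept)   # [['no', 'yes', 'NA']]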
sanger-pathogens/ariba | ariba/summary.py | Summary._filter_matrix_columns | def _filter_matrix_columns(cls, matrix, phandango_header, csv_header):
'''phandango_header, csv_header, matrix = output from _to_matrix'''
indexes_to_keep = set()
for row in matrix:
for i in range(len(row)):
if row[i] not in {'NA', 'no'}:
indexes_to_keep.add(i)
indexes_to_keep = sorted(list(indexes_to_keep))
for i in range(len(matrix)):
matrix[i] = [matrix[i][j] for j in indexes_to_keep]
phandango_header = [phandango_header[i] for i in indexes_to_keep]
csv_header = [csv_header[i] for i in indexes_to_keep]
return phandango_header, csv_header, matrix | python | def _filter_matrix_columns(cls, matrix, phandango_header, csv_header):
indexes_to_keep = set()
for row in matrix:
for i in range(len(row)):
if row[i] not in {'NA', 'no'}:
indexes_to_keep.add(i)
indexes_to_keep = sorted(list(indexes_to_keep))
for i in range(len(matrix)):
matrix[i] = [matrix[i][j] for j in indexes_to_keep]
phandango_header = [phandango_header[i] for i in indexes_to_keep]
csv_header = [csv_header[i] for i in indexes_to_keep]
return phandango_header, csv_header, matrix | [
"def",
"_filter_matrix_columns",
"(",
"cls",
",",
"matrix",
",",
"phandango_header",
",",
"csv_header",
")",
":",
"indexes_to_keep",
"=",
"set",
"(",
")",
"for",
"row",
"in",
"matrix",
":",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"row",
")",
")",
":",
"if",
"row",
"[",
"i",
"]",
"not",
"in",
"{",
"'NA'",
",",
"'no'",
"}",
":",
"indexes_to_keep",
".",
"add",
"(",
"i",
")",
"indexes_to_keep",
"=",
"sorted",
"(",
"list",
"(",
"indexes_to_keep",
")",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"matrix",
")",
")",
":",
"matrix",
"[",
"i",
"]",
"=",
"[",
"matrix",
"[",
"i",
"]",
"[",
"j",
"]",
"for",
"j",
"in",
"indexes_to_keep",
"]",
"phandango_header",
"=",
"[",
"phandango_header",
"[",
"i",
"]",
"for",
"i",
"in",
"indexes_to_keep",
"]",
"csv_header",
"=",
"[",
"csv_header",
"[",
"i",
"]",
"for",
"i",
"in",
"indexes_to_keep",
"]",
"return",
"phandango_header",
",",
"csv_header",
",",
"matrix"
]
| phandango_header, csv_header, matrix = output from _to_matrix | [
"phandango_header",
"csv_header",
"matrix",
"=",
"output",
"from",
"_to_matrix"
]
| train | https://github.com/sanger-pathogens/ariba/blob/16a0b1916ce0e886bd22550ba2d648542977001b/ariba/summary.py#L240-L256 |
sanger-pathogens/ariba | ariba/assembly_variants.py | AssemblyVariants._get_variant_effect | def _get_variant_effect(cls, variants, ref_sequence):
'''variants = list of variants in the same codon.
returns type of variant (cannot handle more than one indel in the same codon).'''
assert len(variants) != 0
var_types = [x.var_type for x in variants]
if len(set(var_types)) != 1:
return 'MULTIPLE', '.', '.'
var_type = var_types[0]
assert set([x.ref_name for x in variants]) == set([ref_sequence.id])
codon_starts = [AssemblyVariants._get_codon_start(0, x.ref_start) for x in variants]
assert len(set(codon_starts)) == 1
codon_start = codon_starts[0]
aa_start = codon_start // 3
ref_codon = pyfastaq.sequences.Fasta('codon', ref_sequence[codon_start:codon_start+3])
ref_aa = ref_codon.translate()
if var_type == pymummer.variant.SNP:
new_codon = list(ref_codon.seq)
for v in variants:
new_codon[v.ref_start - codon_start] = v.qry_base
new_codon = pyfastaq.sequences.Fasta('new', ''.join(new_codon))
qry_aa = new_codon.translate()
if ref_aa.seq == qry_aa.seq:
return ('SYN', '.', aa_start)
elif qry_aa.seq == '*':
return ('TRUNC', ref_aa.seq + str(aa_start + 1) + 'trunc', aa_start)
else:
return ('NONSYN', ref_aa.seq + str(aa_start + 1) + qry_aa.seq, aa_start)
elif var_type in [pymummer.variant.INS, pymummer.variant.DEL]:
if len(variants) > 1:
return 'INDELS', '.', aa_start
var = variants[0]
if var_type == pymummer.variant.INS:
new_seq = pyfastaq.sequences.Fasta('seq', var.qry_base)
else:
new_seq = pyfastaq.sequences.Fasta('seq', var.ref_base)
if len(new_seq) % 3 != 0:
return ('FSHIFT', ref_aa.seq + str(aa_start + 1) + 'fs', aa_start)
new_seq_aa = new_seq.translate()
if '*' in new_seq_aa.seq:
return ('TRUNC', ref_aa.seq + str(aa_start + 1) + 'trunc', aa_start)
elif var_type == pymummer.variant.INS:
ref_codon_after_ins = pyfastaq.sequences.Fasta('codon', ref_sequence[codon_start+3:codon_start+6])
aa_after_ins = ref_codon_after_ins.translate()
return ('INS', ref_aa.seq + str(aa_start + 1) + '_' + aa_after_ins.seq + str(aa_start + 2) + 'ins' + new_seq_aa.seq , aa_start)
else:
if len(new_seq) == 3:
return ('DEL', ref_aa.seq + str(aa_start + 1) + 'del', aa_start)
else:
assert len(new_seq) % 3 == 0
ref_codon_after_ins = pyfastaq.sequences.Fasta('codon', ref_sequence[codon_start+3:codon_start+6])
aa_after_ins = ref_codon_after_ins.translate()
return ('DEL', ref_aa.seq + str(aa_start + 1)+ '_' + aa_after_ins.seq + str(aa_start + 2) + 'del', aa_start)
else:
return ('UNKNOWN', '.', aa_start) | python | def _get_variant_effect(cls, variants, ref_sequence):
assert len(variants) != 0
var_types = [x.var_type for x in variants]
if len(set(var_types)) != 1:
return 'MULTIPLE', '.', '.'
var_type = var_types[0]
assert set([x.ref_name for x in variants]) == set([ref_sequence.id])
codon_starts = [AssemblyVariants._get_codon_start(0, x.ref_start) for x in variants]
assert len(set(codon_starts)) == 1
codon_start = codon_starts[0]
aa_start = codon_start // 3
ref_codon = pyfastaq.sequences.Fasta('codon', ref_sequence[codon_start:codon_start+3])
ref_aa = ref_codon.translate()
if var_type == pymummer.variant.SNP:
new_codon = list(ref_codon.seq)
for v in variants:
new_codon[v.ref_start - codon_start] = v.qry_base
new_codon = pyfastaq.sequences.Fasta('new', ''.join(new_codon))
qry_aa = new_codon.translate()
if ref_aa.seq == qry_aa.seq:
return ('SYN', '.', aa_start)
elif qry_aa.seq == '*':
return ('TRUNC', ref_aa.seq + str(aa_start + 1) + 'trunc', aa_start)
else:
return ('NONSYN', ref_aa.seq + str(aa_start + 1) + qry_aa.seq, aa_start)
elif var_type in [pymummer.variant.INS, pymummer.variant.DEL]:
if len(variants) > 1:
return 'INDELS', '.', aa_start
var = variants[0]
if var_type == pymummer.variant.INS:
new_seq = pyfastaq.sequences.Fasta('seq', var.qry_base)
else:
new_seq = pyfastaq.sequences.Fasta('seq', var.ref_base)
if len(new_seq) % 3 != 0:
return ('FSHIFT', ref_aa.seq + str(aa_start + 1) + 'fs', aa_start)
new_seq_aa = new_seq.translate()
if '*' in new_seq_aa.seq:
return ('TRUNC', ref_aa.seq + str(aa_start + 1) + 'trunc', aa_start)
elif var_type == pymummer.variant.INS:
ref_codon_after_ins = pyfastaq.sequences.Fasta('codon', ref_sequence[codon_start+3:codon_start+6])
aa_after_ins = ref_codon_after_ins.translate()
return ('INS', ref_aa.seq + str(aa_start + 1) + '_' + aa_after_ins.seq + str(aa_start + 2) + 'ins' + new_seq_aa.seq , aa_start)
else:
if len(new_seq) == 3:
return ('DEL', ref_aa.seq + str(aa_start + 1) + 'del', aa_start)
else:
assert len(new_seq) % 3 == 0
ref_codon_after_ins = pyfastaq.sequences.Fasta('codon', ref_sequence[codon_start+3:codon_start+6])
aa_after_ins = ref_codon_after_ins.translate()
return ('DEL', ref_aa.seq + str(aa_start + 1)+ '_' + aa_after_ins.seq + str(aa_start + 2) + 'del', aa_start)
else:
return ('UNKNOWN', '.', aa_start) | [
"def",
"_get_variant_effect",
"(",
"cls",
",",
"variants",
",",
"ref_sequence",
")",
":",
"assert",
"len",
"(",
"variants",
")",
"!=",
"0",
"var_types",
"=",
"[",
"x",
".",
"var_type",
"for",
"x",
"in",
"variants",
"]",
"if",
"len",
"(",
"set",
"(",
"var_types",
")",
")",
"!=",
"1",
":",
"return",
"'MULTIPLE'",
",",
"'.'",
",",
"'.'",
"var_type",
"=",
"var_types",
"[",
"0",
"]",
"assert",
"set",
"(",
"[",
"x",
".",
"ref_name",
"for",
"x",
"in",
"variants",
"]",
")",
"==",
"set",
"(",
"[",
"ref_sequence",
".",
"id",
"]",
")",
"codon_starts",
"=",
"[",
"AssemblyVariants",
".",
"_get_codon_start",
"(",
"0",
",",
"x",
".",
"ref_start",
")",
"for",
"x",
"in",
"variants",
"]",
"assert",
"len",
"(",
"set",
"(",
"codon_starts",
")",
")",
"==",
"1",
"codon_start",
"=",
"codon_starts",
"[",
"0",
"]",
"aa_start",
"=",
"codon_start",
"//",
"3",
"ref_codon",
"=",
"pyfastaq",
".",
"sequences",
".",
"Fasta",
"(",
"'codon'",
",",
"ref_sequence",
"[",
"codon_start",
":",
"codon_start",
"+",
"3",
"]",
")",
"ref_aa",
"=",
"ref_codon",
".",
"translate",
"(",
")",
"if",
"var_type",
"==",
"pymummer",
".",
"variant",
".",
"SNP",
":",
"new_codon",
"=",
"list",
"(",
"ref_codon",
".",
"seq",
")",
"for",
"v",
"in",
"variants",
":",
"new_codon",
"[",
"v",
".",
"ref_start",
"-",
"codon_start",
"]",
"=",
"v",
".",
"qry_base",
"new_codon",
"=",
"pyfastaq",
".",
"sequences",
".",
"Fasta",
"(",
"'new'",
",",
"''",
".",
"join",
"(",
"new_codon",
")",
")",
"qry_aa",
"=",
"new_codon",
".",
"translate",
"(",
")",
"if",
"ref_aa",
".",
"seq",
"==",
"qry_aa",
".",
"seq",
":",
"return",
"(",
"'SYN'",
",",
"'.'",
",",
"aa_start",
")",
"elif",
"qry_aa",
".",
"seq",
"==",
"'*'",
":",
"return",
"(",
"'TRUNC'",
",",
"ref_aa",
".",
"seq",
"+",
"str",
"(",
"aa_start",
"+",
"1",
")",
"+",
"'trunc'",
",",
"aa_start",
")",
"else",
":",
"return",
"(",
"'NONSYN'",
",",
"ref_aa",
".",
"seq",
"+",
"str",
"(",
"aa_start",
"+",
"1",
")",
"+",
"qry_aa",
".",
"seq",
",",
"aa_start",
")",
"elif",
"var_type",
"in",
"[",
"pymummer",
".",
"variant",
".",
"INS",
",",
"pymummer",
".",
"variant",
".",
"DEL",
"]",
":",
"if",
"len",
"(",
"variants",
")",
">",
"1",
":",
"return",
"'INDELS'",
",",
"'.'",
",",
"aa_start",
"var",
"=",
"variants",
"[",
"0",
"]",
"if",
"var_type",
"==",
"pymummer",
".",
"variant",
".",
"INS",
":",
"new_seq",
"=",
"pyfastaq",
".",
"sequences",
".",
"Fasta",
"(",
"'seq'",
",",
"var",
".",
"qry_base",
")",
"else",
":",
"new_seq",
"=",
"pyfastaq",
".",
"sequences",
".",
"Fasta",
"(",
"'seq'",
",",
"var",
".",
"ref_base",
")",
"if",
"len",
"(",
"new_seq",
")",
"%",
"3",
"!=",
"0",
":",
"return",
"(",
"'FSHIFT'",
",",
"ref_aa",
".",
"seq",
"+",
"str",
"(",
"aa_start",
"+",
"1",
")",
"+",
"'fs'",
",",
"aa_start",
")",
"new_seq_aa",
"=",
"new_seq",
".",
"translate",
"(",
")",
"if",
"'*'",
"in",
"new_seq_aa",
".",
"seq",
":",
"return",
"(",
"'TRUNC'",
",",
"ref_aa",
".",
"seq",
"+",
"str",
"(",
"aa_start",
"+",
"1",
")",
"+",
"'trunc'",
",",
"aa_start",
")",
"elif",
"var_type",
"==",
"pymummer",
".",
"variant",
".",
"INS",
":",
"ref_codon_after_ins",
"=",
"pyfastaq",
".",
"sequences",
".",
"Fasta",
"(",
"'codon'",
",",
"ref_sequence",
"[",
"codon_start",
"+",
"3",
":",
"codon_start",
"+",
"6",
"]",
")",
"aa_after_ins",
"=",
"ref_codon_after_ins",
".",
"translate",
"(",
")",
"return",
"(",
"'INS'",
",",
"ref_aa",
".",
"seq",
"+",
"str",
"(",
"aa_start",
"+",
"1",
")",
"+",
"'_'",
"+",
"aa_after_ins",
".",
"seq",
"+",
"str",
"(",
"aa_start",
"+",
"2",
")",
"+",
"'ins'",
"+",
"new_seq_aa",
".",
"seq",
",",
"aa_start",
")",
"else",
":",
"if",
"len",
"(",
"new_seq",
")",
"==",
"3",
":",
"return",
"(",
"'DEL'",
",",
"ref_aa",
".",
"seq",
"+",
"str",
"(",
"aa_start",
"+",
"1",
")",
"+",
"'del'",
",",
"aa_start",
")",
"else",
":",
"assert",
"len",
"(",
"new_seq",
")",
"%",
"3",
"==",
"0",
"ref_codon_after_ins",
"=",
"pyfastaq",
".",
"sequences",
".",
"Fasta",
"(",
"'codon'",
",",
"ref_sequence",
"[",
"codon_start",
"+",
"3",
":",
"codon_start",
"+",
"6",
"]",
")",
"aa_after_ins",
"=",
"ref_codon_after_ins",
".",
"translate",
"(",
")",
"return",
"(",
"'DEL'",
",",
"ref_aa",
".",
"seq",
"+",
"str",
"(",
"aa_start",
"+",
"1",
")",
"+",
"'_'",
"+",
"aa_after_ins",
".",
"seq",
"+",
"str",
"(",
"aa_start",
"+",
"2",
")",
"+",
"'del'",
",",
"aa_start",
")",
"else",
":",
"return",
"(",
"'UNKNOWN'",
",",
"'.'",
",",
"aa_start",
")"
]
| variants = list of variants in the same codon.
returns type of variant (cannot handle more than one indel in the same codon). | [
"variants",
"=",
"list",
"of",
"variants",
"in",
"the",
"same",
"codon",
".",
"returns",
"type",
"of",
"variant",
"(",
"cannot",
"handle",
"more",
"than",
"one",
"indel",
"in",
"the",
"same",
"codon",
")",
"."
]
| train | https://github.com/sanger-pathogens/ariba/blob/16a0b1916ce0e886bd22550ba2d648542977001b/ariba/assembly_variants.py#L63-L126 |
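Minimal illustration of the codon logic in _get_variant_effect using pyfastaq directly, outside the class: translate the reference codon and the mutated codon, then classify. Only the SNP branch is shown and the codons are invented.

import pyfastaq.sequences

ref_codon = pyfastaq.sequences.Fasta('codon', 'GAT')   # Asp (D)
qry_codon = pyfastaq.sequences.Fasta('codon', 'GAA')   # Glu (E)
ref_aa = ref_codon.translate().seq
qry_aa = qry_codon.translate().seq

aa_start = 1   # 0-based amino acid index; reported 1-based, as above
if ref_aa == qry_aa:
    print('SYN')
elif qry_aa == '*':
    print('TRUNC', ref_aa + str(aa_start + 1) + 'trunc')
else:
    print('NONSYN', ref_aa + str(aa_start + 1) + qry_aa)   # prints: NONSYN D2E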
sanger-pathogens/ariba | ariba/assembly_variants.py | AssemblyVariants._get_remaining_known_ref_variants | def _get_remaining_known_ref_variants(known_ref_variants, used_ref_variants, nucmer_coords):
'''Finds variants where ref has the variant and so does the contig. Which means
that there was no mummer call to flag it up so need to look through the known
ref variants. Also need to check that the variant is in a nucmer match to an
assembly contig.'''
variants = []
for ref_variant_pos, ref_variants_set in sorted(known_ref_variants.items()):
for known_ref_variant in ref_variants_set:
if known_ref_variant not in used_ref_variants:
variant_pos_matches_contig = False
pos = known_ref_variant.variant.position
if known_ref_variant.seq_type == 'n':
ref_interval = intervals.Interval(pos, pos)
elif known_ref_variant.seq_type == 'p':
ref_interval = intervals.Interval(3 * pos, 3 * pos + 2)
else:
raise Error('Unexpected variant type "' + known_ref_variant.variant_type + '" in _get_remaining_known_ref_variants. Cannot continue')
for interval in nucmer_coords:
if ref_interval.intersects(interval):
variant_pos_matches_contig = True
break
if variant_pos_matches_contig:
variants.append((None, known_ref_variant.seq_type, None, None, None, {known_ref_variant}, set()))
return variants | python | def _get_remaining_known_ref_variants(known_ref_variants, used_ref_variants, nucmer_coords):
variants = []
for ref_variant_pos, ref_variants_set in sorted(known_ref_variants.items()):
for known_ref_variant in ref_variants_set:
if known_ref_variant not in used_ref_variants:
variant_pos_matches_contig = False
pos = known_ref_variant.variant.position
if known_ref_variant.seq_type == 'n':
ref_interval = intervals.Interval(pos, pos)
elif known_ref_variant.seq_type == 'p':
ref_interval = intervals.Interval(3 * pos, 3 * pos + 2)
else:
raise Error('Unexpected variant type "' + known_ref_variant.variant_type + '" in _get_remaining_known_ref_variants. Cannot continue')
for interval in nucmer_coords:
if ref_interval.intersects(interval):
variant_pos_matches_contig = True
break
if variant_pos_matches_contig:
variants.append((None, known_ref_variant.seq_type, None, None, None, {known_ref_variant}, set()))
return variants | [
"def",
"_get_remaining_known_ref_variants",
"(",
"known_ref_variants",
",",
"used_ref_variants",
",",
"nucmer_coords",
")",
":",
"variants",
"=",
"[",
"]",
"for",
"ref_variant_pos",
",",
"ref_variants_set",
"in",
"sorted",
"(",
"known_ref_variants",
".",
"items",
"(",
")",
")",
":",
"for",
"known_ref_variant",
"in",
"ref_variants_set",
":",
"if",
"known_ref_variant",
"not",
"in",
"used_ref_variants",
":",
"variant_pos_matches_contig",
"=",
"False",
"pos",
"=",
"known_ref_variant",
".",
"variant",
".",
"position",
"if",
"known_ref_variant",
".",
"seq_type",
"==",
"'n'",
":",
"ref_interval",
"=",
"intervals",
".",
"Interval",
"(",
"pos",
",",
"pos",
")",
"elif",
"known_ref_variant",
".",
"seq_type",
"==",
"'p'",
":",
"ref_interval",
"=",
"intervals",
".",
"Interval",
"(",
"3",
"*",
"pos",
",",
"3",
"*",
"pos",
"+",
"2",
")",
"else",
":",
"raise",
"Error",
"(",
"'Unexpected variant type \"'",
"+",
"known_ref_variant",
".",
"variant_type",
"+",
"'\" in _get_remaining_known_ref_variants. Cannot continue'",
")",
"for",
"interval",
"in",
"nucmer_coords",
":",
"if",
"ref_interval",
".",
"intersects",
"(",
"interval",
")",
":",
"variant_pos_matches_contig",
"=",
"True",
"break",
"if",
"variant_pos_matches_contig",
":",
"variants",
".",
"append",
"(",
"(",
"None",
",",
"known_ref_variant",
".",
"seq_type",
",",
"None",
",",
"None",
",",
"None",
",",
"{",
"known_ref_variant",
"}",
",",
"set",
"(",
")",
")",
")",
"return",
"variants"
]
| Finds variants where ref has the variant and so does the contig. Which means
that there was no mummer call to flag it up so need to look through the known
ref variants. Also need to check that the variant is in a nucmer match to an
assembly contig. | [
"Finds",
"variants",
"where",
"ref",
"has",
"the",
"variant",
"and",
"so",
"does",
"the",
"contig",
".",
"Which",
"means",
"that",
"there",
"was",
"no",
"mummer",
"call",
"to",
"flag",
"it",
"up",
"so",
"need",
"to",
"look",
"through",
"the",
"known",
"ref",
"variants",
".",
"Also",
"need",
"to",
"check",
"that",
"the",
"variant",
"is",
"in",
"a",
"nucmer",
"match",
"to",
"an",
"assembly",
"contig",
"."
]
| train | https://github.com/sanger-pathogens/ariba/blob/16a0b1916ce0e886bd22550ba2d648542977001b/ariba/assembly_variants.py#L232-L260 |
sanger-pathogens/ariba | ariba/assembly_variants.py | AssemblyVariants.get_variants | def get_variants(self, ref_sequence_name, allowed_ctg_coords, allowed_ref_coords, nucmer_matches=None):
        '''Nucmer coords = dict. Key=contig name. Value = list of intervals of ref coords that match the contig.
Made by assembly_compare.AssemblyCompare.nucmer_hits_to_ref_coords
Returns dictionary. Key=contig name. Value = list of variants. Each variant
is a tuple: (
0 = position,
1 = type in ['n', 'p']
2 = Variant string, eg 'D2E',
3 = variant effect (as returned by _get_variant_effect)
4 = list of pymummer.variant.Variant that made up this variant (could be more than one because of
variants in the same codon)
5 = set {matching known variants from metadata (=sequence_metadata.SequenceMetadata)}
6 = set {known ref metadata (=sequence_metadata.SequenceMetadata) at same position as SNP}, excluding those from 4
)
'''
mummer_variants = self._get_mummer_variants(self.nucmer_snp_file)
variants = {}
seq_type, is_variant_only = self.refdata.sequence_type(ref_sequence_name)
ref_sequence = self.refdata.sequence(ref_sequence_name)
if ref_sequence_name in self.refdata.metadata:
refdata_var_dict = self.refdata.metadata[ref_sequence_name]
else:
refdata_var_dict = None
known_non_wild_variants_in_ref = self.refdata.all_non_wild_type_variants(ref_sequence_name)
for contig in allowed_ctg_coords:
if contig not in allowed_ref_coords:
continue
used_known_variants = set()
variants[contig] = []
if contig in mummer_variants:
for mummer_variant_list in mummer_variants[contig]:
ref_start = min([x.ref_start for x in mummer_variant_list])
ref_end = max([x.ref_end for x in mummer_variant_list])
ctg_start = min([x.qry_start for x in mummer_variant_list])
ctg_end = min([x.qry_end for x in mummer_variant_list])
ref_interval = intervals.Interval(ref_start, ref_end)
ctg_interval = intervals.Interval(ctg_start, ctg_end)
ref_ok = True in {x.intersects(ref_interval) for x in allowed_ref_coords[contig]}
qry_ok = True in {x.intersects(ctg_interval) for x in allowed_ctg_coords[contig]}
if not (ref_ok and qry_ok):
continue
if seq_type == 'p':
new_variant, used_variants = self._get_one_variant_for_one_contig_coding(ref_sequence, refdata_var_dict, mummer_variant_list)
else:
for mummer_variant in mummer_variant_list:
new_variant, used_variants = self._get_one_variant_for_one_contig_non_coding(refdata_var_dict, mummer_variant)
if new_variant is not None:
variants[contig].append(new_variant)
used_known_variants.update(used_variants)
# for this contig, need to know all the ref sequence and coords it maps to.
# Then report just the unused known variants, as the contig also has these variants
if seq_type == 'p':
new_variants = self._get_remaining_known_ref_variants(known_non_wild_variants_in_ref['p'], used_known_variants, allowed_ref_coords[contig])
else:
new_variants = self._get_remaining_known_ref_variants(known_non_wild_variants_in_ref['n'], used_known_variants, allowed_ref_coords[contig])
if is_variant_only:
new_variants = [x for x in new_variants if len(x[5]) > 0]
variants[contig].extend(new_variants)
if len(variants[contig]) == 0:
del variants[contig]
return variants | python | def get_variants(self, ref_sequence_name, allowed_ctg_coords, allowed_ref_coords, nucmer_matches=None):
mummer_variants = self._get_mummer_variants(self.nucmer_snp_file)
variants = {}
seq_type, is_variant_only = self.refdata.sequence_type(ref_sequence_name)
ref_sequence = self.refdata.sequence(ref_sequence_name)
if ref_sequence_name in self.refdata.metadata:
refdata_var_dict = self.refdata.metadata[ref_sequence_name]
else:
refdata_var_dict = None
known_non_wild_variants_in_ref = self.refdata.all_non_wild_type_variants(ref_sequence_name)
for contig in allowed_ctg_coords:
if contig not in allowed_ref_coords:
continue
used_known_variants = set()
variants[contig] = []
if contig in mummer_variants:
for mummer_variant_list in mummer_variants[contig]:
ref_start = min([x.ref_start for x in mummer_variant_list])
ref_end = max([x.ref_end for x in mummer_variant_list])
ctg_start = min([x.qry_start for x in mummer_variant_list])
ctg_end = min([x.qry_end for x in mummer_variant_list])
ref_interval = intervals.Interval(ref_start, ref_end)
ctg_interval = intervals.Interval(ctg_start, ctg_end)
ref_ok = True in {x.intersects(ref_interval) for x in allowed_ref_coords[contig]}
qry_ok = True in {x.intersects(ctg_interval) for x in allowed_ctg_coords[contig]}
if not (ref_ok and qry_ok):
continue
if seq_type == 'p':
new_variant, used_variants = self._get_one_variant_for_one_contig_coding(ref_sequence, refdata_var_dict, mummer_variant_list)
else:
for mummer_variant in mummer_variant_list:
new_variant, used_variants = self._get_one_variant_for_one_contig_non_coding(refdata_var_dict, mummer_variant)
if new_variant is not None:
variants[contig].append(new_variant)
used_known_variants.update(used_variants)
if seq_type == 'p':
new_variants = self._get_remaining_known_ref_variants(known_non_wild_variants_in_ref['p'], used_known_variants, allowed_ref_coords[contig])
else:
new_variants = self._get_remaining_known_ref_variants(known_non_wild_variants_in_ref['n'], used_known_variants, allowed_ref_coords[contig])
if is_variant_only:
new_variants = [x for x in new_variants if len(x[5]) > 0]
variants[contig].extend(new_variants)
if len(variants[contig]) == 0:
del variants[contig]
return variants | [
"def",
"get_variants",
"(",
"self",
",",
"ref_sequence_name",
",",
"allowed_ctg_coords",
",",
"allowed_ref_coords",
",",
"nucmer_matches",
"=",
"None",
")",
":",
"mummer_variants",
"=",
"self",
".",
"_get_mummer_variants",
"(",
"self",
".",
"nucmer_snp_file",
")",
"variants",
"=",
"{",
"}",
"seq_type",
",",
"is_variant_only",
"=",
"self",
".",
"refdata",
".",
"sequence_type",
"(",
"ref_sequence_name",
")",
"ref_sequence",
"=",
"self",
".",
"refdata",
".",
"sequence",
"(",
"ref_sequence_name",
")",
"if",
"ref_sequence_name",
"in",
"self",
".",
"refdata",
".",
"metadata",
":",
"refdata_var_dict",
"=",
"self",
".",
"refdata",
".",
"metadata",
"[",
"ref_sequence_name",
"]",
"else",
":",
"refdata_var_dict",
"=",
"None",
"known_non_wild_variants_in_ref",
"=",
"self",
".",
"refdata",
".",
"all_non_wild_type_variants",
"(",
"ref_sequence_name",
")",
"for",
"contig",
"in",
"allowed_ctg_coords",
":",
"if",
"contig",
"not",
"in",
"allowed_ref_coords",
":",
"continue",
"used_known_variants",
"=",
"set",
"(",
")",
"variants",
"[",
"contig",
"]",
"=",
"[",
"]",
"if",
"contig",
"in",
"mummer_variants",
":",
"for",
"mummer_variant_list",
"in",
"mummer_variants",
"[",
"contig",
"]",
":",
"ref_start",
"=",
"min",
"(",
"[",
"x",
".",
"ref_start",
"for",
"x",
"in",
"mummer_variant_list",
"]",
")",
"ref_end",
"=",
"max",
"(",
"[",
"x",
".",
"ref_end",
"for",
"x",
"in",
"mummer_variant_list",
"]",
")",
"ctg_start",
"=",
"min",
"(",
"[",
"x",
".",
"qry_start",
"for",
"x",
"in",
"mummer_variant_list",
"]",
")",
"ctg_end",
"=",
"min",
"(",
"[",
"x",
".",
"qry_end",
"for",
"x",
"in",
"mummer_variant_list",
"]",
")",
"ref_interval",
"=",
"intervals",
".",
"Interval",
"(",
"ref_start",
",",
"ref_end",
")",
"ctg_interval",
"=",
"intervals",
".",
"Interval",
"(",
"ctg_start",
",",
"ctg_end",
")",
"ref_ok",
"=",
"True",
"in",
"{",
"x",
".",
"intersects",
"(",
"ref_interval",
")",
"for",
"x",
"in",
"allowed_ref_coords",
"[",
"contig",
"]",
"}",
"qry_ok",
"=",
"True",
"in",
"{",
"x",
".",
"intersects",
"(",
"ctg_interval",
")",
"for",
"x",
"in",
"allowed_ctg_coords",
"[",
"contig",
"]",
"}",
"if",
"not",
"(",
"ref_ok",
"and",
"qry_ok",
")",
":",
"continue",
"if",
"seq_type",
"==",
"'p'",
":",
"new_variant",
",",
"used_variants",
"=",
"self",
".",
"_get_one_variant_for_one_contig_coding",
"(",
"ref_sequence",
",",
"refdata_var_dict",
",",
"mummer_variant_list",
")",
"else",
":",
"for",
"mummer_variant",
"in",
"mummer_variant_list",
":",
"new_variant",
",",
"used_variants",
"=",
"self",
".",
"_get_one_variant_for_one_contig_non_coding",
"(",
"refdata_var_dict",
",",
"mummer_variant",
")",
"if",
"new_variant",
"is",
"not",
"None",
":",
"variants",
"[",
"contig",
"]",
".",
"append",
"(",
"new_variant",
")",
"used_known_variants",
".",
"update",
"(",
"used_variants",
")",
"# for this contig, need to know all the ref sequence and coords it maps to.",
"# Then report just the unused known variants, as the contig also has these variants",
"if",
"seq_type",
"==",
"'p'",
":",
"new_variants",
"=",
"self",
".",
"_get_remaining_known_ref_variants",
"(",
"known_non_wild_variants_in_ref",
"[",
"'p'",
"]",
",",
"used_known_variants",
",",
"allowed_ref_coords",
"[",
"contig",
"]",
")",
"else",
":",
"new_variants",
"=",
"self",
".",
"_get_remaining_known_ref_variants",
"(",
"known_non_wild_variants_in_ref",
"[",
"'n'",
"]",
",",
"used_known_variants",
",",
"allowed_ref_coords",
"[",
"contig",
"]",
")",
"if",
"is_variant_only",
":",
"new_variants",
"=",
"[",
"x",
"for",
"x",
"in",
"new_variants",
"if",
"len",
"(",
"x",
"[",
"5",
"]",
")",
">",
"0",
"]",
"variants",
"[",
"contig",
"]",
".",
"extend",
"(",
"new_variants",
")",
"if",
"len",
"(",
"variants",
"[",
"contig",
"]",
")",
"==",
"0",
":",
"del",
"variants",
"[",
"contig",
"]",
"return",
"variants"
]
| Nucmer coords = dict. Key=contig name. Value = list of intervals of ref coords that match the contig.
Made by assembly_compare.AssemblyCompare.nucmer_hits_to_ref_coords
Returns dictionary. Key=contig name. Value = list of variants. Each variant
is a tuple: (
0 = position,
1 = type in ['n', 'p']
2 = Variant string, eg 'D2E',
3 = variant effect (as returned by _get_variant_effect)
4 = list of pymummer.variant.Variant that made up this variant (could be more than one because of
variants in the same codon)
5 = set {matching known variants from metadata (=sequence_metadata.SequenceMetadata)}
6 = set {known ref metadata (=sequence_metadata.SequenceMetadata) at same position as SNP}, excluding those from 4
) | [
"Nucmr",
"coords",
"=",
"dict",
".",
"Key",
"=",
"contig",
"name",
".",
"Value",
"=",
"list",
"of",
"intervals",
"of",
"ref",
"coords",
"that",
"match",
"the",
"contig",
".",
"Made",
"by",
"assembly_compare",
".",
"AssemblyCompare",
".",
"nucmer_hits_to_ref_coords",
"Returns",
"dictionary",
".",
"Key",
"=",
"contig",
"name",
".",
"Value",
"=",
"list",
"of",
"variants",
".",
"Each",
"variant",
"is",
"a",
"tuple",
":",
"(",
"0",
"=",
"position",
"1",
"=",
"type",
"in",
"[",
"n",
"p",
"]",
"2",
"=",
"Variant",
"string",
"eg",
"D2E",
"3",
"=",
"variant",
"effect",
"(",
"as",
"returned",
"by",
"_get_variant_effect",
")",
"4",
"=",
"list",
"of",
"pymummer",
".",
"variant",
".",
"Variant",
"that",
"made",
"up",
"this",
"variant",
"(",
"could",
"be",
"more",
"than",
"one",
"because",
"of",
"variants",
"in",
"the",
"same",
"codon",
")",
"5",
"=",
"set",
"{",
"matching",
"known",
"variants",
"from",
"metadata",
"(",
"=",
"sequence_metadata",
".",
"SequenceMetadata",
")",
"}",
"6",
"=",
"set",
"{",
"known",
"ref",
"metadata",
"(",
"=",
"sequence_metadata",
".",
"SequenceMetadata",
")",
"at",
"same",
"position",
"as",
"SNP",
"}",
"excluding",
"those",
"from",
"4",
")"
]
| train | https://github.com/sanger-pathogens/ariba/blob/16a0b1916ce0e886bd22550ba2d648542977001b/ariba/assembly_variants.py#L263-L336 |
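Sketch of walking the return value documented above, {contig name: [7-element variant tuples]}. Building an AssemblyVariants instance needs reference data and a nucmer SNP file, so only the consumption side is shown and 'variants' is a placeholder.

variants = {}   # placeholder for AssemblyVariants.get_variants(...) output

for contig_name, variant_list in sorted(variants.items()):
    for position, seq_type, var_string, effect, mummer_vars, known_meta, other_meta in variant_list:
        print(contig_name, position, seq_type, var_string, effect,
              len(known_meta), 'matching known variants')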
sanger-pathogens/ariba | ariba/ref_genes_getter.py | RefGenesGetter._fix_virulencefinder_fasta_file | def _fix_virulencefinder_fasta_file(cls, infile, outfile):
'''Some line breaks are missing in the FASTA files from
        virulence finder. Which means there are lines like this:
AAGATCCAATAACTGAAGATGTTGAACAAACAATTCATAATATTTATGGTCAATATGCTATTTTCGTTGA
AGGTGTTGCGCATTTACCTGGACATCTCTCTCCATTATTAAAAAAATTACTACTTAAATCTTTATAA>coa:1:BA000018.3
ATGAAAAAGCAAATAATTTCGCTAGGCGCATTAGCAGTTGCATCTAGCTTATTTACATGGGATAACAAAG
and therefore the sequences are messed up when we parse them. Also
one has a > at the end, then the seq name on the next line.
This function fixes the file by adding line breaks'''
with open(infile) as f_in, open(outfile, 'w') as f_out:
for line in f_in:
if line.startswith('>') or '>' not in line:
print(line, end='', file=f_out)
elif line.endswith('>\n'):
print('WARNING: found line with ">" at the end! Fixing. Line:' + line.rstrip() + ' in file ' + infile, file=sys.stderr)
print(line.rstrip('>\n'), file=f_out)
print('>', end='', file=f_out)
else:
print('WARNING: found line with ">" not at the start! Fixing. Line:' + line.rstrip() + ' in file ' + infile, file=sys.stderr)
line1, line2 = line.split('>')
print(line1, file=f_out)
print('>', line2, sep='', end='', file=f_out) | python | def _fix_virulencefinder_fasta_file(cls, infile, outfile):
with open(infile) as f_in, open(outfile, 'w') as f_out:
for line in f_in:
if line.startswith('>') or '>' not in line:
print(line, end='', file=f_out)
elif line.endswith('>\n'):
print('WARNING: found line with ">" at the end! Fixing. Line:' + line.rstrip() + ' in file ' + infile, file=sys.stderr)
print(line.rstrip('>\n'), file=f_out)
print('>', end='', file=f_out)
else:
print('WARNING: found line with ">" not at the start! Fixing. Line:' + line.rstrip() + ' in file ' + infile, file=sys.stderr)
line1, line2 = line.split('>')
print(line1, file=f_out)
print('>', line2, sep='', end='', file=f_out) | [
"def",
"_fix_virulencefinder_fasta_file",
"(",
"cls",
",",
"infile",
",",
"outfile",
")",
":",
"with",
"open",
"(",
"infile",
")",
"as",
"f_in",
",",
"open",
"(",
"outfile",
",",
"'w'",
")",
"as",
"f_out",
":",
"for",
"line",
"in",
"f_in",
":",
"if",
"line",
".",
"startswith",
"(",
"'>'",
")",
"or",
"'>'",
"not",
"in",
"line",
":",
"print",
"(",
"line",
",",
"end",
"=",
"''",
",",
"file",
"=",
"f_out",
")",
"elif",
"line",
".",
"endswith",
"(",
"'>\\n'",
")",
":",
"print",
"(",
"'WARNING: found line with \">\" at the end! Fixing. Line:'",
"+",
"line",
".",
"rstrip",
"(",
")",
"+",
"' in file '",
"+",
"infile",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"print",
"(",
"line",
".",
"rstrip",
"(",
"'>\\n'",
")",
",",
"file",
"=",
"f_out",
")",
"print",
"(",
"'>'",
",",
"end",
"=",
"''",
",",
"file",
"=",
"f_out",
")",
"else",
":",
"print",
"(",
"'WARNING: found line with \">\" not at the start! Fixing. Line:'",
"+",
"line",
".",
"rstrip",
"(",
")",
"+",
"' in file '",
"+",
"infile",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"line1",
",",
"line2",
"=",
"line",
".",
"split",
"(",
"'>'",
")",
"print",
"(",
"line1",
",",
"file",
"=",
"f_out",
")",
"print",
"(",
"'>'",
",",
"line2",
",",
"sep",
"=",
"''",
",",
"end",
"=",
"''",
",",
"file",
"=",
"f_out",
")"
]
| Some line breaks are missing in the FASTA files from
virulence finder. Which means there are lines like this:
AAGATCCAATAACTGAAGATGTTGAACAAACAATTCATAATATTTATGGTCAATATGCTATTTTCGTTGA
AGGTGTTGCGCATTTACCTGGACATCTCTCTCCATTATTAAAAAAATTACTACTTAAATCTTTATAA>coa:1:BA000018.3
ATGAAAAAGCAAATAATTTCGCTAGGCGCATTAGCAGTTGCATCTAGCTTATTTACATGGGATAACAAAG
and therefore the sequences are messed up when we parse them. Also
one has a > at the end, then the seq name on the next line.
This function fixes the file by adding line breaks | [
"Some",
"line",
"breaks",
"are",
"missing",
"in",
"the",
"FASTA",
"files",
"from",
"viruslence",
"finder",
".",
"Which",
"means",
"there",
"are",
"lines",
"like",
"this",
":",
"AAGATCCAATAACTGAAGATGTTGAACAAACAATTCATAATATTTATGGTCAATATGCTATTTTCGTTGA",
"AGGTGTTGCGCATTTACCTGGACATCTCTCTCCATTATTAAAAAAATTACTACTTAAATCTTTATAA",
">",
"coa",
":",
"1",
":",
"BA000018",
".",
"3",
"ATGAAAAAGCAAATAATTTCGCTAGGCGCATTAGCAGTTGCATCTAGCTTATTTACATGGGATAACAAAG",
"and",
"therefore",
"the",
"sequences",
"are",
"messed",
"up",
"when",
"we",
"parse",
"them",
".",
"Also",
"one",
"has",
"a",
">",
"at",
"the",
"end",
"then",
"the",
"seq",
"name",
"on",
"the",
"next",
"line",
".",
"This",
"function",
"fixes",
"the",
"file",
"by",
"adding",
"line",
"breaks"
]
| train | https://github.com/sanger-pathogens/ariba/blob/16a0b1916ce0e886bd22550ba2d648542977001b/ariba/ref_genes_getter.py#L462-L483 |
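Worked example of the repair described above, applied to in-memory lines rather than files: a '>' in the middle of a sequence line marks where a missing newline is restored before the next header. Only the mid-line case is shown; the record also handles a trailing '>' at the end of a line.

lines = [
    '>coa:1:BA000018.3\n',
    'ATGAAAAAGCAAATAATTTCG\n',
    'TTACTACTTAAATCTTTATAA>vwb:2:AB000001.1\n',   # broken: header glued to the sequence
    'ATGGATAACAAAG\n',
]

fixed = []
for line in lines:
    if line.startswith('>') or '>' not in line:
        fixed.append(line)
    else:
        seq_part, header_part = line.split('>')
        fixed.append(seq_part + '\n')
        fixed.append('>' + header_part)

print(''.join(fixed), end='')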
sanger-pathogens/ariba | ariba/report.py | _samtools_depths_at_known_snps_all_wild | def _samtools_depths_at_known_snps_all_wild(sequence_meta, contig_name, cluster, variant_list):
    '''Input is a known variant, as a sequence_metadata object. The
assumption is that both the reference and the assembly have the
variant type, not wild type. The list variant_list should be a list
    of pymummer.variant.Variant objects, only containing variants to the
relevant query contig'''
ref_nuc_range = sequence_meta.variant.nucleotide_range()
if ref_nuc_range is None:
return None
bases = []
ctg_nts = []
ref_nts = []
smtls_total_depths = []
smtls_nts = []
smtls_depths = []
contig_positions = []
for ref_position in range(ref_nuc_range[0], ref_nuc_range[1]+1, 1):
nucmer_match = cluster.assembly_compare.nucmer_hit_containing_reference_position(cluster.assembly_compare.nucmer_hits, cluster.ref_sequence.id, ref_position, qry_name=contig_name)
if nucmer_match is not None:
# work out contig position. Needs indels variants to correct the position
ref_nts.append(cluster.ref_sequence[ref_position])
contig_position, in_indel = nucmer_match.qry_coords_from_ref_coord(ref_position, variant_list)
contig_positions.append(contig_position)
bases, total_depth, base_depths = cluster.samtools_vars.get_depths_at_position(contig_name, contig_position)
ctg_nts.append(cluster.assembly.sequences[contig_name][contig_position])
smtls_nts.append(bases)
smtls_total_depths.append(total_depth)
smtls_depths.append(base_depths)
ctg_nts = ';'.join(ctg_nts) if len(ctg_nts) else '.'
ref_nts = ';'.join(ref_nts) if len(ref_nts) else '.'
smtls_nts = ';'.join(smtls_nts) if len(smtls_nts) else '.'
smtls_total_depths = ';'.join([str(x)for x in smtls_total_depths]) if len(smtls_total_depths) else '.'
smtls_depths = ';'.join([str(x)for x in smtls_depths]) if len(smtls_depths) else '.'
ctg_start = str(min(contig_positions) + 1) if contig_positions is not None else '.'
ctg_end = str(max(contig_positions) + 1) if contig_positions is not None else '.'
return [str(x) for x in [
ref_nuc_range[0] + 1,
ref_nuc_range[1] + 1,
ref_nts,
ctg_start,
ctg_end,
ctg_nts,
smtls_total_depths,
smtls_nts,
smtls_depths
]] | python | def _samtools_depths_at_known_snps_all_wild(sequence_meta, contig_name, cluster, variant_list):
ref_nuc_range = sequence_meta.variant.nucleotide_range()
if ref_nuc_range is None:
return None
bases = []
ctg_nts = []
ref_nts = []
smtls_total_depths = []
smtls_nts = []
smtls_depths = []
contig_positions = []
for ref_position in range(ref_nuc_range[0], ref_nuc_range[1]+1, 1):
nucmer_match = cluster.assembly_compare.nucmer_hit_containing_reference_position(cluster.assembly_compare.nucmer_hits, cluster.ref_sequence.id, ref_position, qry_name=contig_name)
if nucmer_match is not None:
ref_nts.append(cluster.ref_sequence[ref_position])
contig_position, in_indel = nucmer_match.qry_coords_from_ref_coord(ref_position, variant_list)
contig_positions.append(contig_position)
bases, total_depth, base_depths = cluster.samtools_vars.get_depths_at_position(contig_name, contig_position)
ctg_nts.append(cluster.assembly.sequences[contig_name][contig_position])
smtls_nts.append(bases)
smtls_total_depths.append(total_depth)
smtls_depths.append(base_depths)
ctg_nts = ';'.join(ctg_nts) if len(ctg_nts) else '.'
ref_nts = ';'.join(ref_nts) if len(ref_nts) else '.'
smtls_nts = ';'.join(smtls_nts) if len(smtls_nts) else '.'
smtls_total_depths = ';'.join([str(x)for x in smtls_total_depths]) if len(smtls_total_depths) else '.'
smtls_depths = ';'.join([str(x)for x in smtls_depths]) if len(smtls_depths) else '.'
ctg_start = str(min(contig_positions) + 1) if contig_positions is not None else '.'
ctg_end = str(max(contig_positions) + 1) if contig_positions is not None else '.'
return [str(x) for x in [
ref_nuc_range[0] + 1,
ref_nuc_range[1] + 1,
ref_nts,
ctg_start,
ctg_end,
ctg_nts,
smtls_total_depths,
smtls_nts,
smtls_depths
]] | [
"def",
"_samtools_depths_at_known_snps_all_wild",
"(",
"sequence_meta",
",",
"contig_name",
",",
"cluster",
",",
"variant_list",
")",
":",
"ref_nuc_range",
"=",
"sequence_meta",
".",
"variant",
".",
"nucleotide_range",
"(",
")",
"if",
"ref_nuc_range",
"is",
"None",
":",
"return",
"None",
"bases",
"=",
"[",
"]",
"ctg_nts",
"=",
"[",
"]",
"ref_nts",
"=",
"[",
"]",
"smtls_total_depths",
"=",
"[",
"]",
"smtls_nts",
"=",
"[",
"]",
"smtls_depths",
"=",
"[",
"]",
"contig_positions",
"=",
"[",
"]",
"for",
"ref_position",
"in",
"range",
"(",
"ref_nuc_range",
"[",
"0",
"]",
",",
"ref_nuc_range",
"[",
"1",
"]",
"+",
"1",
",",
"1",
")",
":",
"nucmer_match",
"=",
"cluster",
".",
"assembly_compare",
".",
"nucmer_hit_containing_reference_position",
"(",
"cluster",
".",
"assembly_compare",
".",
"nucmer_hits",
",",
"cluster",
".",
"ref_sequence",
".",
"id",
",",
"ref_position",
",",
"qry_name",
"=",
"contig_name",
")",
"if",
"nucmer_match",
"is",
"not",
"None",
":",
"# work out contig position. Needs indels variants to correct the position",
"ref_nts",
".",
"append",
"(",
"cluster",
".",
"ref_sequence",
"[",
"ref_position",
"]",
")",
"contig_position",
",",
"in_indel",
"=",
"nucmer_match",
".",
"qry_coords_from_ref_coord",
"(",
"ref_position",
",",
"variant_list",
")",
"contig_positions",
".",
"append",
"(",
"contig_position",
")",
"bases",
",",
"total_depth",
",",
"base_depths",
"=",
"cluster",
".",
"samtools_vars",
".",
"get_depths_at_position",
"(",
"contig_name",
",",
"contig_position",
")",
"ctg_nts",
".",
"append",
"(",
"cluster",
".",
"assembly",
".",
"sequences",
"[",
"contig_name",
"]",
"[",
"contig_position",
"]",
")",
"smtls_nts",
".",
"append",
"(",
"bases",
")",
"smtls_total_depths",
".",
"append",
"(",
"total_depth",
")",
"smtls_depths",
".",
"append",
"(",
"base_depths",
")",
"ctg_nts",
"=",
"';'",
".",
"join",
"(",
"ctg_nts",
")",
"if",
"len",
"(",
"ctg_nts",
")",
"else",
"'.'",
"ref_nts",
"=",
"';'",
".",
"join",
"(",
"ref_nts",
")",
"if",
"len",
"(",
"ref_nts",
")",
"else",
"'.'",
"smtls_nts",
"=",
"';'",
".",
"join",
"(",
"smtls_nts",
")",
"if",
"len",
"(",
"smtls_nts",
")",
"else",
"'.'",
"smtls_total_depths",
"=",
"';'",
".",
"join",
"(",
"[",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"smtls_total_depths",
"]",
")",
"if",
"len",
"(",
"smtls_total_depths",
")",
"else",
"'.'",
"smtls_depths",
"=",
"';'",
".",
"join",
"(",
"[",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"smtls_depths",
"]",
")",
"if",
"len",
"(",
"smtls_depths",
")",
"else",
"'.'",
"ctg_start",
"=",
"str",
"(",
"min",
"(",
"contig_positions",
")",
"+",
"1",
")",
"if",
"contig_positions",
"is",
"not",
"None",
"else",
"'.'",
"ctg_end",
"=",
"str",
"(",
"max",
"(",
"contig_positions",
")",
"+",
"1",
")",
"if",
"contig_positions",
"is",
"not",
"None",
"else",
"'.'",
"return",
"[",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"[",
"ref_nuc_range",
"[",
"0",
"]",
"+",
"1",
",",
"ref_nuc_range",
"[",
"1",
"]",
"+",
"1",
",",
"ref_nts",
",",
"ctg_start",
",",
"ctg_end",
",",
"ctg_nts",
",",
"smtls_total_depths",
",",
"smtls_nts",
",",
"smtls_depths",
"]",
"]"
]
| Input is a known variants, as sequence_metadata object. The
assumption is that both the reference and the assembly have the
variant type, not wild type. The list variant_list should be a list
of pymummer.variant.Variant objects, only containing variants to the
relevant query contig | [
"Input",
"is",
"a",
"known",
"variants",
"as",
"sequence_metadata",
"object",
".",
"The",
"assumption",
"is",
"that",
"both",
"the",
"reference",
"and",
"the",
"assembly",
"have",
"the",
"variant",
"type",
"not",
"wild",
"type",
".",
"The",
"list",
"variant_list",
"should",
"be",
"a",
"list",
"of",
"pymummer",
".",
"variant",
".",
"Variant",
"objects",
"only",
"contaning",
"variants",
"to",
"the",
"relevant",
"query",
"contig"
]
| train | https://github.com/sanger-pathogens/ariba/blob/16a0b1916ce0e886bd22550ba2d648542977001b/ariba/report.py#L85-L136 |
ethereum/eth-abi | eth_abi/utils/string.py | abbr | def abbr(value: Any, limit: int=20) -> str:
"""
Converts a value into its string representation and abbreviates that
representation based on the given length `limit` if necessary.
"""
rep = repr(value)
if len(rep) > limit:
if limit < 3:
raise ValueError('Abbreviation limit may not be less than 3')
rep = rep[:limit - 3] + '...'
return rep | python | def abbr(value: Any, limit: int=20) -> str:
rep = repr(value)
if len(rep) > limit:
if limit < 3:
raise ValueError('Abbreviation limit may not be less than 3')
rep = rep[:limit - 3] + '...'
return rep | [
"def",
"abbr",
"(",
"value",
":",
"Any",
",",
"limit",
":",
"int",
"=",
"20",
")",
"->",
"str",
":",
"rep",
"=",
"repr",
"(",
"value",
")",
"if",
"len",
"(",
"rep",
")",
">",
"limit",
":",
"if",
"limit",
"<",
"3",
":",
"raise",
"ValueError",
"(",
"'Abbreviation limit may not be less than 3'",
")",
"rep",
"=",
"rep",
"[",
":",
"limit",
"-",
"3",
"]",
"+",
"'...'",
"return",
"rep"
]
| Converts a value into its string representation and abbreviates that
representation based on the given length `limit` if necessary. | [
"Converts",
"a",
"value",
"into",
"its",
"string",
"representation",
"and",
"abbreviates",
"that",
"representation",
"based",
"on",
"the",
"given",
"length",
"limit",
"if",
"necessary",
"."
]
| train | https://github.com/ethereum/eth-abi/blob/0a5cab0bdeae30b77efa667379427581784f1707/eth_abi/utils/string.py#L6-L19 |
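A quick illustration of the abbreviation behaviour documented above; this is a sketch that only assumes abbr is importable from eth_abi.utils.string, the module shown in this row.

from eth_abi.utils.string import abbr

# short values come back as their full repr
assert abbr(123) == '123'

# long values are truncated to the limit, with '...' marking the cut
shortened = abbr('a' * 100)
assert len(shortened) == 20 and shortened.endswith('...')

# a limit below 3 cannot even hold the '...' marker and is rejected
try:
    abbr('a' * 100, limit=2)
except ValueError:
    pass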
ethereum/eth-abi | eth_abi/encoding.py | BaseEncoder.invalidate_value | def invalidate_value(
cls,
value: Any,
exc: Type[Exception]=EncodingTypeError,
msg: Optional[str]=None,
) -> None:
"""
Throws a standard exception for when a value is not encodable by an
encoder.
"""
raise exc(
"Value `{rep}` of type {typ} cannot be encoded by {cls}{msg}".format(
rep=abbr(value),
typ=type(value),
cls=cls.__name__,
msg="" if msg is None else (": " + msg),
)
) | python | def invalidate_value(
cls,
value: Any,
exc: Type[Exception]=EncodingTypeError,
msg: Optional[str]=None,
) -> None:
raise exc(
"Value `{rep}` of type {typ} cannot be encoded by {cls}{msg}".format(
rep=abbr(value),
typ=type(value),
cls=cls.__name__,
msg="" if msg is None else (": " + msg),
)
) | [
"def",
"invalidate_value",
"(",
"cls",
",",
"value",
":",
"Any",
",",
"exc",
":",
"Type",
"[",
"Exception",
"]",
"=",
"EncodingTypeError",
",",
"msg",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
")",
"->",
"None",
":",
"raise",
"exc",
"(",
"\"Value `{rep}` of type {typ} cannot be encoded by {cls}{msg}\"",
".",
"format",
"(",
"rep",
"=",
"abbr",
"(",
"value",
")",
",",
"typ",
"=",
"type",
"(",
"value",
")",
",",
"cls",
"=",
"cls",
".",
"__name__",
",",
"msg",
"=",
"\"\"",
"if",
"msg",
"is",
"None",
"else",
"(",
"\": \"",
"+",
"msg",
")",
",",
")",
")"
]
| Throws a standard exception for when a value is not encodable by an
encoder. | [
"Throws",
"a",
"standard",
"exception",
"for",
"when",
"a",
"value",
"is",
"not",
"encodable",
"by",
"an",
"encoder",
"."
]
| train | https://github.com/ethereum/eth-abi/blob/0a5cab0bdeae30b77efa667379427581784f1707/eth_abi/encoding.py#L78-L95 |
ethereum/eth-abi | eth_abi/base.py | parse_type_str | def parse_type_str(expected_base=None, with_arrlist=False):
"""
Used by BaseCoder subclasses as a convenience for implementing the
``from_type_str`` method required by ``ABIRegistry``. Useful if normalizing
then parsing a type string with an (optional) expected base is required in
that method.
"""
def decorator(old_from_type_str):
@functools.wraps(old_from_type_str)
def new_from_type_str(cls, type_str, registry):
normalized_type_str = normalize(type_str)
abi_type = parse(normalized_type_str)
type_str_repr = repr(type_str)
if type_str != normalized_type_str:
type_str_repr = '{} (normalized to {})'.format(
type_str_repr,
repr(normalized_type_str),
)
if expected_base is not None:
if not isinstance(abi_type, BasicType):
raise ValueError(
'Cannot create {} for non-basic type {}'.format(
cls.__name__,
type_str_repr,
)
)
if abi_type.base != expected_base:
raise ValueError(
'Cannot create {} for type {}: expected type with '
"base '{}'".format(
cls.__name__,
type_str_repr,
expected_base,
)
)
if not with_arrlist and abi_type.arrlist is not None:
raise ValueError(
'Cannot create {} for type {}: expected type with '
'no array dimension list'.format(
cls.__name__,
type_str_repr,
)
)
if with_arrlist and abi_type.arrlist is None:
raise ValueError(
'Cannot create {} for type {}: expected type with '
'array dimension list'.format(
cls.__name__,
type_str_repr,
)
)
# Perform general validation of default solidity types
abi_type.validate()
return old_from_type_str(cls, abi_type, registry)
return classmethod(new_from_type_str)
return decorator | python | def parse_type_str(expected_base=None, with_arrlist=False):
def decorator(old_from_type_str):
@functools.wraps(old_from_type_str)
def new_from_type_str(cls, type_str, registry):
normalized_type_str = normalize(type_str)
abi_type = parse(normalized_type_str)
type_str_repr = repr(type_str)
if type_str != normalized_type_str:
type_str_repr = '{} (normalized to {})'.format(
type_str_repr,
repr(normalized_type_str),
)
if expected_base is not None:
if not isinstance(abi_type, BasicType):
raise ValueError(
'Cannot create {} for non-basic type {}'.format(
cls.__name__,
type_str_repr,
)
)
if abi_type.base != expected_base:
raise ValueError(
'Cannot create {} for type {}: expected type with '
"base '{}'".format(
cls.__name__,
type_str_repr,
expected_base,
)
)
if not with_arrlist and abi_type.arrlist is not None:
raise ValueError(
'Cannot create {} for type {}: expected type with '
'no array dimension list'.format(
cls.__name__,
type_str_repr,
)
)
if with_arrlist and abi_type.arrlist is None:
raise ValueError(
'Cannot create {} for type {}: expected type with '
'array dimension list'.format(
cls.__name__,
type_str_repr,
)
)
abi_type.validate()
return old_from_type_str(cls, abi_type, registry)
return classmethod(new_from_type_str)
return decorator | [
"def",
"parse_type_str",
"(",
"expected_base",
"=",
"None",
",",
"with_arrlist",
"=",
"False",
")",
":",
"def",
"decorator",
"(",
"old_from_type_str",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"old_from_type_str",
")",
"def",
"new_from_type_str",
"(",
"cls",
",",
"type_str",
",",
"registry",
")",
":",
"normalized_type_str",
"=",
"normalize",
"(",
"type_str",
")",
"abi_type",
"=",
"parse",
"(",
"normalized_type_str",
")",
"type_str_repr",
"=",
"repr",
"(",
"type_str",
")",
"if",
"type_str",
"!=",
"normalized_type_str",
":",
"type_str_repr",
"=",
"'{} (normalized to {})'",
".",
"format",
"(",
"type_str_repr",
",",
"repr",
"(",
"normalized_type_str",
")",
",",
")",
"if",
"expected_base",
"is",
"not",
"None",
":",
"if",
"not",
"isinstance",
"(",
"abi_type",
",",
"BasicType",
")",
":",
"raise",
"ValueError",
"(",
"'Cannot create {} for non-basic type {}'",
".",
"format",
"(",
"cls",
".",
"__name__",
",",
"type_str_repr",
",",
")",
")",
"if",
"abi_type",
".",
"base",
"!=",
"expected_base",
":",
"raise",
"ValueError",
"(",
"'Cannot create {} for type {}: expected type with '",
"\"base '{}'\"",
".",
"format",
"(",
"cls",
".",
"__name__",
",",
"type_str_repr",
",",
"expected_base",
",",
")",
")",
"if",
"not",
"with_arrlist",
"and",
"abi_type",
".",
"arrlist",
"is",
"not",
"None",
":",
"raise",
"ValueError",
"(",
"'Cannot create {} for type {}: expected type with '",
"'no array dimension list'",
".",
"format",
"(",
"cls",
".",
"__name__",
",",
"type_str_repr",
",",
")",
")",
"if",
"with_arrlist",
"and",
"abi_type",
".",
"arrlist",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'Cannot create {} for type {}: expected type with '",
"'array dimension list'",
".",
"format",
"(",
"cls",
".",
"__name__",
",",
"type_str_repr",
",",
")",
")",
"# Perform general validation of default solidity types",
"abi_type",
".",
"validate",
"(",
")",
"return",
"old_from_type_str",
"(",
"cls",
",",
"abi_type",
",",
"registry",
")",
"return",
"classmethod",
"(",
"new_from_type_str",
")",
"return",
"decorator"
]
| Used by BaseCoder subclasses as a convenience for implementing the
``from_type_str`` method required by ``ABIRegistry``. Useful if normalizing
then parsing a type string with an (optional) expected base is required in
that method. | [
"Used",
"by",
"BaseCoder",
"subclasses",
"as",
"a",
"convenience",
"for",
"implementing",
"the",
"from_type_str",
"method",
"required",
"by",
"ABIRegistry",
".",
"Useful",
"if",
"normalizing",
"then",
"parsing",
"a",
"type",
"string",
"with",
"an",
"(",
"optional",
")",
"expected",
"base",
"is",
"required",
"in",
"that",
"method",
"."
]
| train | https://github.com/ethereum/eth-abi/blob/0a5cab0bdeae30b77efa667379427581784f1707/eth_abi/base.py#L15-L77 |
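To see the decorator in action, a toy class is enough. The sketch below assumes parse_type_str is importable from eth_abi.base as defined above and that a parsed 'uintN' type carries its bit size as an integer on abi_type.sub; the class itself is made up for illustration.

from eth_abi.base import parse_type_str

class BitSizeReporter:
    # hypothetical class: from_type_str just reports the parsed bit size
    @parse_type_str('uint')
    def from_type_str(cls, abi_type, registry):
        # by now the type string has been normalized, parsed and validated,
        # and its base is guaranteed to be 'uint'
        return abi_type.sub

print(BitSizeReporter.from_type_str('uint256', None))   # expected: 256

# an array type is rejected because with_arrlist defaults to False
try:
    BitSizeReporter.from_type_str('uint256[]', None)
except ValueError as exc:
    print(exc)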
ethereum/eth-abi | eth_abi/base.py | parse_tuple_type_str | def parse_tuple_type_str(old_from_type_str):
"""
Used by BaseCoder subclasses as a convenience for implementing the
``from_type_str`` method required by ``ABIRegistry``. Useful if normalizing
then parsing a tuple type string is required in that method.
"""
@functools.wraps(old_from_type_str)
def new_from_type_str(cls, type_str, registry):
normalized_type_str = normalize(type_str)
abi_type = parse(normalized_type_str)
type_str_repr = repr(type_str)
if type_str != normalized_type_str:
type_str_repr = '{} (normalized to {})'.format(
type_str_repr,
repr(normalized_type_str),
)
if not isinstance(abi_type, TupleType):
raise ValueError(
'Cannot create {} for non-tuple type {}'.format(
cls.__name__,
type_str_repr,
)
)
abi_type.validate()
return old_from_type_str(cls, abi_type, registry)
return classmethod(new_from_type_str) | python | def parse_tuple_type_str(old_from_type_str):
@functools.wraps(old_from_type_str)
def new_from_type_str(cls, type_str, registry):
normalized_type_str = normalize(type_str)
abi_type = parse(normalized_type_str)
type_str_repr = repr(type_str)
if type_str != normalized_type_str:
type_str_repr = '{} (normalized to {})'.format(
type_str_repr,
repr(normalized_type_str),
)
if not isinstance(abi_type, TupleType):
raise ValueError(
'Cannot create {} for non-tuple type {}'.format(
cls.__name__,
type_str_repr,
)
)
abi_type.validate()
return old_from_type_str(cls, abi_type, registry)
return classmethod(new_from_type_str) | [
"def",
"parse_tuple_type_str",
"(",
"old_from_type_str",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"old_from_type_str",
")",
"def",
"new_from_type_str",
"(",
"cls",
",",
"type_str",
",",
"registry",
")",
":",
"normalized_type_str",
"=",
"normalize",
"(",
"type_str",
")",
"abi_type",
"=",
"parse",
"(",
"normalized_type_str",
")",
"type_str_repr",
"=",
"repr",
"(",
"type_str",
")",
"if",
"type_str",
"!=",
"normalized_type_str",
":",
"type_str_repr",
"=",
"'{} (normalized to {})'",
".",
"format",
"(",
"type_str_repr",
",",
"repr",
"(",
"normalized_type_str",
")",
",",
")",
"if",
"not",
"isinstance",
"(",
"abi_type",
",",
"TupleType",
")",
":",
"raise",
"ValueError",
"(",
"'Cannot create {} for non-tuple type {}'",
".",
"format",
"(",
"cls",
".",
"__name__",
",",
"type_str_repr",
",",
")",
")",
"abi_type",
".",
"validate",
"(",
")",
"return",
"old_from_type_str",
"(",
"cls",
",",
"abi_type",
",",
"registry",
")",
"return",
"classmethod",
"(",
"new_from_type_str",
")"
]
| Used by BaseCoder subclasses as a convenience for implementing the
``from_type_str`` method required by ``ABIRegistry``. Useful if normalizing
then parsing a tuple type string is required in that method. | [
"Used",
"by",
"BaseCoder",
"subclasses",
"as",
"a",
"convenience",
"for",
"implementing",
"the",
"from_type_str",
"method",
"required",
"by",
"ABIRegistry",
".",
"Useful",
"if",
"normalizing",
"then",
"parsing",
"a",
"tuple",
"type",
"string",
"is",
"required",
"in",
"that",
"method",
"."
]
| train | https://github.com/ethereum/eth-abi/blob/0a5cab0bdeae30b77efa667379427581784f1707/eth_abi/base.py#L80-L110 |
ethereum/eth-abi | eth_abi/decoding.py | ContextFramesBytesIO.seek_in_frame | def seek_in_frame(self, pos, *args, **kwargs):
"""
Seeks relative to the total offset of the current contextual frames.
"""
super().seek(self._total_offset + pos, *args, **kwargs) | python | def seek_in_frame(self, pos, *args, **kwargs):
super().seek(self._total_offset + pos, *args, **kwargs) | [
"def",
"seek_in_frame",
"(",
"self",
",",
"pos",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"super",
"(",
")",
".",
"seek",
"(",
"self",
".",
"_total_offset",
"+",
"pos",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
]
| Seeks relative to the total offset of the current contextual frames. | [
"Seeks",
"relative",
"to",
"the",
"total",
"offset",
"of",
"the",
"current",
"contextual",
"frames",
"."
]
| train | https://github.com/ethereum/eth-abi/blob/0a5cab0bdeae30b77efa667379427581784f1707/eth_abi/decoding.py#L80-L84 |
ethereum/eth-abi | eth_abi/decoding.py | ContextFramesBytesIO.push_frame | def push_frame(self, offset):
"""
Pushes a new contextual frame onto the stack with the given offset and a
return position at the current cursor position then seeks to the new
total offset.
"""
self._frames.append((offset, self.tell()))
self._total_offset += offset
self.seek_in_frame(0) | python | def push_frame(self, offset):
self._frames.append((offset, self.tell()))
self._total_offset += offset
self.seek_in_frame(0) | [
"def",
"push_frame",
"(",
"self",
",",
"offset",
")",
":",
"self",
".",
"_frames",
".",
"append",
"(",
"(",
"offset",
",",
"self",
".",
"tell",
"(",
")",
")",
")",
"self",
".",
"_total_offset",
"+=",
"offset",
"self",
".",
"seek_in_frame",
"(",
"0",
")"
]
| Pushes a new contextual frame onto the stack with the given offset and a
return position at the current cursor position then seeks to the new
total offset. | [
"Pushes",
"a",
"new",
"contextual",
"frame",
"onto",
"the",
"stack",
"with",
"the",
"given",
"offset",
"and",
"a",
"return",
"position",
"at",
"the",
"current",
"cursor",
"position",
"then",
"seeks",
"to",
"the",
"new",
"total",
"offset",
"."
]
| train | https://github.com/ethereum/eth-abi/blob/0a5cab0bdeae30b77efa667379427581784f1707/eth_abi/decoding.py#L86-L95 |
ethereum/eth-abi | eth_abi/decoding.py | ContextFramesBytesIO.pop_frame | def pop_frame(self):
"""
Pops the current contextual frame off of the stack and returns the
cursor to the frame's return position.
"""
try:
offset, return_pos = self._frames.pop()
except IndexError:
raise IndexError('no frames to pop')
self._total_offset -= offset
self.seek(return_pos) | python | def pop_frame(self):
try:
offset, return_pos = self._frames.pop()
except IndexError:
raise IndexError('no frames to pop')
self._total_offset -= offset
self.seek(return_pos) | [
"def",
"pop_frame",
"(",
"self",
")",
":",
"try",
":",
"offset",
",",
"return_pos",
"=",
"self",
".",
"_frames",
".",
"pop",
"(",
")",
"except",
"IndexError",
":",
"raise",
"IndexError",
"(",
"'no frames to pop'",
")",
"self",
".",
"_total_offset",
"-=",
"offset",
"self",
".",
"seek",
"(",
"return_pos",
")"
]
| Pops the current contextual frame off of the stack and returns the
cursor to the frame's return position. | [
"Pops",
"the",
"current",
"contextual",
"frame",
"off",
"of",
"the",
"stack",
"and",
"returns",
"the",
"cursor",
"to",
"the",
"frame",
"s",
"return",
"position",
"."
]
| train | https://github.com/ethereum/eth-abi/blob/0a5cab0bdeae30b77efa667379427581784f1707/eth_abi/decoding.py#L97-L108 |
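Taken together, seek_in_frame, push_frame and pop_frame let a nested decoder treat an embedded head-tail block as if it started at offset zero. A minimal sketch, assuming only that ContextFramesBytesIO is importable from eth_abi.decoding as shown in these rows:

from eth_abi.decoding import ContextFramesBytesIO

stream = ContextFramesBytesIO(b'outer---inner---')

# with no frames pushed, positions are absolute
stream.seek_in_frame(0)
assert stream.read(5) == b'outer'

# push a frame at offset 8: positions are now relative to the inner block
stream.push_frame(8)
assert stream.read(5) == b'inner'

# popping the frame returns the cursor to where it was before the push
stream.pop_frame()
assert stream.read(3) == b'---'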
ethereum/eth-abi | eth_abi/registry.py | has_arrlist | def has_arrlist(type_str):
"""
A predicate that matches a type string with an array dimension list.
"""
try:
abi_type = grammar.parse(type_str)
except exceptions.ParseError:
return False
return abi_type.arrlist is not None | python | def has_arrlist(type_str):
try:
abi_type = grammar.parse(type_str)
except exceptions.ParseError:
return False
return abi_type.arrlist is not None | [
"def",
"has_arrlist",
"(",
"type_str",
")",
":",
"try",
":",
"abi_type",
"=",
"grammar",
".",
"parse",
"(",
"type_str",
")",
"except",
"exceptions",
".",
"ParseError",
":",
"return",
"False",
"return",
"abi_type",
".",
"arrlist",
"is",
"not",
"None"
]
| A predicate that matches a type string with an array dimension list. | [
"A",
"predicate",
"that",
"matches",
"a",
"type",
"string",
"with",
"an",
"array",
"dimension",
"list",
"."
]
| train | https://github.com/ethereum/eth-abi/blob/0a5cab0bdeae30b77efa667379427581784f1707/eth_abi/registry.py#L258-L267 |
ethereum/eth-abi | eth_abi/registry.py | is_base_tuple | def is_base_tuple(type_str):
"""
A predicate that matches a tuple type with no array dimension list.
"""
try:
abi_type = grammar.parse(type_str)
except exceptions.ParseError:
return False
return isinstance(abi_type, grammar.TupleType) and abi_type.arrlist is None | python | def is_base_tuple(type_str):
try:
abi_type = grammar.parse(type_str)
except exceptions.ParseError:
return False
return isinstance(abi_type, grammar.TupleType) and abi_type.arrlist is None | [
"def",
"is_base_tuple",
"(",
"type_str",
")",
":",
"try",
":",
"abi_type",
"=",
"grammar",
".",
"parse",
"(",
"type_str",
")",
"except",
"exceptions",
".",
"ParseError",
":",
"return",
"False",
"return",
"isinstance",
"(",
"abi_type",
",",
"grammar",
".",
"TupleType",
")",
"and",
"abi_type",
".",
"arrlist",
"is",
"None"
]
| A predicate that matches a tuple type with no array dimension list. | [
"A",
"predicate",
"that",
"matches",
"a",
"tuple",
"type",
"with",
"no",
"array",
"dimension",
"list",
"."
]
| train | https://github.com/ethereum/eth-abi/blob/0a5cab0bdeae30b77efa667379427581784f1707/eth_abi/registry.py#L270-L279 |
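Both predicates are plain functions over type strings, so they are easy to try directly; this only assumes they are importable from eth_abi.registry, where the rows above define them.

from eth_abi.registry import has_arrlist, is_base_tuple

assert has_arrlist('uint256[8]')              # array dimension list present
assert not has_arrlist('uint256')             # plain basic type
assert is_base_tuple('(int128,bool)')         # tuple with no array dimensions
assert not is_base_tuple('(int128,bool)[2]')  # a tuple *array* is not a base tuple
assert not has_arrlist('not a type!')         # unparseable strings simply return False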
ethereum/eth-abi | eth_abi/registry.py | ABIRegistry.register_encoder | def register_encoder(self, lookup: Lookup, encoder: Encoder, label: str=None) -> None:
"""
Registers the given ``encoder`` under the given ``lookup``. A unique
string label may be optionally provided that can be used to refer to
the registration by name. For more information about arguments, refer
to :any:`register`.
"""
self._register_coder(self._encoders, lookup, encoder, label=label) | python | def register_encoder(self, lookup: Lookup, encoder: Encoder, label: str=None) -> None:
self._register_coder(self._encoders, lookup, encoder, label=label) | [
"def",
"register_encoder",
"(",
"self",
",",
"lookup",
":",
"Lookup",
",",
"encoder",
":",
"Encoder",
",",
"label",
":",
"str",
"=",
"None",
")",
"->",
"None",
":",
"self",
".",
"_register_coder",
"(",
"self",
".",
"_encoders",
",",
"lookup",
",",
"encoder",
",",
"label",
"=",
"label",
")"
]
| Registers the given ``encoder`` under the given ``lookup``. A unique
string label may be optionally provided that can be used to refer to
the registration by name. For more information about arguments, refer
to :any:`register`. | [
"Registers",
"the",
"given",
"encoder",
"under",
"the",
"given",
"lookup",
".",
"A",
"unique",
"string",
"label",
"may",
"be",
"optionally",
"provided",
"that",
"can",
"be",
"used",
"to",
"refer",
"to",
"the",
"registration",
"by",
"name",
".",
"For",
"more",
"information",
"about",
"arguments",
"refer",
"to",
":",
"any",
":",
"register",
"."
]
| train | https://github.com/ethereum/eth-abi/blob/0a5cab0bdeae30b77efa667379427581784f1707/eth_abi/registry.py#L354-L361 |
ethereum/eth-abi | eth_abi/registry.py | ABIRegistry.register_decoder | def register_decoder(self, lookup: Lookup, decoder: Decoder, label: str=None) -> None:
"""
Registers the given ``decoder`` under the given ``lookup``. A unique
string label may be optionally provided that can be used to refer to
the registration by name. For more information about arguments, refer
to :any:`register`.
"""
self._register_coder(self._decoders, lookup, decoder, label=label) | python | def register_decoder(self, lookup: Lookup, decoder: Decoder, label: str=None) -> None:
self._register_coder(self._decoders, lookup, decoder, label=label) | [
"def",
"register_decoder",
"(",
"self",
",",
"lookup",
":",
"Lookup",
",",
"decoder",
":",
"Decoder",
",",
"label",
":",
"str",
"=",
"None",
")",
"->",
"None",
":",
"self",
".",
"_register_coder",
"(",
"self",
".",
"_decoders",
",",
"lookup",
",",
"decoder",
",",
"label",
"=",
"label",
")"
]
| Registers the given ``decoder`` under the given ``lookup``. A unique
string label may be optionally provided that can be used to refer to
the registration by name. For more information about arguments, refer
to :any:`register`. | [
"Registers",
"the",
"given",
"decoder",
"under",
"the",
"given",
"lookup",
".",
"A",
"unique",
"string",
"label",
"may",
"be",
"optionally",
"provided",
"that",
"can",
"be",
"used",
"to",
"refer",
"to",
"the",
"registration",
"by",
"name",
".",
"For",
"more",
"information",
"about",
"arguments",
"refer",
"to",
":",
"any",
":",
"register",
"."
]
| train | https://github.com/ethereum/eth-abi/blob/0a5cab0bdeae30b77efa667379427581784f1707/eth_abi/registry.py#L375-L382 |
ethereum/eth-abi | eth_abi/registry.py | ABIRegistry.register | def register(self, lookup: Lookup, encoder: Encoder, decoder: Decoder, label: str=None) -> None:
"""
Registers the given ``encoder`` and ``decoder`` under the given
``lookup``. A unique string label may be optionally provided that can
be used to refer to the registration by name.
:param lookup: A type string or type string matcher function
(predicate). When the registry is queried with a type string
``query`` to determine which encoder or decoder to use, ``query``
will be checked against every registration in the registry. If a
registration was created with a type string for ``lookup``, it will
be considered a match if ``lookup == query``. If a registration
was created with a matcher function for ``lookup``, it will be
considered a match if ``lookup(query) is True``. If more than one
registration is found to be a match, then an exception is raised.
:param encoder: An encoder callable or class to use if ``lookup``
matches a query. If ``encoder`` is a callable, it must accept a
python value and return a ``bytes`` value. If ``encoder`` is a
class, it must be a valid subclass of :any:`encoding.BaseEncoder`
and must also implement the :any:`from_type_str` method on
:any:`base.BaseCoder`.
:param decoder: A decoder callable or class to use if ``lookup``
matches a query. If ``decoder`` is a callable, it must accept a
stream-like object of bytes and return a python value. If
``decoder`` is a class, it must be a valid subclass of
:any:`decoding.BaseDecoder` and must also implement the
:any:`from_type_str` method on :any:`base.BaseCoder`.
:param label: An optional label that can be used to refer to this
registration by name. This label can be used to unregister an
entry in the registry via the :any:`unregister` method and its
variants.
"""
self.register_encoder(lookup, encoder, label=label)
self.register_decoder(lookup, decoder, label=label) | python | def register(self, lookup: Lookup, encoder: Encoder, decoder: Decoder, label: str=None) -> None:
self.register_encoder(lookup, encoder, label=label)
self.register_decoder(lookup, decoder, label=label) | [
"def",
"register",
"(",
"self",
",",
"lookup",
":",
"Lookup",
",",
"encoder",
":",
"Encoder",
",",
"decoder",
":",
"Decoder",
",",
"label",
":",
"str",
"=",
"None",
")",
"->",
"None",
":",
"self",
".",
"register_encoder",
"(",
"lookup",
",",
"encoder",
",",
"label",
"=",
"label",
")",
"self",
".",
"register_decoder",
"(",
"lookup",
",",
"decoder",
",",
"label",
"=",
"label",
")"
]
| Registers the given ``encoder`` and ``decoder`` under the given
``lookup``. A unique string label may be optionally provided that can
be used to refer to the registration by name.
:param lookup: A type string or type string matcher function
(predicate). When the registry is queried with a type string
``query`` to determine which encoder or decoder to use, ``query``
will be checked against every registration in the registry. If a
registration was created with a type string for ``lookup``, it will
be considered a match if ``lookup == query``. If a registration
was created with a matcher function for ``lookup``, it will be
considered a match if ``lookup(query) is True``. If more than one
registration is found to be a match, then an exception is raised.
:param encoder: An encoder callable or class to use if ``lookup``
matches a query. If ``encoder`` is a callable, it must accept a
python value and return a ``bytes`` value. If ``encoder`` is a
class, it must be a valid subclass of :any:`encoding.BaseEncoder`
and must also implement the :any:`from_type_str` method on
:any:`base.BaseCoder`.
:param decoder: A decoder callable or class to use if ``lookup``
matches a query. If ``decoder`` is a callable, it must accept a
stream-like object of bytes and return a python value. If
``decoder`` is a class, it must be a valid subclass of
:any:`decoding.BaseDecoder` and must also implement the
:any:`from_type_str` method on :any:`base.BaseCoder`.
:param label: An optional label that can be used to refer to this
registration by name. This label can be used to unregister an
entry in the registry via the :any:`unregister` method and its
variants. | [
"Registers",
"the",
"given",
"encoder",
"and",
"decoder",
"under",
"the",
"given",
"lookup",
".",
"A",
"unique",
"string",
"label",
"may",
"be",
"optionally",
"provided",
"that",
"can",
"be",
"used",
"to",
"refer",
"to",
"the",
"registration",
"by",
"name",
"."
]
| train | https://github.com/ethereum/eth-abi/blob/0a5cab0bdeae30b77efa667379427581784f1707/eth_abi/registry.py#L395-L431 |
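A small end-to-end sketch of the registration API described above. It assumes the module exposes a pre-populated default instance named registry; the 'mybool' type string and both coder callables are invented for illustration, and copying first keeps the global registry untouched.

from eth_abi.registry import registry  # assumed default ABIRegistry instance

reg = registry.copy()

def encode_mybool(value):
    # callable encoder: python value in, bytes out
    return b'\x01' if value else b'\x00'

def decode_mybool(stream):
    # callable decoder: stream of bytes in, python value out
    return stream.read(1) == b'\x01'

reg.register('mybool', encode_mybool, decode_mybool, label='mybool')
assert reg.get_encoder('mybool')(True) == b'\x01'
assert reg.has_encoder('mybool')

# the label given at registration time is what unregister expects
reg.unregister('mybool')
assert not reg.has_encoder('mybool')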
ethereum/eth-abi | eth_abi/registry.py | ABIRegistry.unregister | def unregister(self, label: str) -> None:
"""
Unregisters the entries in the encoder and decoder registries which
have the label ``label``.
"""
self.unregister_encoder(label)
self.unregister_decoder(label) | python | def unregister(self, label: str) -> None:
self.unregister_encoder(label)
self.unregister_decoder(label) | [
"def",
"unregister",
"(",
"self",
",",
"label",
":",
"str",
")",
"->",
"None",
":",
"self",
".",
"unregister_encoder",
"(",
"label",
")",
"self",
".",
"unregister_decoder",
"(",
"label",
")"
]
| Unregisters the entries in the encoder and decoder registries which
have the label ``label``. | [
"Unregisters",
"the",
"entries",
"in",
"the",
"encoder",
"and",
"decoder",
"registries",
"which",
"have",
"the",
"label",
"label",
"."
]
| train | https://github.com/ethereum/eth-abi/blob/0a5cab0bdeae30b77efa667379427581784f1707/eth_abi/registry.py#L433-L439 |
ethereum/eth-abi | eth_abi/registry.py | ABIRegistry.has_encoder | def has_encoder(self, type_str: abi.TypeStr) -> bool:
"""
Returns ``True`` if an encoder is found for the given type string
``type_str``. Otherwise, returns ``False``. Raises
:class:`~eth_abi.exceptions.MultipleEntriesFound` if multiple encoders
are found.
"""
try:
self.get_encoder(type_str)
except NoEntriesFound:
return False
else:
return True | python | def has_encoder(self, type_str: abi.TypeStr) -> bool:
try:
self.get_encoder(type_str)
except NoEntriesFound:
return False
else:
return True | [
"def",
"has_encoder",
"(",
"self",
",",
"type_str",
":",
"abi",
".",
"TypeStr",
")",
"->",
"bool",
":",
"try",
":",
"self",
".",
"get_encoder",
"(",
"type_str",
")",
"except",
"NoEntriesFound",
":",
"return",
"False",
"else",
":",
"return",
"True"
]
| Returns ``True`` if an encoder is found for the given type string
``type_str``. Otherwise, returns ``False``. Raises
:class:`~eth_abi.exceptions.MultipleEntriesFound` if multiple encoders
are found. | [
"Returns",
"True",
"if",
"an",
"encoder",
"is",
"found",
"for",
"the",
"given",
"type",
"string",
"type_str",
".",
"Otherwise",
"returns",
"False",
".",
"Raises",
":",
"class",
":",
"~eth_abi",
".",
"exceptions",
".",
"MultipleEntriesFound",
"if",
"multiple",
"encoders",
"are",
"found",
"."
]
| train | https://github.com/ethereum/eth-abi/blob/0a5cab0bdeae30b77efa667379427581784f1707/eth_abi/registry.py#L445-L457 |
ethereum/eth-abi | eth_abi/registry.py | ABIRegistry.copy | def copy(self):
"""
Copies a registry such that new registrations can be made or existing
registrations can be unregistered without affecting any instance from
which a copy was obtained. This is useful if an existing registry
fulfills most of a user's needs but requires one or two modifications.
In that case, a copy of that registry can be obtained and the necessary
changes made without affecting the original registry.
"""
cpy = type(self)()
cpy._encoders = copy.copy(self._encoders)
cpy._decoders = copy.copy(self._decoders)
return cpy | python | def copy(self):
cpy = type(self)()
cpy._encoders = copy.copy(self._encoders)
cpy._decoders = copy.copy(self._decoders)
return cpy | [
"def",
"copy",
"(",
"self",
")",
":",
"cpy",
"=",
"type",
"(",
"self",
")",
"(",
")",
"cpy",
".",
"_encoders",
"=",
"copy",
".",
"copy",
"(",
"self",
".",
"_encoders",
")",
"cpy",
".",
"_decoders",
"=",
"copy",
".",
"copy",
"(",
"self",
".",
"_decoders",
")",
"return",
"cpy"
]
| Copies a registry such that new registrations can be made or existing
registrations can be unregistered without affecting any instance from
which a copy was obtained. This is useful if an existing registry
fulfills most of a user's needs but requires one or two modifications.
In that case, a copy of that registry can be obtained and the necessary
changes made without affecting the original registry. | [
"Copies",
"a",
"registry",
"such",
"that",
"new",
"registrations",
"can",
"be",
"made",
"or",
"existing",
"registrations",
"can",
"be",
"unregistered",
"without",
"affecting",
"any",
"instance",
"from",
"which",
"a",
"copy",
"was",
"obtained",
".",
"This",
"is",
"useful",
"if",
"an",
"existing",
"registry",
"fulfills",
"most",
"of",
"a",
"user",
"s",
"needs",
"but",
"requires",
"one",
"or",
"two",
"modifications",
".",
"In",
"that",
"case",
"a",
"copy",
"of",
"that",
"registry",
"can",
"be",
"obtained",
"and",
"the",
"necessary",
"changes",
"made",
"without",
"affecting",
"the",
"original",
"registry",
"."
]
| train | https://github.com/ethereum/eth-abi/blob/0a5cab0bdeae30b77efa667379427581784f1707/eth_abi/registry.py#L463-L477 |
ethereum/eth-abi | eth_abi/codec.py | ABIEncoder.encode_single | def encode_single(self, typ: TypeStr, arg: Any) -> bytes:
"""
Encodes the python value ``arg`` as a binary value of the ABI type
``typ``.
:param typ: The string representation of the ABI type that will be used
for encoding e.g. ``'uint256'``, ``'bytes[]'``, ``'(int,int)'``,
etc.
:param arg: The python value to be encoded.
:returns: The binary representation of the python value ``arg`` as a
value of the ABI type ``typ``.
"""
encoder = self._registry.get_encoder(typ)
return encoder(arg) | python | def encode_single(self, typ: TypeStr, arg: Any) -> bytes:
encoder = self._registry.get_encoder(typ)
return encoder(arg) | [
"def",
"encode_single",
"(",
"self",
",",
"typ",
":",
"TypeStr",
",",
"arg",
":",
"Any",
")",
"->",
"bytes",
":",
"encoder",
"=",
"self",
".",
"_registry",
".",
"get_encoder",
"(",
"typ",
")",
"return",
"encoder",
"(",
"arg",
")"
]
| Encodes the python value ``arg`` as a binary value of the ABI type
``typ``.
:param typ: The string representation of the ABI type that will be used
for encoding e.g. ``'uint256'``, ``'bytes[]'``, ``'(int,int)'``,
etc.
:param arg: The python value to be encoded.
:returns: The binary representation of the python value ``arg`` as a
value of the ABI type ``typ``. | [
"Encodes",
"the",
"python",
"value",
"arg",
"as",
"a",
"binary",
"value",
"of",
"the",
"ABI",
"type",
"typ",
"."
]
| train | https://github.com/ethereum/eth-abi/blob/0a5cab0bdeae30b77efa667379427581784f1707/eth_abi/codec.py#L50-L65 |
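In released versions of the library the same operation is reachable through a package-level helper; the sketch below assumes eth_abi re-exports encode_single at the top level and that it delegates to the method above.

from eth_abi import encode_single  # assumed top-level re-export

# a uint256 encodes to a single 32-byte big-endian word
encoded = encode_single('uint256', 12345)
assert encoded == (12345).to_bytes(32, 'big')

# tuple types use the '(type,type,...)' syntax; two static members -> 64 bytes
pair = encode_single('(uint256,bool)', (7, True))
assert len(pair) == 64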
ethereum/eth-abi | eth_abi/codec.py | ABIEncoder.encode_abi | def encode_abi(self, types: Iterable[TypeStr], args: Iterable[Any]) -> bytes:
"""
Encodes the python values in ``args`` as a sequence of binary values of
the ABI types in ``types`` via the head-tail mechanism.
:param types: An iterable of string representations of the ABI types
that will be used for encoding e.g. ``('uint256', 'bytes[]',
'(int,int)')``
:param args: An iterable of python values to be encoded.
:returns: The head-tail encoded binary representation of the python
values in ``args`` as values of the ABI types in ``types``.
"""
encoders = [
self._registry.get_encoder(type_str)
for type_str in types
]
encoder = TupleEncoder(encoders=encoders)
return encoder(args) | python | def encode_abi(self, types: Iterable[TypeStr], args: Iterable[Any]) -> bytes:
encoders = [
self._registry.get_encoder(type_str)
for type_str in types
]
encoder = TupleEncoder(encoders=encoders)
return encoder(args) | [
"def",
"encode_abi",
"(",
"self",
",",
"types",
":",
"Iterable",
"[",
"TypeStr",
"]",
",",
"args",
":",
"Iterable",
"[",
"Any",
"]",
")",
"->",
"bytes",
":",
"encoders",
"=",
"[",
"self",
".",
"_registry",
".",
"get_encoder",
"(",
"type_str",
")",
"for",
"type_str",
"in",
"types",
"]",
"encoder",
"=",
"TupleEncoder",
"(",
"encoders",
"=",
"encoders",
")",
"return",
"encoder",
"(",
"args",
")"
]
| Encodes the python values in ``args`` as a sequence of binary values of
the ABI types in ``types`` via the head-tail mechanism.
:param types: An iterable of string representations of the ABI types
that will be used for encoding e.g. ``('uint256', 'bytes[]',
'(int,int)')``
:param args: An iterable of python values to be encoded.
:returns: The head-tail encoded binary representation of the python
values in ``args`` as values of the ABI types in ``types``. | [
"Encodes",
"the",
"python",
"values",
"in",
"args",
"as",
"a",
"sequence",
"of",
"binary",
"values",
"of",
"the",
"ABI",
"types",
"in",
"types",
"via",
"the",
"head",
"-",
"tail",
"mechanism",
"."
]
| train | https://github.com/ethereum/eth-abi/blob/0a5cab0bdeae30b77efa667379427581784f1707/eth_abi/codec.py#L67-L87 |
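The head-tail mechanism is easiest to see with one static and one dynamic argument; again a sketch assuming the top-level encode_abi helper mirrors this method.

from eth_abi import encode_abi  # assumed top-level re-export

data = encode_abi(['uint256', 'bytes'], [1, b'ab'])

# layout: one 32-byte head word per argument, then the dynamic tail
#   word 0: the uint256 value itself
#   word 1: offset of the bytes payload (64, the start of the tail)
#   word 2: length of the bytes payload
#   word 3: the payload, right-padded to 32 bytes
assert len(data) == 4 * 32
assert int.from_bytes(data[32:64], 'big') == 64
assert int.from_bytes(data[64:96], 'big') == 2
assert data[96:98] == b'ab'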
ethereum/eth-abi | eth_abi/codec.py | ABIEncoder.is_encodable | def is_encodable(self, typ: TypeStr, arg: Any) -> bool:
"""
Determines if the python value ``arg`` is encodable as a value of the
ABI type ``typ``.
:param typ: A string representation for the ABI type against which the
python value ``arg`` will be checked e.g. ``'uint256'``,
``'bytes[]'``, ``'(int,int)'``, etc.
:param arg: The python value whose encodability should be checked.
:returns: ``True`` if ``arg`` is encodable as a value of the ABI type
``typ``. Otherwise, ``False``.
"""
encoder = self._registry.get_encoder(typ)
try:
encoder.validate_value(arg)
except EncodingError:
return False
except AttributeError:
try:
encoder(arg)
except EncodingError:
return False
return True | python | def is_encodable(self, typ: TypeStr, arg: Any) -> bool:
encoder = self._registry.get_encoder(typ)
try:
encoder.validate_value(arg)
except EncodingError:
return False
except AttributeError:
try:
encoder(arg)
except EncodingError:
return False
return True | [
"def",
"is_encodable",
"(",
"self",
",",
"typ",
":",
"TypeStr",
",",
"arg",
":",
"Any",
")",
"->",
"bool",
":",
"encoder",
"=",
"self",
".",
"_registry",
".",
"get_encoder",
"(",
"typ",
")",
"try",
":",
"encoder",
".",
"validate_value",
"(",
"arg",
")",
"except",
"EncodingError",
":",
"return",
"False",
"except",
"AttributeError",
":",
"try",
":",
"encoder",
"(",
"arg",
")",
"except",
"EncodingError",
":",
"return",
"False",
"return",
"True"
]
| Determines if the python value ``arg`` is encodable as a value of the
ABI type ``typ``.
:param typ: A string representation for the ABI type against which the
python value ``arg`` will be checked e.g. ``'uint256'``,
``'bytes[]'``, ``'(int,int)'``, etc.
:param arg: The python value whose encodability should be checked.
:returns: ``True`` if ``arg`` is encodable as a value of the ABI type
``typ``. Otherwise, ``False``. | [
"Determines",
"if",
"the",
"python",
"value",
"arg",
"is",
"encodable",
"as",
"a",
"value",
"of",
"the",
"ABI",
"type",
"typ",
"."
]
| train | https://github.com/ethereum/eth-abi/blob/0a5cab0bdeae30b77efa667379427581784f1707/eth_abi/codec.py#L89-L114 |
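A short illustration of the check, assuming a top-level is_encodable helper wired to the method above.

from eth_abi import is_encodable  # assumed top-level re-export

assert is_encodable('uint8', 255)
assert not is_encodable('uint8', 256)     # out of range for 8 bits
assert not is_encodable('uint8', 'text')  # wrong python type
assert is_encodable('bytes4', b'\x00' * 4)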
ethereum/eth-abi | eth_abi/codec.py | ABIDecoder.decode_single | def decode_single(self, typ: TypeStr, data: Decodable) -> Any:
"""
Decodes the binary value ``data`` of the ABI type ``typ`` into its
equivalent python value.
:param typ: The string representation of the ABI type that will be used for
decoding e.g. ``'uint256'``, ``'bytes[]'``, ``'(int,int)'``, etc.
:param data: The binary value to be decoded.
:returns: The equivalent python value of the ABI value represented in
``data``.
"""
if not is_bytes(data):
raise TypeError("The `data` value must be of bytes type. Got {0}".format(type(data)))
decoder = self._registry.get_decoder(typ)
stream = ContextFramesBytesIO(data)
return decoder(stream) | python | def decode_single(self, typ: TypeStr, data: Decodable) -> Any:
if not is_bytes(data):
raise TypeError("The `data` value must be of bytes type. Got {0}".format(type(data)))
decoder = self._registry.get_decoder(typ)
stream = ContextFramesBytesIO(data)
return decoder(stream) | [
"def",
"decode_single",
"(",
"self",
",",
"typ",
":",
"TypeStr",
",",
"data",
":",
"Decodable",
")",
"->",
"Any",
":",
"if",
"not",
"is_bytes",
"(",
"data",
")",
":",
"raise",
"TypeError",
"(",
"\"The `data` value must be of bytes type. Got {0}\"",
".",
"format",
"(",
"type",
"(",
"data",
")",
")",
")",
"decoder",
"=",
"self",
".",
"_registry",
".",
"get_decoder",
"(",
"typ",
")",
"stream",
"=",
"ContextFramesBytesIO",
"(",
"data",
")",
"return",
"decoder",
"(",
"stream",
")"
]
| Decodes the binary value ``data`` of the ABI type ``typ`` into its
equivalent python value.
:param typ: The string representation of the ABI type that will be used for
decoding e.g. ``'uint256'``, ``'bytes[]'``, ``'(int,int)'``, etc.
:param data: The binary value to be decoded.
:returns: The equivalent python value of the ABI value represented in
``data``. | [
"Decodes",
"the",
"binary",
"value",
"data",
"of",
"the",
"ABI",
"type",
"typ",
"into",
"its",
"equivalent",
"python",
"value",
"."
]
| train | https://github.com/ethereum/eth-abi/blob/0a5cab0bdeae30b77efa667379427581784f1707/eth_abi/codec.py#L135-L153 |
ethereum/eth-abi | eth_abi/codec.py | ABIDecoder.decode_abi | def decode_abi(self, types: Iterable[TypeStr], data: Decodable) -> Tuple[Any, ...]:
"""
Decodes the binary value ``data`` as a sequence of values of the ABI types
in ``types`` via the head-tail mechanism into a tuple of equivalent python
values.
:param types: An iterable of string representations of the ABI types that
will be used for decoding e.g. ``('uint256', 'bytes[]', '(int,int)')``
:param data: The binary value to be decoded.
:returns: A tuple of equivalent python values for the ABI values
represented in ``data``.
"""
if not is_bytes(data):
raise TypeError("The `data` value must be of bytes type. Got {0}".format(type(data)))
decoders = [
self._registry.get_decoder(type_str)
for type_str in types
]
decoder = TupleDecoder(decoders=decoders)
stream = ContextFramesBytesIO(data)
return decoder(stream) | python | def decode_abi(self, types: Iterable[TypeStr], data: Decodable) -> Tuple[Any, ...]:
if not is_bytes(data):
raise TypeError("The `data` value must be of bytes type. Got {0}".format(type(data)))
decoders = [
self._registry.get_decoder(type_str)
for type_str in types
]
decoder = TupleDecoder(decoders=decoders)
stream = ContextFramesBytesIO(data)
return decoder(stream) | [
"def",
"decode_abi",
"(",
"self",
",",
"types",
":",
"Iterable",
"[",
"TypeStr",
"]",
",",
"data",
":",
"Decodable",
")",
"->",
"Tuple",
"[",
"Any",
",",
"...",
"]",
":",
"if",
"not",
"is_bytes",
"(",
"data",
")",
":",
"raise",
"TypeError",
"(",
"\"The `data` value must be of bytes type. Got {0}\"",
".",
"format",
"(",
"type",
"(",
"data",
")",
")",
")",
"decoders",
"=",
"[",
"self",
".",
"_registry",
".",
"get_decoder",
"(",
"type_str",
")",
"for",
"type_str",
"in",
"types",
"]",
"decoder",
"=",
"TupleDecoder",
"(",
"decoders",
"=",
"decoders",
")",
"stream",
"=",
"ContextFramesBytesIO",
"(",
"data",
")",
"return",
"decoder",
"(",
"stream",
")"
]
| Decodes the binary value ``data`` as a sequence of values of the ABI types
in ``types`` via the head-tail mechanism into a tuple of equivalent python
values.
:param types: An iterable of string representations of the ABI types that
will be used for decoding e.g. ``('uint256', 'bytes[]', '(int,int)')``
:param data: The binary value to be decoded.
:returns: A tuple of equivalent python values for the ABI values
represented in ``data``. | [
"Decodes",
"the",
"binary",
"value",
"data",
"as",
"a",
"sequence",
"of",
"values",
"of",
"the",
"ABI",
"types",
"in",
"types",
"via",
"the",
"head",
"-",
"tail",
"mechanism",
"into",
"a",
"tuple",
"of",
"equivalent",
"python",
"values",
"."
]
| train | https://github.com/ethereum/eth-abi/blob/0a5cab0bdeae30b77efa667379427581784f1707/eth_abi/codec.py#L155-L179 |
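Decoding is the inverse of encoding, so a round trip is the simplest sanity check; the sketch assumes the matching top-level helpers exist.

from eth_abi import encode_abi, decode_abi, encode_single, decode_single

types = ['uint256', 'bytes', 'bool[]']
values = (42, b'payload', (True, False))

# decode_abi returns a tuple of python values, arrays included
assert decode_abi(types, encode_abi(types, values)) == values

# decode_single handles one value; note both decoders only accept bytes input
assert decode_single('uint256', encode_single('uint256', 42)) == 42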
ethereum/eth-abi | eth_abi/grammar.py | NodeVisitor.parse | def parse(self, type_str):
"""
Parses a type string into an appropriate instance of
:class:`~eth_abi.grammar.ABIType`. If a type string cannot be parsed,
throws :class:`~eth_abi.exceptions.ParseError`.
:param type_str: The type string to be parsed.
:returns: An instance of :class:`~eth_abi.grammar.ABIType` containing
information about the parsed type string.
"""
if not isinstance(type_str, str):
raise TypeError('Can only parse string values: got {}'.format(type(type_str)))
try:
return super().parse(type_str)
except parsimonious.ParseError as e:
raise ParseError(e.text, e.pos, e.expr) | python | def parse(self, type_str):
if not isinstance(type_str, str):
raise TypeError('Can only parse string values: got {}'.format(type(type_str)))
try:
return super().parse(type_str)
except parsimonious.ParseError as e:
raise ParseError(e.text, e.pos, e.expr) | [
"def",
"parse",
"(",
"self",
",",
"type_str",
")",
":",
"if",
"not",
"isinstance",
"(",
"type_str",
",",
"str",
")",
":",
"raise",
"TypeError",
"(",
"'Can only parse string values: got {}'",
".",
"format",
"(",
"type",
"(",
"type_str",
")",
")",
")",
"try",
":",
"return",
"super",
"(",
")",
".",
"parse",
"(",
"type_str",
")",
"except",
"parsimonious",
".",
"ParseError",
"as",
"e",
":",
"raise",
"ParseError",
"(",
"e",
".",
"text",
",",
"e",
".",
"pos",
",",
"e",
".",
"expr",
")"
]
| Parses a type string into an appropriate instance of
:class:`~eth_abi.grammar.ABIType`. If a type string cannot be parsed,
throws :class:`~eth_abi.exceptions.ParseError`.
:param type_str: The type string to be parsed.
:returns: An instance of :class:`~eth_abi.grammar.ABIType` containing
information about the parsed type string. | [
"Parses",
"a",
"type",
"string",
"into",
"an",
"appropriate",
"instance",
"of",
":",
"class",
":",
"~eth_abi",
".",
"grammar",
".",
"ABIType",
".",
"If",
"a",
"type",
"string",
"cannot",
"be",
"parsed",
"throws",
":",
"class",
":",
"~eth_abi",
".",
"exceptions",
".",
"ParseError",
"."
]
| train | https://github.com/ethereum/eth-abi/blob/0a5cab0bdeae30b77efa667379427581784f1707/eth_abi/grammar.py#L109-L125 |
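The parser is also exposed as a module-level helper in eth_abi.grammar (it is what the registry predicates above call). A sketch of what the parsed objects look like, with attribute names taken from their use elsewhere in these rows; the printed values are expectations, not guarantees.

from eth_abi.grammar import parse
from eth_abi.exceptions import ParseError

t = parse('uint256[2][]')
print(t.base)     # expected: 'uint'
print(t.sub)      # expected: 256
print(t.arrlist)  # expected: ((2,), ()) - fixed inner dimension, dynamic outer

tup = parse('(address,uint256)')
print(type(tup).__name__)  # expected: TupleType

try:
    parse('this is not a type')
except ParseError as exc:
    print('rejected:', exc)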
aiortc/aioice | aioice/turn.py | create_turn_endpoint | async def create_turn_endpoint(protocol_factory, server_addr, username, password,
lifetime=600, ssl=False, transport='udp'):
"""
Create datagram connection relayed over TURN.
"""
loop = asyncio.get_event_loop()
if transport == 'tcp':
_, inner_protocol = await loop.create_connection(
lambda: TurnClientTcpProtocol(server_addr,
username=username,
password=password,
lifetime=lifetime),
host=server_addr[0],
port=server_addr[1],
ssl=ssl)
else:
_, inner_protocol = await loop.create_datagram_endpoint(
lambda: TurnClientUdpProtocol(server_addr,
username=username,
password=password,
lifetime=lifetime),
remote_addr=server_addr)
protocol = protocol_factory()
transport = TurnTransport(protocol, inner_protocol)
await transport._connect()
return transport, protocol | python | async def create_turn_endpoint(protocol_factory, server_addr, username, password,
lifetime=600, ssl=False, transport='udp'):
loop = asyncio.get_event_loop()
if transport == 'tcp':
_, inner_protocol = await loop.create_connection(
lambda: TurnClientTcpProtocol(server_addr,
username=username,
password=password,
lifetime=lifetime),
host=server_addr[0],
port=server_addr[1],
ssl=ssl)
else:
_, inner_protocol = await loop.create_datagram_endpoint(
lambda: TurnClientUdpProtocol(server_addr,
username=username,
password=password,
lifetime=lifetime),
remote_addr=server_addr)
protocol = protocol_factory()
transport = TurnTransport(protocol, inner_protocol)
await transport._connect()
return transport, protocol | [
"async",
"def",
"create_turn_endpoint",
"(",
"protocol_factory",
",",
"server_addr",
",",
"username",
",",
"password",
",",
"lifetime",
"=",
"600",
",",
"ssl",
"=",
"False",
",",
"transport",
"=",
"'udp'",
")",
":",
"loop",
"=",
"asyncio",
".",
"get_event_loop",
"(",
")",
"if",
"transport",
"==",
"'tcp'",
":",
"_",
",",
"inner_protocol",
"=",
"await",
"loop",
".",
"create_connection",
"(",
"lambda",
":",
"TurnClientTcpProtocol",
"(",
"server_addr",
",",
"username",
"=",
"username",
",",
"password",
"=",
"password",
",",
"lifetime",
"=",
"lifetime",
")",
",",
"host",
"=",
"server_addr",
"[",
"0",
"]",
",",
"port",
"=",
"server_addr",
"[",
"1",
"]",
",",
"ssl",
"=",
"ssl",
")",
"else",
":",
"_",
",",
"inner_protocol",
"=",
"await",
"loop",
".",
"create_datagram_endpoint",
"(",
"lambda",
":",
"TurnClientUdpProtocol",
"(",
"server_addr",
",",
"username",
"=",
"username",
",",
"password",
"=",
"password",
",",
"lifetime",
"=",
"lifetime",
")",
",",
"remote_addr",
"=",
"server_addr",
")",
"protocol",
"=",
"protocol_factory",
"(",
")",
"transport",
"=",
"TurnTransport",
"(",
"protocol",
",",
"inner_protocol",
")",
"await",
"transport",
".",
"_connect",
"(",
")",
"return",
"transport",
",",
"protocol"
]
| Create datagram connection relayed over TURN. | [
"Create",
"datagram",
"connection",
"relayed",
"over",
"TURN",
"."
]
| train | https://github.com/aiortc/aioice/blob/a04d810d94ec2d00eca9ce01eacca74b3b086616/aioice/turn.py#L276-L303 |
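Typical usage pairs the coroutine with an ordinary asyncio protocol factory. The sketch below is illustrative only: the TURN server address, credentials and peer address are placeholders, and it assumes the returned transport offers the usual sendto/close datagram-transport methods.

import asyncio
from aioice.turn import create_turn_endpoint

class EchoProtocol(asyncio.DatagramProtocol):
    def datagram_received(self, data, addr):
        print('relayed data from', addr, ':', data)

async def main():
    transport, protocol = await create_turn_endpoint(
        EchoProtocol,
        server_addr=('turn.example.com', 3478),  # placeholder TURN server
        username='user',                         # placeholder credentials
        password='secret',
        transport='udp')
    # data sent through the transport is relayed by the TURN allocation
    transport.sendto(b'hello', ('203.0.113.5', 40000))  # hypothetical peer
    await asyncio.sleep(1)
    transport.close()  # assumed to release the allocation

asyncio.get_event_loop().run_until_complete(main())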
aiortc/aioice | aioice/turn.py | TurnClientMixin.connect | async def connect(self):
"""
Create a TURN allocation.
"""
request = stun.Message(message_method=stun.Method.ALLOCATE,
message_class=stun.Class.REQUEST)
request.attributes['LIFETIME'] = self.lifetime
request.attributes['REQUESTED-TRANSPORT'] = UDP_TRANSPORT
try:
response, _ = await self.request(request)
except exceptions.TransactionFailed as e:
response = e.response
if response.attributes['ERROR-CODE'][0] == 401:
# update long-term credentials
self.nonce = response.attributes['NONCE']
self.realm = response.attributes['REALM']
self.integrity_key = make_integrity_key(self.username, self.realm, self.password)
# retry request with authentication
request.transaction_id = random_transaction_id()
response, _ = await self.request(request)
self.relayed_address = response.attributes['XOR-RELAYED-ADDRESS']
logger.info('TURN allocation created %s', self.relayed_address)
# periodically refresh allocation
self.refresh_handle = asyncio.ensure_future(self.refresh())
return self.relayed_address | python | async def connect(self):
request = stun.Message(message_method=stun.Method.ALLOCATE,
message_class=stun.Class.REQUEST)
request.attributes['LIFETIME'] = self.lifetime
request.attributes['REQUESTED-TRANSPORT'] = UDP_TRANSPORT
try:
response, _ = await self.request(request)
except exceptions.TransactionFailed as e:
response = e.response
if response.attributes['ERROR-CODE'][0] == 401:
self.nonce = response.attributes['NONCE']
self.realm = response.attributes['REALM']
self.integrity_key = make_integrity_key(self.username, self.realm, self.password)
request.transaction_id = random_transaction_id()
response, _ = await self.request(request)
self.relayed_address = response.attributes['XOR-RELAYED-ADDRESS']
logger.info('TURN allocation created %s', self.relayed_address)
self.refresh_handle = asyncio.ensure_future(self.refresh())
return self.relayed_address | [
"async",
"def",
"connect",
"(",
"self",
")",
":",
"request",
"=",
"stun",
".",
"Message",
"(",
"message_method",
"=",
"stun",
".",
"Method",
".",
"ALLOCATE",
",",
"message_class",
"=",
"stun",
".",
"Class",
".",
"REQUEST",
")",
"request",
".",
"attributes",
"[",
"'LIFETIME'",
"]",
"=",
"self",
".",
"lifetime",
"request",
".",
"attributes",
"[",
"'REQUESTED-TRANSPORT'",
"]",
"=",
"UDP_TRANSPORT",
"try",
":",
"response",
",",
"_",
"=",
"await",
"self",
".",
"request",
"(",
"request",
")",
"except",
"exceptions",
".",
"TransactionFailed",
"as",
"e",
":",
"response",
"=",
"e",
".",
"response",
"if",
"response",
".",
"attributes",
"[",
"'ERROR-CODE'",
"]",
"[",
"0",
"]",
"==",
"401",
":",
"# update long-term credentials",
"self",
".",
"nonce",
"=",
"response",
".",
"attributes",
"[",
"'NONCE'",
"]",
"self",
".",
"realm",
"=",
"response",
".",
"attributes",
"[",
"'REALM'",
"]",
"self",
".",
"integrity_key",
"=",
"make_integrity_key",
"(",
"self",
".",
"username",
",",
"self",
".",
"realm",
",",
"self",
".",
"password",
")",
"# retry request with authentication",
"request",
".",
"transaction_id",
"=",
"random_transaction_id",
"(",
")",
"response",
",",
"_",
"=",
"await",
"self",
".",
"request",
"(",
"request",
")",
"self",
".",
"relayed_address",
"=",
"response",
".",
"attributes",
"[",
"'XOR-RELAYED-ADDRESS'",
"]",
"logger",
".",
"info",
"(",
"'TURN allocation created %s'",
",",
"self",
".",
"relayed_address",
")",
"# periodically refresh allocation",
"self",
".",
"refresh_handle",
"=",
"asyncio",
".",
"ensure_future",
"(",
"self",
".",
"refresh",
"(",
")",
")",
"return",
"self",
".",
"relayed_address"
]
| Create a TURN allocation. | [
"Create",
"a",
"TURN",
"allocation",
"."
]
| train | https://github.com/aiortc/aioice/blob/a04d810d94ec2d00eca9ce01eacca74b3b086616/aioice/turn.py#L70-L99 |
aiortc/aioice | aioice/turn.py | TurnClientMixin.delete | async def delete(self):
"""
Delete the TURN allocation.
"""
if self.refresh_handle:
self.refresh_handle.cancel()
self.refresh_handle = None
request = stun.Message(message_method=stun.Method.REFRESH,
message_class=stun.Class.REQUEST)
request.attributes['LIFETIME'] = 0
await self.request(request)
logger.info('TURN allocation deleted %s', self.relayed_address)
if self.receiver:
self.receiver.connection_lost(None) | python | async def delete(self):
if self.refresh_handle:
self.refresh_handle.cancel()
self.refresh_handle = None
request = stun.Message(message_method=stun.Method.REFRESH,
message_class=stun.Class.REQUEST)
request.attributes['LIFETIME'] = 0
await self.request(request)
logger.info('TURN allocation deleted %s', self.relayed_address)
if self.receiver:
self.receiver.connection_lost(None) | [
"async",
"def",
"delete",
"(",
"self",
")",
":",
"if",
"self",
".",
"refresh_handle",
":",
"self",
".",
"refresh_handle",
".",
"cancel",
"(",
")",
"self",
".",
"refresh_handle",
"=",
"None",
"request",
"=",
"stun",
".",
"Message",
"(",
"message_method",
"=",
"stun",
".",
"Method",
".",
"REFRESH",
",",
"message_class",
"=",
"stun",
".",
"Class",
".",
"REQUEST",
")",
"request",
".",
"attributes",
"[",
"'LIFETIME'",
"]",
"=",
"0",
"await",
"self",
".",
"request",
"(",
"request",
")",
"logger",
".",
"info",
"(",
"'TURN allocation deleted %s'",
",",
"self",
".",
"relayed_address",
")",
"if",
"self",
".",
"receiver",
":",
"self",
".",
"receiver",
".",
"connection_lost",
"(",
"None",
")"
]
| Delete the TURN allocation. | [
"Delete",
"the",
"TURN",
"allocation",
"."
]
| train | https://github.com/aiortc/aioice/blob/a04d810d94ec2d00eca9ce01eacca74b3b086616/aioice/turn.py#L130-L145 |
aiortc/aioice | aioice/turn.py | TurnClientMixin.refresh | async def refresh(self):
"""
Periodically refresh the TURN allocation.
"""
while True:
await asyncio.sleep(5/6 * self.lifetime)
request = stun.Message(message_method=stun.Method.REFRESH,
message_class=stun.Class.REQUEST)
request.attributes['LIFETIME'] = self.lifetime
await self.request(request)
logger.info('TURN allocation refreshed %s', self.relayed_address) | python | async def refresh(self):
while True:
await asyncio.sleep(5/6 * self.lifetime)
request = stun.Message(message_method=stun.Method.REFRESH,
message_class=stun.Class.REQUEST)
request.attributes['LIFETIME'] = self.lifetime
await self.request(request)
logger.info('TURN allocation refreshed %s', self.relayed_address) | [
"async",
"def",
"refresh",
"(",
"self",
")",
":",
"while",
"True",
":",
"await",
"asyncio",
".",
"sleep",
"(",
"5",
"/",
"6",
"*",
"self",
".",
"lifetime",
")",
"request",
"=",
"stun",
".",
"Message",
"(",
"message_method",
"=",
"stun",
".",
"Method",
".",
"REFRESH",
",",
"message_class",
"=",
"stun",
".",
"Class",
".",
"REQUEST",
")",
"request",
".",
"attributes",
"[",
"'LIFETIME'",
"]",
"=",
"self",
".",
"lifetime",
"await",
"self",
".",
"request",
"(",
"request",
")",
"logger",
".",
"info",
"(",
"'TURN allocation refreshed %s'",
",",
"self",
".",
"relayed_address",
")"
]
| Periodically refresh the TURN allocation. | [
"Periodically",
"refresh",
"the",
"TURN",
"allocation",
"."
]
| train | https://github.com/aiortc/aioice/blob/a04d810d94ec2d00eca9ce01eacca74b3b086616/aioice/turn.py#L147-L159 |
aiortc/aioice | aioice/turn.py | TurnClientMixin.request | async def request(self, request):
"""
Execute a STUN transaction and return the response.
"""
assert request.transaction_id not in self.transactions
if self.integrity_key:
self.__add_authentication(request)
transaction = stun.Transaction(request, self.server, self)
self.transactions[request.transaction_id] = transaction
try:
return await transaction.run()
finally:
del self.transactions[request.transaction_id] | python | async def request(self, request):
assert request.transaction_id not in self.transactions
if self.integrity_key:
self.__add_authentication(request)
transaction = stun.Transaction(request, self.server, self)
self.transactions[request.transaction_id] = transaction
try:
return await transaction.run()
finally:
del self.transactions[request.transaction_id] | [
"async",
"def",
"request",
"(",
"self",
",",
"request",
")",
":",
"assert",
"request",
".",
"transaction_id",
"not",
"in",
"self",
".",
"transactions",
"if",
"self",
".",
"integrity_key",
":",
"self",
".",
"__add_authentication",
"(",
"request",
")",
"transaction",
"=",
"stun",
".",
"Transaction",
"(",
"request",
",",
"self",
".",
"server",
",",
"self",
")",
"self",
".",
"transactions",
"[",
"request",
".",
"transaction_id",
"]",
"=",
"transaction",
"try",
":",
"return",
"await",
"transaction",
".",
"run",
"(",
")",
"finally",
":",
"del",
"self",
".",
"transactions",
"[",
"request",
".",
"transaction_id",
"]"
]
| Execute a STUN transaction and return the response. | [
"Execute",
"a",
"STUN",
"transaction",
"and",
"return",
"the",
"response",
"."
]
| train | https://github.com/aiortc/aioice/blob/a04d810d94ec2d00eca9ce01eacca74b3b086616/aioice/turn.py#L161-L175 |
aiortc/aioice | aioice/turn.py | TurnClientMixin.send_data | async def send_data(self, data, addr):
"""
Send data to a remote host via the TURN server.
"""
channel = self.peer_to_channel.get(addr)
if channel is None:
channel = self.channel_number
self.channel_number += 1
self.channel_to_peer[channel] = addr
self.peer_to_channel[addr] = channel
# bind channel
await self.channel_bind(channel, addr)
header = struct.pack('!HH', channel, len(data))
self._send(header + data) | python | async def send_data(self, data, addr):
channel = self.peer_to_channel.get(addr)
if channel is None:
channel = self.channel_number
self.channel_number += 1
self.channel_to_peer[channel] = addr
self.peer_to_channel[addr] = channel
await self.channel_bind(channel, addr)
header = struct.pack('!HH', channel, len(data))
self._send(header + data) | [
"async",
"def",
"send_data",
"(",
"self",
",",
"data",
",",
"addr",
")",
":",
"channel",
"=",
"self",
".",
"peer_to_channel",
".",
"get",
"(",
"addr",
")",
"if",
"channel",
"is",
"None",
":",
"channel",
"=",
"self",
".",
"channel_number",
"self",
".",
"channel_number",
"+=",
"1",
"self",
".",
"channel_to_peer",
"[",
"channel",
"]",
"=",
"addr",
"self",
".",
"peer_to_channel",
"[",
"addr",
"]",
"=",
"channel",
"# bind channel",
"await",
"self",
".",
"channel_bind",
"(",
"channel",
",",
"addr",
")",
"header",
"=",
"struct",
".",
"pack",
"(",
"'!HH'",
",",
"channel",
",",
"len",
"(",
"data",
")",
")",
"self",
".",
"_send",
"(",
"header",
"+",
"data",
")"
]
| Send data to a remote host via the TURN server. | [
"Send",
"data",
"to",
"a",
"remote",
"host",
"via",
"the",
"TURN",
"server",
"."
]
| train | https://github.com/aiortc/aioice/blob/a04d810d94ec2d00eca9ce01eacca74b3b086616/aioice/turn.py#L177-L192 |
aiortc/aioice | aioice/turn.py | TurnClientMixin.send_stun | def send_stun(self, message, addr):
"""
Send a STUN message to the TURN server.
"""
logger.debug('%s > %s %s', self, addr, message)
self._send(bytes(message)) | python | def send_stun(self, message, addr):
logger.debug('%s > %s %s', self, addr, message)
self._send(bytes(message)) | [
"def",
"send_stun",
"(",
"self",
",",
"message",
",",
"addr",
")",
":",
"logger",
".",
"debug",
"(",
"'%s > %s %s'",
",",
"self",
",",
"addr",
",",
"message",
")",
"self",
".",
"_send",
"(",
"bytes",
"(",
"message",
")",
")"
]
| Send a STUN message to the TURN server. | [
"Send",
"a",
"STUN",
"message",
"to",
"the",
"TURN",
"server",
"."
]
| train | https://github.com/aiortc/aioice/blob/a04d810d94ec2d00eca9ce01eacca74b3b086616/aioice/turn.py#L194-L199 |
aiortc/aioice | aioice/turn.py | TurnTransport.get_extra_info | def get_extra_info(self, name, default=None):
"""
Return optional transport information.
- `'related_address'`: the related address
- `'sockname'`: the relayed address
"""
if name == 'related_address':
return self.__inner_protocol.transport.get_extra_info('sockname')
elif name == 'sockname':
return self.__relayed_address
return default | python | def get_extra_info(self, name, default=None):
if name == 'related_address':
return self.__inner_protocol.transport.get_extra_info('sockname')
elif name == 'sockname':
return self.__relayed_address
return default | [
"def",
"get_extra_info",
"(",
"self",
",",
"name",
",",
"default",
"=",
"None",
")",
":",
"if",
"name",
"==",
"'related_address'",
":",
"return",
"self",
".",
"__inner_protocol",
".",
"transport",
".",
"get_extra_info",
"(",
"'sockname'",
")",
"elif",
"name",
"==",
"'sockname'",
":",
"return",
"self",
".",
"__relayed_address",
"return",
"default"
]
| Return optional transport information.
- `'related_address'`: the related address
- `'sockname'`: the relayed address | [
"Return",
"optional",
"transport",
"information",
"."
]
| train | https://github.com/aiortc/aioice/blob/a04d810d94ec2d00eca9ce01eacca74b3b086616/aioice/turn.py#L250-L261 |
aiortc/aioice | aioice/turn.py | TurnTransport.sendto | def sendto(self, data, addr):
"""
Sends the `data` bytes to the remote peer given `addr`.
This will bind a TURN channel as necessary.
"""
asyncio.ensure_future(self.__inner_protocol.send_data(data, addr)) | python | def sendto(self, data, addr):
asyncio.ensure_future(self.__inner_protocol.send_data(data, addr)) | [
"def",
"sendto",
"(",
"self",
",",
"data",
",",
"addr",
")",
":",
"asyncio",
".",
"ensure_future",
"(",
"self",
".",
"__inner_protocol",
".",
"send_data",
"(",
"data",
",",
"addr",
")",
")"
]
| Sends the `data` bytes to the remote peer given `addr`.
This will bind a TURN channel as necessary. | [
"Sends",
"the",
"data",
"bytes",
"to",
"the",
"remote",
"peer",
"given",
"addr",
"."
]
| train | https://github.com/aiortc/aioice/blob/a04d810d94ec2d00eca9ce01eacca74b3b086616/aioice/turn.py#L263-L269 |
aiortc/aioice | aioice/candidate.py | candidate_foundation | def candidate_foundation(candidate_type, candidate_transport, base_address):
"""
See RFC 5245 - 4.1.1.3. Computing Foundations
"""
key = '%s|%s|%s' % (candidate_type, candidate_transport, base_address)
return hashlib.md5(key.encode('ascii')).hexdigest() | python | def candidate_foundation(candidate_type, candidate_transport, base_address):
key = '%s|%s|%s' % (candidate_type, candidate_transport, base_address)
return hashlib.md5(key.encode('ascii')).hexdigest() | [
"def",
"candidate_foundation",
"(",
"candidate_type",
",",
"candidate_transport",
",",
"base_address",
")",
":",
"key",
"=",
"'%s|%s|%s'",
"%",
"(",
"candidate_type",
",",
"candidate_transport",
",",
"base_address",
")",
"return",
"hashlib",
".",
"md5",
"(",
"key",
".",
"encode",
"(",
"'ascii'",
")",
")",
".",
"hexdigest",
"(",
")"
]
| See RFC 5245 - 4.1.1.3. Computing Foundations | [
"See",
"RFC",
"5245",
"-",
"4",
".",
"1",
".",
"1",
".",
"3",
".",
"Computing",
"Foundations"
]
| train | https://github.com/aiortc/aioice/blob/a04d810d94ec2d00eca9ce01eacca74b3b086616/aioice/candidate.py#L5-L10 |
aiortc/aioice | aioice/candidate.py | candidate_priority | def candidate_priority(candidate_component, candidate_type, local_pref=65535):
"""
See RFC 5245 - 4.1.2.1. Recommended Formula
"""
if candidate_type == 'host':
type_pref = 126
elif candidate_type == 'prflx':
type_pref = 110
elif candidate_type == 'srflx':
type_pref = 100
else:
type_pref = 0
return (1 << 24) * type_pref + \
(1 << 8) * local_pref + \
(256 - candidate_component) | python | def candidate_priority(candidate_component, candidate_type, local_pref=65535):
if candidate_type == 'host':
type_pref = 126
elif candidate_type == 'prflx':
type_pref = 110
elif candidate_type == 'srflx':
type_pref = 100
else:
type_pref = 0
return (1 << 24) * type_pref + \
(1 << 8) * local_pref + \
(256 - candidate_component) | [
"def",
"candidate_priority",
"(",
"candidate_component",
",",
"candidate_type",
",",
"local_pref",
"=",
"65535",
")",
":",
"if",
"candidate_type",
"==",
"'host'",
":",
"type_pref",
"=",
"126",
"elif",
"candidate_type",
"==",
"'prflx'",
":",
"type_pref",
"=",
"110",
"elif",
"candidate_type",
"==",
"'srflx'",
":",
"type_pref",
"=",
"100",
"else",
":",
"type_pref",
"=",
"0",
"return",
"(",
"1",
"<<",
"24",
")",
"*",
"type_pref",
"+",
"(",
"1",
"<<",
"8",
")",
"*",
"local_pref",
"+",
"(",
"256",
"-",
"candidate_component",
")"
]
| See RFC 5245 - 4.1.2.1. Recommended Formula | [
"See",
"RFC",
"5245",
"-",
"4",
".",
"1",
".",
"2",
".",
"1",
".",
"Recommended",
"Formula"
]
| train | https://github.com/aiortc/aioice/blob/a04d810d94ec2d00eca9ce01eacca74b3b086616/aioice/candidate.py#L13-L28 |
aiortc/aioice | aioice/candidate.py | Candidate.from_sdp | def from_sdp(cls, sdp):
"""
Parse a :class:`Candidate` from SDP.
.. code-block:: python
Candidate.from_sdp(
'6815297761 1 udp 659136 1.2.3.4 31102 typ host generation 0')
"""
bits = sdp.split()
if len(bits) < 8:
raise ValueError('SDP does not have enough properties')
kwargs = {
'foundation': bits[0],
'component': int(bits[1]),
'transport': bits[2],
'priority': int(bits[3]),
'host': bits[4],
'port': int(bits[5]),
'type': bits[7],
}
for i in range(8, len(bits) - 1, 2):
if bits[i] == 'raddr':
kwargs['related_address'] = bits[i + 1]
elif bits[i] == 'rport':
kwargs['related_port'] = int(bits[i + 1])
elif bits[i] == 'tcptype':
kwargs['tcptype'] = bits[i + 1]
elif bits[i] == 'generation':
kwargs['generation'] = int(bits[i + 1])
return Candidate(**kwargs) | python | def from_sdp(cls, sdp):
bits = sdp.split()
if len(bits) < 8:
raise ValueError('SDP does not have enough properties')
kwargs = {
'foundation': bits[0],
'component': int(bits[1]),
'transport': bits[2],
'priority': int(bits[3]),
'host': bits[4],
'port': int(bits[5]),
'type': bits[7],
}
for i in range(8, len(bits) - 1, 2):
if bits[i] == 'raddr':
kwargs['related_address'] = bits[i + 1]
elif bits[i] == 'rport':
kwargs['related_port'] = int(bits[i + 1])
elif bits[i] == 'tcptype':
kwargs['tcptype'] = bits[i + 1]
elif bits[i] == 'generation':
kwargs['generation'] = int(bits[i + 1])
return Candidate(**kwargs) | [
"def",
"from_sdp",
"(",
"cls",
",",
"sdp",
")",
":",
"bits",
"=",
"sdp",
".",
"split",
"(",
")",
"if",
"len",
"(",
"bits",
")",
"<",
"8",
":",
"raise",
"ValueError",
"(",
"'SDP does not have enough properties'",
")",
"kwargs",
"=",
"{",
"'foundation'",
":",
"bits",
"[",
"0",
"]",
",",
"'component'",
":",
"int",
"(",
"bits",
"[",
"1",
"]",
")",
",",
"'transport'",
":",
"bits",
"[",
"2",
"]",
",",
"'priority'",
":",
"int",
"(",
"bits",
"[",
"3",
"]",
")",
",",
"'host'",
":",
"bits",
"[",
"4",
"]",
",",
"'port'",
":",
"int",
"(",
"bits",
"[",
"5",
"]",
")",
",",
"'type'",
":",
"bits",
"[",
"7",
"]",
",",
"}",
"for",
"i",
"in",
"range",
"(",
"8",
",",
"len",
"(",
"bits",
")",
"-",
"1",
",",
"2",
")",
":",
"if",
"bits",
"[",
"i",
"]",
"==",
"'raddr'",
":",
"kwargs",
"[",
"'related_address'",
"]",
"=",
"bits",
"[",
"i",
"+",
"1",
"]",
"elif",
"bits",
"[",
"i",
"]",
"==",
"'rport'",
":",
"kwargs",
"[",
"'related_port'",
"]",
"=",
"int",
"(",
"bits",
"[",
"i",
"+",
"1",
"]",
")",
"elif",
"bits",
"[",
"i",
"]",
"==",
"'tcptype'",
":",
"kwargs",
"[",
"'tcptype'",
"]",
"=",
"bits",
"[",
"i",
"+",
"1",
"]",
"elif",
"bits",
"[",
"i",
"]",
"==",
"'generation'",
":",
"kwargs",
"[",
"'generation'",
"]",
"=",
"int",
"(",
"bits",
"[",
"i",
"+",
"1",
"]",
")",
"return",
"Candidate",
"(",
"*",
"*",
"kwargs",
")"
]
| Parse a :class:`Candidate` from SDP.
.. code-block:: python
Candidate.from_sdp(
'6815297761 1 udp 659136 1.2.3.4 31102 typ host generation 0') | [
"Parse",
"a",
":",
"class",
":",
"Candidate",
"from",
"SDP",
"."
]
| train | https://github.com/aiortc/aioice/blob/a04d810d94ec2d00eca9ce01eacca74b3b086616/aioice/candidate.py#L50-L83 |
aiortc/aioice | aioice/candidate.py | Candidate.to_sdp | def to_sdp(self):
"""
Return a string representation suitable for SDP.
"""
sdp = '%s %d %s %d %s %d typ %s' % (
self.foundation,
self.component,
self.transport,
self.priority,
self.host,
self.port,
self.type)
if self.related_address is not None:
sdp += ' raddr %s' % self.related_address
if self.related_port is not None:
sdp += ' rport %s' % self.related_port
if self.tcptype is not None:
sdp += ' tcptype %s' % self.tcptype
if self.generation is not None:
sdp += ' generation %d' % self.generation
return sdp | python | def to_sdp(self):
sdp = '%s %d %s %d %s %d typ %s' % (
self.foundation,
self.component,
self.transport,
self.priority,
self.host,
self.port,
self.type)
if self.related_address is not None:
sdp += ' raddr %s' % self.related_address
if self.related_port is not None:
sdp += ' rport %s' % self.related_port
if self.tcptype is not None:
sdp += ' tcptype %s' % self.tcptype
if self.generation is not None:
sdp += ' generation %d' % self.generation
return sdp | [
"def",
"to_sdp",
"(",
"self",
")",
":",
"sdp",
"=",
"'%s %d %s %d %s %d typ %s'",
"%",
"(",
"self",
".",
"foundation",
",",
"self",
".",
"component",
",",
"self",
".",
"transport",
",",
"self",
".",
"priority",
",",
"self",
".",
"host",
",",
"self",
".",
"port",
",",
"self",
".",
"type",
")",
"if",
"self",
".",
"related_address",
"is",
"not",
"None",
":",
"sdp",
"+=",
"' raddr %s'",
"%",
"self",
".",
"related_address",
"if",
"self",
".",
"related_port",
"is",
"not",
"None",
":",
"sdp",
"+=",
"' rport %s'",
"%",
"self",
".",
"related_port",
"if",
"self",
".",
"tcptype",
"is",
"not",
"None",
":",
"sdp",
"+=",
"' tcptype %s'",
"%",
"self",
".",
"tcptype",
"if",
"self",
".",
"generation",
"is",
"not",
"None",
":",
"sdp",
"+=",
"' generation %d'",
"%",
"self",
".",
"generation",
"return",
"sdp"
]
| Return a string representation suitable for SDP. | [
"Return",
"a",
"string",
"representation",
"suitable",
"for",
"SDP",
"."
]
| train | https://github.com/aiortc/aioice/blob/a04d810d94ec2d00eca9ce01eacca74b3b086616/aioice/candidate.py#L85-L105 |
aiortc/aioice | aioice/candidate.py | Candidate.can_pair_with | def can_pair_with(self, other):
"""
A local candidate is paired with a remote candidate if and only if
the two candidates have the same component ID and have the same IP
address version.
"""
a = ipaddress.ip_address(self.host)
b = ipaddress.ip_address(other.host)
return (
self.component == other.component and
self.transport.lower() == other.transport.lower() and
a.version == b.version
) | python | def can_pair_with(self, other):
a = ipaddress.ip_address(self.host)
b = ipaddress.ip_address(other.host)
return (
self.component == other.component and
self.transport.lower() == other.transport.lower() and
a.version == b.version
) | [
"def",
"can_pair_with",
"(",
"self",
",",
"other",
")",
":",
"a",
"=",
"ipaddress",
".",
"ip_address",
"(",
"self",
".",
"host",
")",
"b",
"=",
"ipaddress",
".",
"ip_address",
"(",
"other",
".",
"host",
")",
"return",
"(",
"self",
".",
"component",
"==",
"other",
".",
"component",
"and",
"self",
".",
"transport",
".",
"lower",
"(",
")",
"==",
"other",
".",
"transport",
".",
"lower",
"(",
")",
"and",
"a",
".",
"version",
"==",
"b",
".",
"version",
")"
]
| A local candidate is paired with a remote candidate if and only if
the two candidates have the same component ID and have the same IP
address version. | [
"A",
"local",
"candidate",
"is",
"paired",
"with",
"a",
"remote",
"candidate",
"if",
"and",
"only",
"if",
"the",
"two",
"candidates",
"have",
"the",
"same",
"component",
"ID",
"and",
"have",
"the",
"same",
"IP",
"address",
"version",
"."
]
| train | https://github.com/aiortc/aioice/blob/a04d810d94ec2d00eca9ce01eacca74b3b086616/aioice/candidate.py#L107-L119 |
aiortc/aioice | aioice/ice.py | candidate_pair_priority | def candidate_pair_priority(local, remote, ice_controlling):
"""
See RFC 5245 - 5.7.2. Computing Pair Priority and Ordering Pairs
"""
G = ice_controlling and local.priority or remote.priority
D = ice_controlling and remote.priority or local.priority
return (1 << 32) * min(G, D) + 2 * max(G, D) + (G > D and 1 or 0) | python | def candidate_pair_priority(local, remote, ice_controlling):
G = ice_controlling and local.priority or remote.priority
D = ice_controlling and remote.priority or local.priority
return (1 << 32) * min(G, D) + 2 * max(G, D) + (G > D and 1 or 0) | [
"def",
"candidate_pair_priority",
"(",
"local",
",",
"remote",
",",
"ice_controlling",
")",
":",
"G",
"=",
"ice_controlling",
"and",
"local",
".",
"priority",
"or",
"remote",
".",
"priority",
"D",
"=",
"ice_controlling",
"and",
"remote",
".",
"priority",
"or",
"local",
".",
"priority",
"return",
"(",
"1",
"<<",
"32",
")",
"*",
"min",
"(",
"G",
",",
"D",
")",
"+",
"2",
"*",
"max",
"(",
"G",
",",
"D",
")",
"+",
"(",
"G",
">",
"D",
"and",
"1",
"or",
"0",
")"
]
| See RFC 5245 - 5.7.2. Computing Pair Priority and Ordering Pairs | [
"See",
"RFC",
"5245",
"-",
"5",
".",
"7",
".",
"2",
".",
"Computing",
"Pair",
"Priority",
"and",
"Ordering",
"Pairs"
]
| train | https://github.com/aiortc/aioice/blob/a04d810d94ec2d00eca9ce01eacca74b3b086616/aioice/ice.py#L24-L30 |
aiortc/aioice | aioice/ice.py | get_host_addresses | def get_host_addresses(use_ipv4, use_ipv6):
"""
Get local IP addresses.
"""
addresses = []
for interface in netifaces.interfaces():
ifaddresses = netifaces.ifaddresses(interface)
for address in ifaddresses.get(socket.AF_INET, []):
if use_ipv4 and address['addr'] != '127.0.0.1':
addresses.append(address['addr'])
for address in ifaddresses.get(socket.AF_INET6, []):
if use_ipv6 and address['addr'] != '::1' and '%' not in address['addr']:
addresses.append(address['addr'])
return addresses | python | def get_host_addresses(use_ipv4, use_ipv6):
addresses = []
for interface in netifaces.interfaces():
ifaddresses = netifaces.ifaddresses(interface)
for address in ifaddresses.get(socket.AF_INET, []):
if use_ipv4 and address['addr'] != '127.0.0.1':
addresses.append(address['addr'])
for address in ifaddresses.get(socket.AF_INET6, []):
if use_ipv6 and address['addr'] != '::1' and '%' not in address['addr']:
addresses.append(address['addr'])
return addresses | [
"def",
"get_host_addresses",
"(",
"use_ipv4",
",",
"use_ipv6",
")",
":",
"addresses",
"=",
"[",
"]",
"for",
"interface",
"in",
"netifaces",
".",
"interfaces",
"(",
")",
":",
"ifaddresses",
"=",
"netifaces",
".",
"ifaddresses",
"(",
"interface",
")",
"for",
"address",
"in",
"ifaddresses",
".",
"get",
"(",
"socket",
".",
"AF_INET",
",",
"[",
"]",
")",
":",
"if",
"use_ipv4",
"and",
"address",
"[",
"'addr'",
"]",
"!=",
"'127.0.0.1'",
":",
"addresses",
".",
"append",
"(",
"address",
"[",
"'addr'",
"]",
")",
"for",
"address",
"in",
"ifaddresses",
".",
"get",
"(",
"socket",
".",
"AF_INET6",
",",
"[",
"]",
")",
":",
"if",
"use_ipv6",
"and",
"address",
"[",
"'addr'",
"]",
"!=",
"'::1'",
"and",
"'%'",
"not",
"in",
"address",
"[",
"'addr'",
"]",
":",
"addresses",
".",
"append",
"(",
"address",
"[",
"'addr'",
"]",
")",
"return",
"addresses"
]
| Get local IP addresses. | [
"Get",
"local",
"IP",
"addresses",
"."
]
| train | https://github.com/aiortc/aioice/blob/a04d810d94ec2d00eca9ce01eacca74b3b086616/aioice/ice.py#L33-L46 |
aiortc/aioice | aioice/ice.py | server_reflexive_candidate | async def server_reflexive_candidate(protocol, stun_server):
"""
Query STUN server to obtain a server-reflexive candidate.
"""
# lookup address
loop = asyncio.get_event_loop()
stun_server = (
await loop.run_in_executor(None, socket.gethostbyname, stun_server[0]),
stun_server[1])
# perform STUN query
request = stun.Message(message_method=stun.Method.BINDING,
message_class=stun.Class.REQUEST)
response, _ = await protocol.request(request, stun_server)
local_candidate = protocol.local_candidate
return Candidate(
foundation=candidate_foundation('srflx', 'udp', local_candidate.host),
component=local_candidate.component,
transport=local_candidate.transport,
priority=candidate_priority(local_candidate.component, 'srflx'),
host=response.attributes['XOR-MAPPED-ADDRESS'][0],
port=response.attributes['XOR-MAPPED-ADDRESS'][1],
type='srflx',
related_address=local_candidate.host,
related_port=local_candidate.port) | python | async def server_reflexive_candidate(protocol, stun_server):
loop = asyncio.get_event_loop()
stun_server = (
await loop.run_in_executor(None, socket.gethostbyname, stun_server[0]),
stun_server[1])
request = stun.Message(message_method=stun.Method.BINDING,
message_class=stun.Class.REQUEST)
response, _ = await protocol.request(request, stun_server)
local_candidate = protocol.local_candidate
return Candidate(
foundation=candidate_foundation('srflx', 'udp', local_candidate.host),
component=local_candidate.component,
transport=local_candidate.transport,
priority=candidate_priority(local_candidate.component, 'srflx'),
host=response.attributes['XOR-MAPPED-ADDRESS'][0],
port=response.attributes['XOR-MAPPED-ADDRESS'][1],
type='srflx',
related_address=local_candidate.host,
related_port=local_candidate.port) | [
"async",
"def",
"server_reflexive_candidate",
"(",
"protocol",
",",
"stun_server",
")",
":",
"# lookup address",
"loop",
"=",
"asyncio",
".",
"get_event_loop",
"(",
")",
"stun_server",
"=",
"(",
"await",
"loop",
".",
"run_in_executor",
"(",
"None",
",",
"socket",
".",
"gethostbyname",
",",
"stun_server",
"[",
"0",
"]",
")",
",",
"stun_server",
"[",
"1",
"]",
")",
"# perform STUN query",
"request",
"=",
"stun",
".",
"Message",
"(",
"message_method",
"=",
"stun",
".",
"Method",
".",
"BINDING",
",",
"message_class",
"=",
"stun",
".",
"Class",
".",
"REQUEST",
")",
"response",
",",
"_",
"=",
"await",
"protocol",
".",
"request",
"(",
"request",
",",
"stun_server",
")",
"local_candidate",
"=",
"protocol",
".",
"local_candidate",
"return",
"Candidate",
"(",
"foundation",
"=",
"candidate_foundation",
"(",
"'srflx'",
",",
"'udp'",
",",
"local_candidate",
".",
"host",
")",
",",
"component",
"=",
"local_candidate",
".",
"component",
",",
"transport",
"=",
"local_candidate",
".",
"transport",
",",
"priority",
"=",
"candidate_priority",
"(",
"local_candidate",
".",
"component",
",",
"'srflx'",
")",
",",
"host",
"=",
"response",
".",
"attributes",
"[",
"'XOR-MAPPED-ADDRESS'",
"]",
"[",
"0",
"]",
",",
"port",
"=",
"response",
".",
"attributes",
"[",
"'XOR-MAPPED-ADDRESS'",
"]",
"[",
"1",
"]",
",",
"type",
"=",
"'srflx'",
",",
"related_address",
"=",
"local_candidate",
".",
"host",
",",
"related_port",
"=",
"local_candidate",
".",
"port",
")"
]
| Query STUN server to obtain a server-reflexive candidate. | [
"Query",
"STUN",
"server",
"to",
"obtain",
"a",
"server",
"-",
"reflexive",
"candidate",
"."
]
| train | https://github.com/aiortc/aioice/blob/a04d810d94ec2d00eca9ce01eacca74b3b086616/aioice/ice.py#L49-L74 |
aiortc/aioice | aioice/ice.py | sort_candidate_pairs | def sort_candidate_pairs(pairs, ice_controlling):
"""
Sort a list of candidate pairs.
"""
def pair_priority(pair):
return -candidate_pair_priority(pair.local_candidate,
pair.remote_candidate,
ice_controlling)
pairs.sort(key=pair_priority) | python | def sort_candidate_pairs(pairs, ice_controlling):
def pair_priority(pair):
return -candidate_pair_priority(pair.local_candidate,
pair.remote_candidate,
ice_controlling)
pairs.sort(key=pair_priority) | [
"def",
"sort_candidate_pairs",
"(",
"pairs",
",",
"ice_controlling",
")",
":",
"def",
"pair_priority",
"(",
"pair",
")",
":",
"return",
"-",
"candidate_pair_priority",
"(",
"pair",
".",
"local_candidate",
",",
"pair",
".",
"remote_candidate",
",",
"ice_controlling",
")",
"pairs",
".",
"sort",
"(",
"key",
"=",
"pair_priority",
")"
]
| Sort a list of candidate pairs. | [
"Sort",
"a",
"list",
"of",
"candidate",
"pairs",
"."
]
| train | https://github.com/aiortc/aioice/blob/a04d810d94ec2d00eca9ce01eacca74b3b086616/aioice/ice.py#L77-L86 |
aiortc/aioice | aioice/ice.py | StunProtocol.request | async def request(self, request, addr, integrity_key=None, retransmissions=None):
"""
Execute a STUN transaction and return the response.
"""
assert request.transaction_id not in self.transactions
if integrity_key is not None:
request.add_message_integrity(integrity_key)
request.add_fingerprint()
transaction = stun.Transaction(request, addr, self, retransmissions=retransmissions)
transaction.integrity_key = integrity_key
self.transactions[request.transaction_id] = transaction
try:
return await transaction.run()
finally:
del self.transactions[request.transaction_id] | python | async def request(self, request, addr, integrity_key=None, retransmissions=None):
assert request.transaction_id not in self.transactions
if integrity_key is not None:
request.add_message_integrity(integrity_key)
request.add_fingerprint()
transaction = stun.Transaction(request, addr, self, retransmissions=retransmissions)
transaction.integrity_key = integrity_key
self.transactions[request.transaction_id] = transaction
try:
return await transaction.run()
finally:
del self.transactions[request.transaction_id] | [
"async",
"def",
"request",
"(",
"self",
",",
"request",
",",
"addr",
",",
"integrity_key",
"=",
"None",
",",
"retransmissions",
"=",
"None",
")",
":",
"assert",
"request",
".",
"transaction_id",
"not",
"in",
"self",
".",
"transactions",
"if",
"integrity_key",
"is",
"not",
"None",
":",
"request",
".",
"add_message_integrity",
"(",
"integrity_key",
")",
"request",
".",
"add_fingerprint",
"(",
")",
"transaction",
"=",
"stun",
".",
"Transaction",
"(",
"request",
",",
"addr",
",",
"self",
",",
"retransmissions",
"=",
"retransmissions",
")",
"transaction",
".",
"integrity_key",
"=",
"integrity_key",
"self",
".",
"transactions",
"[",
"request",
".",
"transaction_id",
"]",
"=",
"transaction",
"try",
":",
"return",
"await",
"transaction",
".",
"run",
"(",
")",
"finally",
":",
"del",
"self",
".",
"transactions",
"[",
"request",
".",
"transaction_id",
"]"
]
| Execute a STUN transaction and return the response. | [
"Execute",
"a",
"STUN",
"transaction",
"and",
"return",
"the",
"response",
"."
]
| train | https://github.com/aiortc/aioice/blob/a04d810d94ec2d00eca9ce01eacca74b3b086616/aioice/ice.py#L179-L195 |
aiortc/aioice | aioice/ice.py | StunProtocol.send_stun | def send_stun(self, message, addr):
"""
Send a STUN message.
"""
self.__log_debug('> %s %s', addr, message)
self.transport.sendto(bytes(message), addr) | python | def send_stun(self, message, addr):
self.__log_debug('> %s %s', addr, message)
self.transport.sendto(bytes(message), addr) | [
"def",
"send_stun",
"(",
"self",
",",
"message",
",",
"addr",
")",
":",
"self",
".",
"__log_debug",
"(",
"'> %s %s'",
",",
"addr",
",",
"message",
")",
"self",
".",
"transport",
".",
"sendto",
"(",
"bytes",
"(",
"message",
")",
",",
"addr",
")"
]
| Send a STUN message. | [
"Send",
"a",
"STUN",
"message",
"."
]
| train | https://github.com/aiortc/aioice/blob/a04d810d94ec2d00eca9ce01eacca74b3b086616/aioice/ice.py#L200-L205 |