function | repo_name | features
---|---|---|
def start(self, rev):
return int(self.index[rev][0] >> 16) | mikel-egana-aranguren/SADI-Galaxy-Docker | [1, 3, 1, 1, 1417087373] |
def length(self, rev):
return self.index[rev][1] | mikel-egana-aranguren/SADI-Galaxy-Docker | [1, 3, 1, 1, 1417087373] |
def flags(self, rev):
return self.index[rev][0] & 0xFFFF | mikel-egana-aranguren/SADI-Galaxy-Docker | [1, 3, 1, 1, 1417087373] |
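The three accessors above unpack a single revlog index field: the byte offset lives in the high bits and 16 flag bits live in the low bits. A minimal sketch of that packing, using the same `offset_type` helper name that appears later in `_addrevision` (the real helper is defined elsewhere in the original module):

```python
# Sketch of the offset/flags packing assumed by start() and flags():
# high bits carry the data offset, the low 16 bits carry flags.
def offset_type(offset, flags):
    return (offset << 16) | (flags & 0xFFFF)

packed = offset_type(1024, 0x1)
assert packed >> 16 == 1024      # what start() recovers
assert packed & 0xFFFF == 0x1    # what flags() recovers
```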
def reachable(self, node, stop=None):
"""return the set of all nodes ancestral to a given node, including
the node itself, stopping when stop is matched"""
reachable = set((node,))
visit = [node]
if stop:
stopn = self.rev(stop)
else:
stopn = 0
while visit:
n = visit.pop(0)
if n == stop:
continue
if n == nullid:
continue
for p in self.parents(n):
if self.rev(p) < stopn:
continue
if p not in reachable:
reachable.add(p)
visit.append(p)
return reachable | mikel-egana-aranguren/SADI-Galaxy-Docker | [1, 3, 1, 1, 1417087373] |
def descendants(self, *revs):
"""Generate the descendants of 'revs' in revision order.
Yield a sequence of revision numbers starting with a child of
some rev in revs, i.e., each revision is *not* considered a
descendant of itself. Results are ordered by revision number (a
topological sort)."""
first = min(revs)
if first == nullrev:
for i in self:
yield i
return
seen = set(revs)
for i in xrange(first + 1, len(self)):
for x in self.parentrevs(i):
if x != nullrev and x in seen:
seen.add(i)
yield i
break | mikel-egana-aranguren/SADI-Galaxy-Docker | [1, 3, 1, 1, 1417087373] |
def findmissing(self, common=None, heads=None):
"""Return the ancestors of heads that are not ancestors of common.
More specifically, return a list of nodes N such that every N
satisfies the following constraints:
1. N is an ancestor of some node in 'heads'
2. N is not an ancestor of any node in 'common'
The list is sorted by revision number, meaning it is
topologically sorted.
'heads' and 'common' are both lists of node IDs. If heads is
not supplied, uses all of the revlog's heads. If common is not
supplied, uses nullid."""
_common, missing = self.findcommonmissing(common, heads)
return missing | mikel-egana-aranguren/SADI-Galaxy-Docker | [1, 3, 1, 1, 1417087373] |
def headrevs(self):
count = len(self)
if not count:
return [nullrev]
ishead = [1] * (count + 1)
index = self.index
for r in xrange(count):
e = index[r]
ishead[e[5]] = ishead[e[6]] = 0
return [r for r in xrange(count) if ishead[r]] | mikel-egana-aranguren/SADI-Galaxy-Docker | [1, 3, 1, 1, 1417087373] |
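`headrevs` marks every revision that appears as a parent (index fields 5 and 6) as a non-head; whatever is never a parent is a head. The original allocates one extra `ishead` slot so that nullrev (-1) harmlessly indexes it. A self-contained illustration of the same idea, with an assumed `parentrevs` mapping:

```python
# Toy head detection: a revision is a head iff no revision lists it
# as a parent. nullrev (-1) means "no parent".
nullrev = -1
parentrevs = {0: (nullrev, nullrev), 1: (0, nullrev), 2: (0, nullrev)}
ishead = [1] * len(parentrevs)
for p1, p2 in parentrevs.values():
    for p in (p1, p2):
        if p != nullrev:
            ishead[p] = 0
print([r for r in parentrevs if ishead[r]])  # [1, 2]
```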
def children(self, node):
"""find the children of a given node"""
c = []
p = self.rev(node)
for r in range(p + 1, len(self)):
prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
if prevs:
for pr in prevs:
if pr == p:
c.append(self.node(r))
elif p == nullrev:
c.append(self.node(r))
return c | mikel-egana-aranguren/SADI-Galaxy-Docker | [1, 3, 1, 1, 1417087373] |
def ancestor(self, a, b):
"""calculate the least common ancestor of nodes a and b"""
# fast path, check if it is a descendant
a, b = self.rev(a), self.rev(b)
start, end = sorted((a, b))
if self.descendant(start, end):
return self.node(start)
def parents(rev):
return [p for p in self.parentrevs(rev) if p != nullrev]
c = ancestor.ancestor(a, b, parents)
if c is None:
return nullid
return self.node(c) | mikel-egana-aranguren/SADI-Galaxy-Docker | [1, 3, 1, 1, 1417087373] |
def _partialmatch(self, id):
if id in self._pcache:
return self._pcache[id]
if len(id) < 40:
try:
# hex(node)[:...]
l = len(id) // 2 # grab an even number of digits
prefix = bin(id[:l * 2])
nl = [e[7] for e in self.index if e[7].startswith(prefix)]
nl = [n for n in nl if hex(n).startswith(id)]
if len(nl) > 0:
if len(nl) == 1:
self._pcache[id] = nl[0]
return nl[0]
raise LookupError(id, self.indexfile,
_('ambiguous identifier'))
return None
except TypeError:
pass | mikel-egana-aranguren/SADI-Galaxy-Docker | [1, 3, 1, 1, 1417087373] |
def cmp(self, node, text):
"""compare text with a given file revision
returns True if text is different from what is stored.
"""
p1, p2 = self.parents(node)
return hash(text, p1, p2) != node | mikel-egana-aranguren/SADI-Galaxy-Docker | [1, 3, 1, 1, 1417087373] |
def _loadchunk(self, offset, length):
if self._inline:
df = self.opener(self.indexfile)
else:
df = self.opener(self.datafile)
readahead = max(65536, length)
df.seek(offset)
d = df.read(readahead)
df.close()
self._addchunk(offset, d)
if readahead > length:
return util.buffer(d, 0, length)
return d | mikel-egana-aranguren/SADI-Galaxy-Docker | [1, 3, 1, 1, 1417087373] |
def _chunkraw(self, startrev, endrev):
start = self.start(startrev)
length = self.end(endrev) - start
if self._inline:
start += (startrev + 1) * self._io.size
return self._getchunk(start, length) | mikel-egana-aranguren/SADI-Galaxy-Docker | [1, 3, 1, 1, 1417087373] |
def _chunkbase(self, rev):
return self._chunk(rev) | mikel-egana-aranguren/SADI-Galaxy-Docker | [1, 3, 1, 1, 1417087373] |
def deltaparent(self, rev):
"""return deltaparent of the given revision"""
base = self.index[rev][3]
if base == rev:
return nullrev
elif self._generaldelta:
return base
else:
return rev - 1 | mikel-egana-aranguren/SADI-Galaxy-Docker | [1, 3, 1, 1, 1417087373] |
def revision(self, nodeorrev):
"""return an uncompressed revision of a given node or revision
number.
"""
if isinstance(nodeorrev, int):
rev = nodeorrev
node = self.node(rev)
else:
node = nodeorrev
rev = None
cachedrev = None
if node == nullid:
return ""
if self._cache:
if self._cache[0] == node:
return self._cache[2]
cachedrev = self._cache[1]
# look up what we need to read
text = None
if rev is None:
rev = self.rev(node)
# check rev flags
if self.flags(rev) & ~REVIDX_KNOWN_FLAGS:
raise RevlogError(_('incompatible revision flag %x') %
(self.flags(rev) & ~REVIDX_KNOWN_FLAGS))
# build delta chain
chain = []
index = self.index # for performance
generaldelta = self._generaldelta
iterrev = rev
e = index[iterrev]
while iterrev != e[3] and iterrev != cachedrev:
chain.append(iterrev)
if generaldelta:
iterrev = e[3]
else:
iterrev -= 1
e = index[iterrev]
chain.reverse()
base = iterrev
if iterrev == cachedrev:
# cache hit
text = self._cache[2]
# drop cache to save memory
self._cache = None
self._chunkraw(base, rev)
if text is None:
text = str(self._chunkbase(base))
bins = [self._chunk(r) for r in chain]
text = mdiff.patches(text, bins)
text = self._checkhash(text, node, rev)
self._cache = (node, rev, text)
return text | mikel-egana-aranguren/SADI-Galaxy-Docker | [1, 3, 1, 1, 1417087373] |
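`revision` walks the delta chain back to a full base text, then re-applies the deltas with `mdiff.patches`. A toy version of that reconstruction step (the lambdas stand in for binary deltas; the real patcher is Mercurial's `mdiff`):

```python
# Toy delta-chain reconstruction: start from a full base text and apply
# deltas in base-to-target order, as revision() does after chain.reverse().
def apply_deltas(base_text, deltas):
    text = base_text
    for d in deltas:
        text = d(text)
    return text

full = "line1\n"
chain = [lambda t: t + "line2\n", lambda t: t + "line3\n"]
assert apply_deltas(full, chain) == "line1\nline2\nline3\n"
```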
def checkinlinesize(self, tr, fp=None):
if not self._inline or (self.start(-2) + self.length(-2)) < _maxinline:
return
trinfo = tr.find(self.indexfile)
if trinfo is None:
raise RevlogError(_("%s not found in the transaction")
% self.indexfile)
trindex = trinfo[2]
dataoff = self.start(trindex)
tr.add(self.datafile, dataoff)
if fp:
fp.flush()
fp.close()
df = self.opener(self.datafile, 'w')
try:
for r in self:
df.write(self._chunkraw(r, r))
finally:
df.close()
fp = self.opener(self.indexfile, 'w', atomictemp=True)
self.version &= ~(REVLOGNGINLINEDATA)
self._inline = False
for i in self:
e = self._io.packentry(self.index[i], self.node, self.version, i)
fp.write(e)
# if we don't call close, the temp file will never replace the
# real index
fp.close()
tr.replace(self.indexfile, trindex * self._io.size)
self._chunkclear() | mikel-egana-aranguren/SADI-Galaxy-Docker | [1, 3, 1, 1, 1417087373] |
def _addrevision(self, node, text, transaction, link, p1, p2,
cachedelta, ifh, dfh):
"""internal function to add revisions to the log
see addrevision for argument descriptions.
invariants:
- text is optional (can be None); if not set, cachedelta must be set.
if both are set, they must correspond to eachother.
"""
btext = [text]
def buildtext():
if btext[0] is not None:
return btext[0]
# flush any pending writes here so we can read it in revision
if dfh:
dfh.flush()
ifh.flush()
basetext = self.revision(self.node(cachedelta[0]))
btext[0] = mdiff.patch(basetext, cachedelta[1])
chk = hash(btext[0], p1, p2)
if chk != node:
raise RevlogError(_("consistency error in delta"))
return btext[0]
def builddelta(rev):
# can we use the cached delta?
if cachedelta and cachedelta[0] == rev:
delta = cachedelta[1]
else:
t = buildtext()
ptext = self.revision(self.node(rev))
delta = mdiff.textdiff(ptext, t)
data = compress(delta)
l = len(data[1]) + len(data[0])
if basecache[0] == rev:
chainbase = basecache[1]
else:
chainbase = self.chainbase(rev)
dist = l + offset - self.start(chainbase)
if self._generaldelta:
base = rev
else:
base = chainbase
return dist, l, data, base, chainbase
curr = len(self)
prev = curr - 1
base = chainbase = curr
offset = self.end(prev)
flags = 0
d = None
basecache = self._basecache
p1r, p2r = self.rev(p1), self.rev(p2)
# should we try to build a delta?
if prev != nullrev:
if self._generaldelta:
if p1r >= basecache[1]:
d = builddelta(p1r)
elif p2r >= basecache[1]:
d = builddelta(p2r)
else:
d = builddelta(prev)
else:
d = builddelta(prev)
dist, l, data, base, chainbase = d
# full versions are inserted when the needed deltas
# become comparable to the uncompressed text
if text is None:
textlen = mdiff.patchedsize(self.rawsize(cachedelta[0]),
cachedelta[1])
else:
textlen = len(text)
if d is None or dist > textlen * 2:
text = buildtext()
data = compress(text)
l = len(data[1]) + len(data[0])
base = chainbase = curr
e = (offset_type(offset, flags), l, textlen,
base, link, p1r, p2r, node)
self.index.insert(-1, e)
self.nodemap[node] = curr
entry = self._io.packentry(e, self.node, self.version, curr)
if not self._inline:
transaction.add(self.datafile, offset)
transaction.add(self.indexfile, curr * len(entry))
if data[0]:
dfh.write(data[0])
dfh.write(data[1])
dfh.flush()
ifh.write(entry)
else:
offset += curr * self._io.size
transaction.add(self.indexfile, offset, curr)
ifh.write(entry)
ifh.write(data[0])
ifh.write(data[1])
self.checkinlinesize(transaction, ifh)
if type(text) == str: # only accept immutable objects
self._cache = (node, curr, text)
self._basecache = (curr, chainbase)
return node | mikel-egana-aranguren/SADI-Galaxy-Docker | [1, 3, 1, 1, 1417087373] |
def addgroup(self, bundle, linkmapper, transaction):
"""
add a delta group
given a set of deltas, add them to the revision log. the
first delta is against its parent, which should be in our
log, the rest are against the previous delta.
"""
# track the base of the current delta log
content = []
node = None
r = len(self)
end = 0
if r:
end = self.end(r - 1)
ifh = self.opener(self.indexfile, "a+")
isize = r * self._io.size
if self._inline:
transaction.add(self.indexfile, end + isize, r)
dfh = None
else:
transaction.add(self.indexfile, isize, r)
transaction.add(self.datafile, end)
dfh = self.opener(self.datafile, "a")
try:
# loop through our set of deltas
chain = None
while True:
chunkdata = bundle.deltachunk(chain)
if not chunkdata:
break
node = chunkdata['node']
p1 = chunkdata['p1']
p2 = chunkdata['p2']
cs = chunkdata['cs']
deltabase = chunkdata['deltabase']
delta = chunkdata['delta']
content.append(node)
link = linkmapper(cs)
if node in self.nodemap:
# this can happen if two branches make the same change
chain = node
continue
for p in (p1, p2):
if not p in self.nodemap:
raise LookupError(p, self.indexfile,
_('unknown parent'))
if deltabase not in self.nodemap:
raise LookupError(deltabase, self.indexfile,
_('unknown delta base'))
baserev = self.rev(deltabase)
chain = self._addrevision(node, None, transaction, link,
p1, p2, (baserev, delta), ifh, dfh)
if not dfh and not self._inline:
# addrevision switched from inline to conventional
# reopen the index
ifh.close()
dfh = self.opener(self.datafile, "a")
ifh = self.opener(self.indexfile, "a")
finally:
if dfh:
dfh.close()
ifh.close()
return content | mikel-egana-aranguren/SADI-Galaxy-Docker | [1, 3, 1, 1, 1417087373] |
def checksize(self):
expected = 0
if len(self):
expected = max(0, self.end(len(self) - 1))
try:
f = self.opener(self.datafile)
f.seek(0, 2)
actual = f.tell()
f.close()
dd = actual - expected
except IOError, inst:
if inst.errno != errno.ENOENT:
raise
dd = 0
try:
f = self.opener(self.indexfile)
f.seek(0, 2)
actual = f.tell()
f.close()
s = self._io.size
i = max(0, actual // s)
di = actual - (i * s)
if self._inline:
databytes = 0
for r in self:
databytes += max(0, self.length(r))
dd = 0
di = actual - len(self) * s - databytes
except IOError, inst:
if inst.errno != errno.ENOENT:
raise
di = 0
return (dd, di) | mikel-egana-aranguren/SADI-Galaxy-Docker | [1, 3, 1, 1, 1417087373] |
def __init__(self, ini_path='', section='', debug=False):
"""
To init CCParser you can enter a path
and a section. If you don't know them yet,
you can leave them empty.
If debug is set to True, all exceptions
will print their tracebacks.
"""
self._debug = debug
self._config = configparser.ConfigParser()
if ini_path != '':
self.set_configuration_path(ini_path)
if section != '':
self.set_section(section)
self.__default_bool = False
self.__default_string = ''
self.__default_int = 0
self.__default_float = 0.0
self.__default_list = []
self._accepted_true_bool = ('true', 'yes') # must be lower case
self._accepted_false_bool = ('false', 'no') # must be lower case | rsm-gh/alienware-kbl | [111, 23, 111, 50, 1478047481] |
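A hypothetical usage sketch of CCParser, built only from the methods shown in this file; the INI path, section, and option names are illustrative:

```python
# Illustrative only: path, section and option names are made up.
cp = CCParser('/tmp/settings.ini', 'general')
cp.set_default_float(1.0)
volume = cp.get_float('volume')                # 1.0 if missing or not a float
enabled = cp.get_bool_defval('enabled', True)  # True if missing or unparsable
```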
def __str__(self):
return ''' | rsm-gh/alienware-kbl | [111, 23, 111, 50, 1478047481] |
def check_value(self, value):
"""
return False if the value doesn't exist,
return True if the value exists
"""
if not os.path.exists(self.ini_path):
return False
else:
try:
self._config.read(self.ini_path)
except Exception:
print("CCParser Warning: reading damaged file or file without section")
print(traceback.format_exc())
print()
return False
if not self._config.has_section(self.__section):
return False
elif self._config.has_option(self.__section, value):
return True
else:
return False | rsm-gh/alienware-kbl | [111, 23, 111, 50, 1478047481] |
def get_float(self, value):
"""
If the value exists, return the float
corresponding to the string. If it does
not exist, or the value cannot be converted
to a float, return the default float.
"""
if self.check_value(value):
val = self._config.get(self.__section, value)
try:
val = float(val)
return val
except Exception:
if self._debug:
print(traceback.format_exc())
return self.__default_float
else:
return self.__default_float | rsm-gh/alienware-kbl | [111, 23, 111, 50, 1478047481] |
def get_list(self, value):
"""
If the value exists, return the list
obtained by splitting the string on "|".
If it does not exist, return the default
list.
"""
if self.check_value(value):
val = self._config.get(self.__section, value)
try:
val = val.split("|")
return val
except Exception:
if self._debug:
print(traceback.format_exc())
return self.__default_list
else:
return self.__default_list | rsm-gh/alienware-kbl | [111, 23, 111, 50, 1478047481] |
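`get_list` simply splits the stored string on `"|"` with no escaping, so a value that contains a pipe cannot round-trip; an assumed INI line and its result:

```python
# Round-trip assumed by get_list(): values are stored pipe-separated,
# e.g. an INI line "colors = red|green|blue".
assert 'red|green|blue'.split('|') == ['red', 'green', 'blue']
```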
def get_bool_defval(self, value, default):
"""
If the value exists, return the boolean
corresponding to the string. If it does
not exist, or the value cannot be converted
to a boolean, return the second argument.
"""
if self.check_value(value):
val = self._config.get(self.__section, value).lower()
if val in self._accepted_false_bool:
return False
elif val in self._accepted_true_bool:
return True
else:
return default
else:
return default | rsm-gh/alienware-kbl | [111, 23, 111, 50, 1478047481] |
def get_int_defval(self, value, default):
"""
If the value exists, return the integer
corresponding to the string. If it does
not exist, or the value cannot be converted
to an integer, return the second argument.
"""
if self.check_value(value):
val = self._config.get(self.__section, value)
try:
val = int(val)
return val
except Exception:
if self._debug:
print(traceback.format_exc())
return default
else:
return default | rsm-gh/alienware-kbl | [111, 23, 111, 50, 1478047481] |
def set_configuration_path(self, ini_path):
"""
Set the path to the configuration file.
"""
if isinstance(ini_path, str):
self.ini_path = ini_path
if not os.path.exists(ini_path) and self._debug:
print("CCParser Warning: the path to the configuration file does not exists\n")
else:
print("CCParser Warning: The path is not valid.\n")
self.ini_path = '' | rsm-gh/alienware-kbl | [111, 23, 111, 50, 1478047481] |
def set_default_float(self, value):
"""
Set the default float to return when
a value does not exist. By default
it returns 0.0
"""
self.__default_float = value | rsm-gh/alienware-kbl | [111, 23, 111, 50, 1478047481] |
def set_default_bool(self, value):
"""
Set the default boolean to return when
a value does not exist. By default
it returns False
"""
self.__default_bool = value | rsm-gh/alienware-kbl | [111, 23, 111, 50, 1478047481] |
def set_default_list(self, value):
"""
Set the default list to return when
a value does not exist. By default
it returns an empty list
"""
self.__default_list = value | rsm-gh/alienware-kbl | [111, 23, 111, 50, 1478047481] |
def get_default_bool(self):
return self.__default_bool | rsm-gh/alienware-kbl | [111, 23, 111, 50, 1478047481] |
def get_default_str(self):
return self.__default_string | rsm-gh/alienware-kbl | [111, 23, 111, 50, 1478047481] |
def get_default_list(self):
return self.__default_list | rsm-gh/alienware-kbl | [111, 23, 111, 50, 1478047481] |
def get_configuration_path(self):
return self.ini_path | rsm-gh/alienware-kbl | [111, 23, 111, 50, 1478047481] |
def test(path): | rsm-gh/alienware-kbl | [111, 23, 111, 50, 1478047481] |
def upgrade():
''' Drop the columns calendar_multiple_meetings and
calendar_regional_meetings and rename meeting_region into
meeting_location.
'''
op.drop_column('calendars', 'calendar_multiple_meetings')
op.drop_column('calendars', 'calendar_regional_meetings')
op.alter_column(
'meetings',
column_name='meeting_region',
name='meeting_location',
type_=sa.Text,
existing_type=sa.String(100)) | fedora-infra/fedocal | [24, 19, 24, 1, 1363980910] |
def extractbody(m) :
begin = re.compile("\\\\begin\s*")
m= begin.sub("\\\\begin",m)
end = re.compile("\\\\end\s*")
m = end.sub("\\\\end",m) | theoj2/Nibbletex | [1, 1, 1, 6, 1395387630] |
def convertsqb(m) :
r = re.compile("\\\\item\\s*\\[.*?\\]")
Litems = r.findall(m)
Lrest = r.split(m)
m = Lrest[0]
for i in range(0,len(Litems)) :
s= Litems[i]
s=s.replace("\\item","\\nitem")
s=s.replace("[","{")
s=s.replace("]","}")
m=m+s+Lrest[i+1]
r = re.compile("\\\\begin\\s*\\{\\w+}\\s*\\[.*?\\]")
Lthms = r.findall(m)
Lrest = r.split(m)
m = Lrest[0]
for i in range(0,len(Lthms)) :
s= Lthms[i]
s=s.replace("\\begin","\\nbegin")
s=s.replace("[","{")
s=s.replace("]","}")
m=m+s+Lrest[i+1]
return m | theoj2/Nibbletex | [1, 1, 1, 6, 1395387630] |
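`convertsqb` rewrites bracketed optional arguments into brace groups on renamed macros (`\item` to `\nitem`, `\begin` to `\nbegin`) so later passes can treat them uniformly; a direct call on a small input shows the transformation:

```python
# Bracketed optional arguments become brace groups on renamed macros:
print(convertsqb(r"\item[(a)] first"))   # -> \nitem{(a)} first
```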
def convertmacros(m) :
comm = re.compile("\\\\[a-zA-Z]*")
commands = comm.findall(m)
rest = comm.split(m)
r= rest[0]
for i in range( len (commands) ) :
for s1,s2 in M :
if s1==commands[i] :
commands[i] = s2
r=r+commands[i]+rest[i+1]
return(r) | theoj2/Nibbletex | [1, 1, 1, 6, 1395387630] |
def separatemath(m) :
mathre = re.compile("\\$.*?\\$"
"|\\\\begin\\{equation}.*?\\\\end\\{equation}"
"|\\\\\\[.*?\\\\\\]")
math = mathre.findall(m)
text = mathre.split(m)
return(math,text) | theoj2/Nibbletex | [1, 1, 1, 6, 1395387630] |
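`separatemath` alternates math spans (inline `$...$`, `equation` environments, and `\[...\]` display math) with the text between them; tracing it on a small input:

```python
# separatemath() returns the math spans and the text fragments between them.
src = r"Let $x>0$. Then \[x^2>0\] holds."
math, text = separatemath(src)
# math -> ['$x>0$', '\\[x^2>0\\]']
# text -> ['Let ', '. Then ', ' holds.']
```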
def convertcolors(m,c) :
if m.find("begin") != -1 :
return("<span style=\"color:#"+colors[c]+";\">")
else :
return("</span>") | theoj2/Nibbletex | [
1,
1,
1,
6,
1395387630
] |
def convertenum(m) :
if m.find("begin") != -1 :
return ("\n\n<ol>")
else :
return ("\n</ol>\n\n") | theoj2/Nibbletex | [
1,
1,
1,
6,
1395387630
] |
def convertbeginthm(thm) :
global inthm
count[T[thm]] +=1
inthm = thm
t = beginthm.replace("_ThmType_",thm.capitalize())
t = t.replace("_ThmNumb_",str(count[T[thm]]))
return(t) | theoj2/Nibbletex | [1, 1, 1, 6, 1395387630] |
def convertendthm(thm) :
global inthm
inthm = ""
return(endthm) | theoj2/Nibbletex | [1, 1, 1, 6, 1395387630] |
def convertproof(m) :
if m.find("begin") != -1 :
return(beginproof)
else :
return(endproof) | theoj2/Nibbletex | [1, 1, 1, 6, 1395387630] |
def convertsection (m) : | theoj2/Nibbletex | [1, 1, 1, 6, 1395387630] |
def convertsubsection (m) : | theoj2/Nibbletex | [1, 1, 1, 6, 1395387630] |
def converturl (m) :
L = cb.split(m)
return ("<a href=\""+L[1]+"\">"+L[3]+"</a>") | theoj2/Nibbletex | [
1,
1,
1,
6,
1395387630
] |
def convertimage (m) :
L = cb.split (m)
return ("<p align=center><img "+L[1] + " src=\""+L[3]
+"\"></p>") | theoj2/Nibbletex | [
1,
1,
1,
6,
1395387630
] |
def processtext ( t ) :
p = re.compile("\\\\begin\\{\\w+}"
"|\\\\nbegin\\{\\w+}\\s*\\{.*?}"
"|\\\\end\\{\\w+}"
"|\\\\item"
"|\\\\nitem\\s*\\{.*?}"
"|\\\\label\\s*\\{.*?}"
"|\\\\section\\s*\\{.*?}"
"|\\\\section\\*\\s*\\{.*?}"
"|\\\\subsection\\s*\\{.*?}"
"|\\\\subsection\\*\\s*\\{.*?}"
"|\\\\href\\s*\\{.*?}\\s*\\{.*?}"
"|\\\\hrefnosnap\\s*\\{.*?}\\s*\\{.*?}"
"|\\\\image\\s*\\{.*?}\\s*\\{.*?}\\s*\\{.*?}"
"|\\\\sout\\s*\\{.*?}") | theoj2/Nibbletex | [
1,
1,
1,
6,
1395387630
] |
def processfontstyle(w) :
close = dict()
ww = ""
level = i = 0
while i < len(w):
special = False
for k, v in fontstyle.items():
l = len(k)
if w[i:i+l] == k:
level += 1
ww += '<' + v + '>'
close[level] = '</' + v + '>'
i += l
special = True
if not special:
if w[i] == '{':
ww += '{'
level += 1
close[level] = '}'
elif w[i] == '}' and level > 0:
ww += close[level]
level -= 1
else:
ww += w[i]
i += 1
return ww | theoj2/Nibbletex | [1, 1, 1, 6, 1395387630] |
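`processfontstyle` scans the string character by character, opening an HTML tag whenever a key of the module-level `fontstyle` dict matches and closing it at the balancing brace. With an assumed mapping (the real dict is defined elsewhere in the original file):

```python
# Assumed mapping for illustration; the real fontstyle dict lives
# elsewhere in the original module.
fontstyle = {'{\\bf ': 'b', '{\\em ': 'em'}
print(processfontstyle('{\\bf bold} plain'))   # -> <b>bold</b> plain
```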
def convertref(m) :
global ref | theoj2/Nibbletex | [1, 1, 1, 6, 1395387630] |
def __init__(self, line_string):
"""
To initialize a Cluster object, only a string compliant with the
format of a cluster in an OrthoMCL groups file has to be provided.
This line should contain the name of the group, a colon, and the
sequences belonging to that group separated by whitespace
:param line_string: String of a cluster
"""
# Initializing attributes for parse_string
self.name = None
self.sequences = None
self.species_frequency = {}
# Initializing attributes for apply filter
# If the value is different than None, this will inform downstream
# objects of whether this cluster is compliant with the specified
# gene_threshold
self.gene_compliant = None
# If the value is different than None, this will inform downstream
# objects of whether this cluster is compliant with the specified
# species_threshold
self.species_compliant = None
self.parse_string(line_string) | ODiogoSilva/TriFusion | [83, 23, 83, 21, 1392138811] |
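The constructor expects one OrthoMCL groups line: group name, a colon, then whitespace-separated `taxon|sequence` identifiers (the `taxon|` prefix is what `remove_taxa` splits on). A hypothetical example line:

```python
# Hypothetical OrthoMCL group line; group and taxon names are made up.
cl = Cluster("group_1: taxonA|seq1 taxonA|seq2 taxonB|seq7")
```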
def remove_taxa(self, taxa_list):
"""
Removes the taxa contained in taxa_list from self.sequences and
self.species_frequency
:param taxa_list: list, each element should be a taxon name
"""
self.sequences = [x for x in self.sequences if x.split("|")[0]
not in taxa_list]
self.species_frequency = dict((taxon, val) for taxon, val in
self.species_frequency.items()
if taxon not in taxa_list) | ODiogoSilva/TriFusion | [83, 23, 83, 21, 1392138811] |
def __init__(self, groups_file, gene_threshold=None,
species_threshold=None, ns=None):
self.gene_threshold = gene_threshold if gene_threshold else None
self.species_threshold = species_threshold if species_threshold \
else None
# Attribute containing the list of included species
self.species_list = []
# Attribute that will contain taxa to be excluded from analyses
self.excluded_taxa = []
self.species_frequency = []
# Attributes that will store the number (int) of cluster after gene and
# species filter
self.all_clusters = 0
self.num_gene_compliant = 0
self.num_species_compliant = 0
self.all_compliant = 0
# Attribute containing the total number of sequences
self.total_seqs = 0
# Attribute containing the maximum number of extra copies found in the
# clusters
self.max_extra_copy = 0
# Attribute with name of the group file, which will be an ID
self.name = os.path.abspath(groups_file)
self.table = groups_file.split(os.sep)[-1].split(".")[0]
# Initialize attribute containing the groups filtered using the gene and
# species threshold. This attribute can be updated at any time using
# the update_filtered_group method
self.filtered_groups = []
self._parse_groups(ns)
if type(self.species_threshold) is float:
self._get_sp_proportion() | ODiogoSilva/TriFusion | [83, 23, 83, 21, 1392138811] |
def iter_species_frequency(self):
"""
In order to prevent permanent changes to the species_frequency
attribute due to the filtering of taxa, this iterable should be used
instead of the said variable. This creates a temporary deepcopy of
species_frequency which will be iterated over and eventually modified.
"""
# Since the items of species_frequency are mutable, this ensures
# that even those objects are correctly cloned
sp_freq = copy.deepcopy(self.species_frequency)
for cl in sp_freq:
yield cl | ODiogoSilva/TriFusion | [83, 23, 83, 21, 1392138811] |
def _apply_filter(self, cl):
"""
Sets or updates the basic group statistics, such as the number of
orthologs compliant with the gene copy and minimum taxa filters.
:param cl: dictionary. Contains the number of occurrences for each
taxon present in the ortholog cluster
(e.g. {"taxonA": 2, "taxonB": 1).
"""
# First, remove excluded taxa from the cl object since this will
# impact all other filters
for tx in self.excluded_taxa:
cl.pop(tx, None)
if cl:
self.all_clusters += 1
extra_copies = max(cl.values())
if extra_copies > self.max_extra_copy:
self.max_extra_copy = extra_copies
if extra_copies <= self.gene_threshold and self.gene_threshold and\
len(cl) >= self.species_threshold and \
self.species_threshold:
self.num_gene_compliant += 1
self.num_species_compliant += 1
self.all_compliant += 1
elif (extra_copies <= self.gene_threshold and
self.gene_threshold) or self.gene_threshold == 0:
self.num_gene_compliant += 1
elif len(cl) >= self.species_threshold and \
self.species_threshold:
self.num_species_compliant += 1 | ODiogoSilva/TriFusion | [83, 23, 83, 21, 1392138811] |
def _reset_counter(self):
self.all_clusters = 0
self.num_gene_compliant = 0
self.num_species_compliant = 0
self.all_compliant = 0 | ODiogoSilva/TriFusion | [83, 23, 83, 21, 1392138811] |
def exclude_taxa(self, taxa_list, update_stats=False):
"""
Updates the excluded_taxa attribute and updates group statistics if
update_stats is True. This does not change the Group object data
permanently, only sets an attribute that will be taken into account
when plotting and exporting data.
:param taxa_list: list. List of taxa that should be excluded from
downstream operations
:param update_stats: boolean. If True, it will update the group
statistics
"""
# If the taxa_list is the same as the excluded_taxa attribute,
# there is nothing to do
if sorted(taxa_list) == sorted(self.excluded_taxa):
return
self.species_list = [x for x in self.species_list + self.excluded_taxa
if x not in taxa_list]
self.excluded_taxa = taxa_list
if update_stats:
self._reset_counter()
for cl in self.iter_species_frequency():
self._apply_filter(cl) | ODiogoSilva/TriFusion | [83, 23, 83, 21, 1392138811] |
def _get_sp_proportion(self):
"""
When the species filter is a float value between 0 and 1, convert
this proportion into an absolute value (truncated by int()), since
filters were already designed for absolutes.
"""
self.species_threshold = int(self.species_threshold *
len(self.species_list)) | ODiogoSilva/TriFusion | [83, 23, 83, 21, 1392138811] |
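A quick worked example of the conversion, noting that `int()` truncates rather than rounding up:

```python
# A 0.45 proportion over 10 recorded species becomes an absolute
# species threshold of 4, since int() truncates the product.
assert int(0.45 * 10) == 4
```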
def retrieve_sequences(self, sqldb, protein_db, dest="./",
shared_namespace=None, outfile=None):
"""
:param sqldb: string. Path to sqlite database file
:param protein_db: string. Path to protein database file
:param dest: string. Directory where sequences will be exported
:param shared_namespace: Namespace object to communicate with
TriFusion's main process
:param outfile: If set, all sequences will instead be saved in a
single output file. This is used for the nucleotide sequence export
:return:
"""
if not os.path.exists(dest) and not outfile:
os.makedirs(dest)
if not os.path.exists(join(dest, "header_correspondance")):
os.makedirs(join(dest, "header_correspondance"))
if shared_namespace:
shared_namespace.act = shared_namespace.msg = "Creating database"
# Stores sequences that could not be retrieved
shared_namespace.missed = shared_namespace.counter = 0
shared_namespace.progress = 0
# Get number of lines of protein database
p = 0
with open(protein_db) as fh:
for p, _ in enumerate(fh):
pass
shared_namespace.max_pb = shared_namespace.total = p + 1
# Connect to database
con = sqlite3.connect(sqldb)
c = con.cursor()
table_name = "".join([x for x in protein_db if x.isalnum()]).encode(
"utf8")
# Create table if it does not exist
if not c.execute("SELECT name FROM sqlite_master WHERE type='table' "
"AND name='{}'".format(table_name)).fetchall():
c.execute("CREATE TABLE [{}] (seq_id text PRIMARY KEY, seq text)".
format(table_name))
# Populate database
with open(protein_db) as ph:
seq = ""
for line in ph:
# Kill switch
if shared_namespace:
if shared_namespace.stop:
con.close()
raise KillByUser("")
shared_namespace.progress += 1
shared_namespace.counter += 1
if line.startswith(">"):
if seq != "":
c.execute("INSERT INTO [{}] VALUES (?, ?)".
format(table_name), (seq_id, seq))
seq_id = line.strip()[1:]
seq = ""
else:
seq += line.strip()
con.commit()
if shared_namespace:
shared_namespace.act = shared_namespace.msg = "Fetching sequences"
shared_namespace.good = shared_namespace.counter = 0
shared_namespace.progress = 0
shared_namespace.max_pb = shared_namespace.total = \
self.all_compliant
# Set single output file, if option is set
if outfile:
output_handle = open(join(dest, outfile), "w")
# Fetching sequences
for line, cl in zip(self.groups(), self.iter_species_frequency()):
# Kill switch
if shared_namespace:
if shared_namespace.stop:
con.close()
raise KillByUser("")
# Filter sequences
if self._get_compliance(cl) == (1, 1):
if shared_namespace:
shared_namespace.good += 1
shared_namespace.progress += 1
shared_namespace.counter += 1
# Retrieve sequences from current cluster
if self.excluded_taxa:
line = self._remove_tx(line)
fields = line.split(":")
# Open file
if not outfile:
cl_name = fields[0]
oname = join(dest, cl_name)
mname = join(dest, "header_correspondance", cl_name)
output_handle = open(oname + ".fas", "w")
map_handle = open(mname + "_headerMap.csv", "w")
seqs = fields[-1].split()
for i in seqs:
# Query database
c.execute("SELECT * FROM [{}] WHERE seq_id = ?".
format(table_name), (i,))
vals = c.fetchone()
# Handles cases where the sequence could not be retrieved
# If outfile is set, output_handle will be a single file
# for all groups. If not, it will represent an individual
# group file
try:
if not outfile:
tx_name = vals[0].split("|")[0]
output_handle.write(">{}\n{}\n".format(tx_name,
vals[1]))
map_handle.write("{}; {}\n".format(vals[0],
tx_name))
else:
output_handle.write(">{}\n{}\n".format(vals[0],
vals[1]))
except TypeError:
pass
if not outfile:
output_handle.close()
if outfile:
output_handle.close()
con.close() | ODiogoSilva/TriFusion | [83, 23, 83, 21, 1392138811] |
def bar_species_distribution(self, filt=False):
if filt:
data = Counter((len(cl) for cl in self.iter_species_frequency() if
self._get_compliance(cl) == (1, 1)))
else:
data = Counter((len(cl) for cl in self.species_frequency))
x_labels = [x for x in list(data)]
data = list(data.values())
# When data is empty, return an exception
if not data:
return {"data": None}
# Sort lists
x_labels = [list(x) for x in zip(*sorted(zip(x_labels, data)))][0]
# Convert label to strings
x_labels = [str(x) for x in x_labels]
title = "Taxa frequency distribution"
ax_names = ["Number of taxa", "Ortholog frequency"]
return {"data": [data],
"title": title,
"ax_names": ax_names,
"labels": x_labels,
"table_header": ["Number of species",
"Ortholog frequency"]} | ODiogoSilva/TriFusion | [
83,
23,
83,
21,
1392138811
] |
def bar_species_coverage(self, filt=False):
"""
Creates a stacked bar plot with the proportion of available and
missing data for each species.
:return:
"""
data = Counter(dict((x, 0) for x in self.species_list))
self._reset_counter()
for cl in self.iter_species_frequency():
self._apply_filter(cl)
if filt:
data += Counter(dict((x, 1) for x, y in cl.items() if y > 0 and
self._get_compliance(cl) == (1, 1)))
else:
data += Counter(dict((x, 1) for x, y in cl.items() if y > 0))
data = data.most_common()
# When data is empty, return an exception
if not data:
return {"data": None}
x_labels = [str(x[0]) for x in data]
data = [[x[1] for x in data], [self.all_clusters - x[1] if not
filt else self.all_compliant - x[1]
for x in data]]
lgd_list = ["Available data", "Missing data"]
ax_names = [None, "Ortholog frequency"]
return {"data": data,
"labels": x_labels,
"lgd_list": lgd_list,
"ax_names": ax_names} | ODiogoSilva/TriFusion | [
83,
23,
83,
21,
1392138811
] |
def __init__(self, groups_file, gene_threshold=None,
species_threshold=None, project_prefix="MyGroups"):
# Initializing thresholds. These may be set from the start, or using
# some method that uses them as arguments
self.gene_threshold = gene_threshold
self.species_threshold = species_threshold
# Attribute containing the list of included species
self.species_list = []
# Attribute that will contain taxa to be excluded from analyses
self.excluded_taxa = []
# Attributes that will store the number (int) of cluster after gene and
# species filter
self.all_compliant = 0
self.num_gene_compliant = 0
self.num_species_compliant = 0
# Attribute containing the total number of sequences
self.total_seqs = 0
# Attribute containing the maximum number of extra copies found in the
# clusters
self.max_extra_copy = 0
# Attribute with name of the group file, which will be an ID
self.group_name = groups_file
# Initialize the project prefix for possible output files
self.prefix = project_prefix
# Initialize attribute containing the original groups
self.groups = []
# Initialize attribute containing the groups filtered using the gene and
# species threshold. This attribute can be updated at any time using
# the update_filtered_group method
self.filtered_groups = []
self.name = None
# Parse groups file and populate groups attribute
self.__parse_groups(groups_file) | ODiogoSilva/TriFusion | [83, 23, 83, 21, 1392138811] |
def exclude_taxa(self, taxa_list):
"""
Adds a taxon_name to the excluded_taxa list and updates the
filtered_groups list
"""
self.excluded_taxa.extend(taxa_list)
# Storage variable for new filtered groups
filtered_groups = []
# Reset max_extra_copy attribute
self.max_extra_copy = 0
for cl in self.groups:
cl.remove_taxa(taxa_list)
if cl.iter_sequences and cl.species_frequency:
filtered_groups.append(cl)
# Update maximum number of extra copies, if needed
if max(cl.species_frequency.values()) > self.max_extra_copy:
self.max_extra_copy = max(cl.species_frequency.values())
# Update species_list
self.species_list = sorted(list(set(self.species_list) -
set(taxa_list)))
self.filtered_groups = self.groups = filtered_groups | ODiogoSilva/TriFusion | [83, 23, 83, 21, 1392138811] |
def basic_group_statistics(self):
"""
This method creates a basic table in list format containing basic
information of the groups file (total number of clusters, total number
of sequences, number of clusters below the gene threshold, number of
clusters below the species threshold and number of clusters below the
gene AND species threshold)
:return: List containing number of
[total clusters,
total sequences,
clusters above gene threshold,
clusters above species threshold,
clusters above gene and species threshold]
"""
# Total number of clusters
total_cluster_num = len(self.groups)
# Total number of sequences
total_sequence_num = self.total_seqs
# Gene compliant clusters
clusters_gene_threshold = self.num_gene_compliant
# Species compliant clusters
clusters_species_threshold = self.num_species_compliant
clusters_all_threshold = len(self.filtered_groups)
statistics = [total_cluster_num, total_sequence_num,
clusters_gene_threshold, clusters_species_threshold,
clusters_all_threshold]
return statistics | ODiogoSilva/TriFusion | [83, 23, 83, 21, 1392138811] |
def export_filtered_group(self, output_file_name="filtered_groups",
dest="./", get_stats=False,
shared_namespace=None):
"""
Export the filtered groups into a new file.
:param output_file_name: string, name of the filtered groups file
:param dest: string, path to directory where the filtered groups file
will be created
:param get_stats: Boolean, whether to return the basic count stats or
not
:param shared_namespace: Namespace object, for communicating with
main process.
"""
if self.filtered_groups:
if shared_namespace:
shared_namespace.act = "Exporting filtered orthologs"
output_handle = open(os.path.join(dest, output_file_name), "w")
if get_stats:
all_orthologs = len(self.groups)
sp_compliant = 0
gene_compliant = 0
final_orthologs = 0
for cluster in self.filtered_groups:
if shared_namespace:
shared_namespace.progress = \
self.filtered_groups.index(cluster)
if cluster.species_compliant and cluster.gene_compliant:
output_handle.write("%s: %s\n" % (
cluster.name, " ".join(cluster.iter_sequences)))
if get_stats:
final_orthologs += 1
if get_stats:
if cluster.species_compliant:
sp_compliant += 1
if cluster.gene_compliant:
gene_compliant += 1
output_handle.close()
if get_stats:
return all_orthologs, sp_compliant, gene_compliant,\
final_orthologs
else:
raise OrthoGroupException("The groups object must be filtered "
"before using the export_filtered_group"
"method") | ODiogoSilva/TriFusion | [
83,
23,
83,
21,
1392138811
] |
def update_filtered_group(self):
"""
This method creates a new filtered group variable, like
export_filtered_group, but instead of writing into a new file, it
replaces the self.filtered_groups variable
"""
self.filtered_groups = []
# Reset gene and species compliant counters
self.num_gene_compliant = 0
self.num_species_compliant = 0
for cluster in self.groups:
cluster.apply_filter(self.gene_threshold, self.species_threshold)
if cluster.species_compliant and cluster.gene_compliant:
self.filtered_groups.append(cluster)
# Update num_species_compliant attribute
if cluster.species_compliant:
self.num_species_compliant += 1
# Update num_gene_compliant attribute
if cluster.gene_compliant:
self.num_gene_compliant += 1 | ODiogoSilva/TriFusion | [83, 23, 83, 21, 1392138811] |
def bar_species_distribution(self, dest="./", filt=False, ns=None,
output_file_name="Species_distribution"):
"""
Creates a bar plot with the distribution of species numbers across
clusters
:param dest: string, destination directory
:param filt: Boolean, whether or not to use the filtered groups.
:param output_file_name: string, name of the output file
"""
data = []
# Determine which groups to use
if filt:
groups = self.filtered_groups
else:
groups = self.groups
for i in groups:
if ns:
if ns.stop:
raise KillByUser("")
data.append(len([x for x, y in i.species_frequency.items()
if y > 0]))
# Transform data into histogram-like
transform_data = Counter(data)
x_labels = [x for x in list(transform_data)]
y_vals = list(transform_data.values())
# Sort lists
x_labels, y_vals = (list(x) for x in zip(*sorted(zip(x_labels,
y_vals))))
# Convert label to strings
x_labels = [str(x) for x in x_labels]
if ns:
if ns.stop:
raise KillByUser("")
# Create plot
b_plt, lgd, _ = bar_plot([y_vals], x_labels,
title="Taxa frequency distribution",
ax_names=["Number of taxa", "Ortholog frequency"])
b_plt.savefig(os.path.join(dest, output_file_name), bbox_inches="tight",
dpi=400)
# Create table
table_list = [["Number of species", "Ortholog frequency"]]
for x, y in zip(x_labels, y_vals):
table_list.append([x, y])
return b_plt, lgd, table_list | ODiogoSilva/TriFusion | [83, 23, 83, 21, 1392138811] |
def bar_species_coverage(self, dest="./", filt=False, ns=None,
output_file_name="Species_coverage"):
"""
Creates a stacked bar plot with the proportion of available and
missing data for each species.
:return:
"""
# Determine which groups to use
if filt:
groups = self.filtered_groups
else:
groups = self.groups
data = Counter(dict((x, 0) for x in self.species_list))
for cl in groups:
if ns:
if ns.stop:
raise KillByUser("")
data += Counter(dict((x, 1) for x, y in cl.species_frequency.items()
if y > 0))
xlabels = [str(x) for x in list(data.keys())]
data = [list(data.values()), [len(groups) - x for x in
data.values()]]
lgd_list = ["Available data", "Missing data"]
if ns:
if ns.stop:
raise KillByUser("")
b_plt, lgd, _ = bar_plot(data, xlabels, lgd_list=lgd_list,
ax_names=[None, "Ortholog frequency"])
b_plt.savefig(os.path.join(dest, output_file_name), bbox_inches="tight",
dpi=200)
return b_plt, lgd, "" | ODiogoSilva/TriFusion | [
83,
23,
83,
21,
1392138811
] |
def __init__(self, groups_files=None, gene_threshold=None,
species_threshold=None, project_prefix="MyGroups"):
"""
:param groups_files: A list containing the file names of the multiple
group files
:return: Populates the self.multiple_groups attribute
"""
# If a MultiGroups is initialized with duplicate Group objects, these
# will be stored in a list. If all Group objects are unique, the list
# will remain empty
self.duplicate_groups = []
# Initializing thresholds. These may be set from the start, or using
# some method that uses them as arguments
self.gene_threshold = gene_threshold
self.species_threshold = species_threshold
self.prefix = project_prefix
self.multiple_groups = {}
self.filters = {}
if groups_files:
for group_file in groups_files:
# If group_file is already a Group object, just add it
if not isinstance(group_file, Group):
# Check for duplicate group files
group_object = Group(group_file, self.gene_threshold,
self.species_threshold)
else:
group_object = group_file
if group_object.name in self.multiple_groups:
self.duplicate_groups.append(group_object.name)
else:
self.multiple_groups[group_object.name] = group_object
self.filters[group_object.name] = (1,
len(group_object.species_list)) | ODiogoSilva/TriFusion | [83, 23, 83, 21, 1392138811] |
def iter_gnames(self):
return (x.name for x in self.multiple_groups.values()) | ODiogoSilva/TriFusion | [83, 23, 83, 21, 1392138811] |
def add_group(self, group_obj):
"""
Adds a group object
:param group_obj: Group object
"""
# Check for duplicate groups
if group_obj.name in self.multiple_groups:
self.duplicate_groups.append(group_obj.name)
else:
self.multiple_groups[group_obj.name] = group_obj | ODiogoSilva/TriFusion | [83, 23, 83, 21, 1392138811] |
def get_group(self, group_id):
"""
Returns a group object based on its name. If the name does not match
any group object, returns None
:param group_id: string. Name of group object
"""
try:
return self.multiple_groups[group_id]
except KeyError:
return | ODiogoSilva/TriFusion | [83, 23, 83, 21, 1392138811] |
def update_filters(self, gn_filter, sp_filter, group_names=None,
default=False):
"""
This will not change the Group object themselves, only the filter
mapping. The filter is only applied when the Group object is retrieved
to reduce computations
:param gn_filter: int, filter for max gene copies
:param sp_filter: int, filter for min species
:param group_names: list, with names of group objects
"""
if group_names:
for group_name in group_names:
# Get group object
group_obj = self.multiple_groups[group_name]
# Define filters
gn_filter = gn_filter if not default else 1
sp_filter = sp_filter if not default else \
len(group_obj.species_list)
# Update Group object with new filters
group_obj.update_filters(gn_filter, sp_filter)
# Update filter map
self.filters[group_name] = (gn_filter, sp_filter)
for group_name, group_obj in self.multiple_groups.items():
# Define filters
gn_filter = gn_filter if not default else 1
sp_filter = sp_filter if not default else \
len(group_obj.species_list)
# Update Group object with new filters
group_obj.update_filters(gn_filter, sp_filter)
# Update filter map
self.filters[group_name] = (gn_filter, sp_filter) | ODiogoSilva/TriFusion | [83, 23, 83, 21, 1392138811] |
def bar_orthologs(self, output_file_name="Final_orthologs",
dest="./", stats="total"):
"""
Creates a bar plot with the final ortholog values for each group file
:param output_file_name: string. Name of output file
:param dest: string. output directory
:param stats: string. The statistics that should be used to generate
the bar plot. Options are:
..: "1": Total orthologs
..: "2": Species compliant orthologs
..: "3": Gene compliant orthologs
..: "4": Final orthologs
..: "all": All of the above
Multiple combinations can be provided, for instance: "123" will
display bars for total, species compliant and gene compliant stats
"""
# Stores the x-axis labels
x_labels = []
# Stores final ortholog values for all 4 possible data sets
vals = [[], [], [], []]
lgd = ["Total orthologs", "After species filter", "After gene filter",
"Final orthologs"]
# Get final ortholog values
for g_obj in self.multiple_groups.values():
x_labels.append(g_obj.name.split(os.sep)[-1])
# Populate total orthologs
if "1" in stats or stats == "all":
vals[0].append(len(g_obj.groups))
# Populate species compliant orthologs
if "2" in stats or stats == "all":
vals[1].append(g_obj.num_species_compliant)
# Populate gene compliant orthologs
if "3" in stats or stats == "all":
vals[2].append(g_obj.num_gene_compliant)
# Populate final orthologs
if "4" in stats or stats == "all":
vals[3].append(len(g_obj.filtered_groups))
# Filter valid data sets
lgd_list = [x for x in lgd if vals[lgd.index(x)]]
vals = [l for l in vals if l]
# Create plot
b_plt, lgd = multi_bar_plot(vals, x_labels, lgd_list=lgd_list)
b_plt.savefig(os.path.join(dest, output_file_name),
bbox_extra_artists=(lgd,), bbox_inches="tight")
# Create table list object
table_list = []
# Create header
table_list.append([""] + x_labels)
# Create content
for i in range(len(vals)):
table_list += [x for x in [[lgd_list[i]] + vals[i]]]
return b_plt, lgd, table_list | ODiogoSilva/TriFusion | [83, 23, 83, 21, 1392138811] |
def parse_groups(group_obj):
"""
Returns a list with the sorted ortholog clusters
"""
storage = []
for cluster in group_obj.groups:
storage.append(set(cluster.iter_sequences))
return storage | ODiogoSilva/TriFusion | [83, 23, 83, 21, 1392138811] |
def __init__(self, db_path, groups=None, gene_threshold=None,
species_threshold=None, project_prefix="MyGroups",
ns=None):
"""
:param groups: A list containing the file names of the multiple
group files
:return: Populates the self.multiple_groups attribute
"""
self.db_path = db_path
# If a MultiGroups is initialized with duplicate Group objects, their
# names will be stored in a list. If all Group objects are unique, the
# list will remain empty
self.duplicate_groups = []
self.groups = {}
self.groups_stats = {}
# Attribute that will store the paths of badly formatted group files
self.bad_groups = []
# Initializing thresholds. These may be set from the start, or using
# some method that uses them as arguments
self.gene_threshold = gene_threshold
self.species_threshold = species_threshold
# Initializing mapping of group filters to their names. Should be
# something like {"groupA": (1, 10)}
self.filters = {}
self.taxa_list = {}
self.excluded_taxa = {}
# This attribute will contain a dictionary with the maximum extra copies
# for each group object
self.max_extra_copy = {}
# This attribute will contain a list with the number of species for
# each group object, excluding replicates. If a MultiGroupLight object
# contains Group objects with different taxa numbers, this attribute
# can be used to issue a warning
self.species_number = []
self.prefix = project_prefix
if ns:
ns.files = len(groups)
if groups:
for group_file in groups:
# If group_file is already a Group object, just add it
if not isinstance(group_file, GroupLight):
try:
if ns:
if ns.stop:
raise KillByUser("")
ns.counter += 1
group_object = GroupLight(group_file,
self.gene_threshold,
self.species_threshold,
ns=ns)
except Exception as e:
print(e.message)
self.bad_groups.append(group_file)
continue
else:
group_object = group_file
# Check for duplicate group files
if group_object.name in self.groups:
self.duplicate_groups.append(group_file.name)
else:
self.add_group(group_object) | ODiogoSilva/TriFusion | [83, 23, 83, 21, 1392138811] |
def clear_groups(self):
"""
Clears the current MultiGroupsLight object
"""
for f in self.groups.values():
os.remove(f)
self.duplicate_groups = []
self.groups = {}
self.groups_stats = {}
self.filters = {}
self.max_extra_copy = {}
self.species_number = []
self.gene_threshold = self.species_threshold = 0 | ODiogoSilva/TriFusion | [83, 23, 83, 21, 1392138811] |
def remove_group(self, group_id):
"""
Removes a group object according to its name
:param group_id: string, name matching a Group object name attribute
"""
if group_id in self.groups:
os.remove(self.groups[group_id])
del self.groups[group_id] | ODiogoSilva/TriFusion | [83, 23, 83, 21, 1392138811] |
def add_multigroups(self, multigroup_obj):
"""
Merges a MultiGroup object
:param multigroup_obj: MultiGroup object
"""
for _, group_obj in multigroup_obj.groups.items():
self.add_group(group_obj) | ODiogoSilva/TriFusion | [83, 23, 83, 21, 1392138811] |
def get_multigroup_statistics(self, group_obj):
"""
:return:
"""
stats = group_obj.basic_group_statistics()
self.groups_stats[group_obj.name] = {"stats": stats,
"species": group_obj.species_list,
"max_copies": group_obj.max_extra_copy} | ODiogoSilva/TriFusion | [
83,
23,
83,
21,
1392138811
] |
def __init__(self, sheet_width, sheet_height, columns, rows, label_width, label_height, **kwargs):
"""
Required parameters
-------------------
sheet_width, sheet_height: positive dimension
The size of the sheet.
columns, rows: positive integer
The number of labels on the sheet.
label_width, label_height: positive dimension
The size of each label.
Margins and gaps
----------------
left_margin: positive dimension
The gap between the left edge of the sheet and the first column.
column_gap: positive dimension
The internal gap between columns.
right_margin: positive dimension
The gap between the right edge of the sheet and the last column.
top_margin: positive dimension
The gap between the top edge of the sheet and the first row.
row_gap: positive dimension
The internal gap between rows.
bottom_margin: positive dimension
The gap between the bottom edge of the sheet and the last row.
Padding
-------
left_padding, right_padding, top_padding, bottom_padding: positive dimensions, default 0
The padding between the edges of the label and the area available
to draw on.
Corners
---------------------
corner_radius: positive dimension, default 0
Gives the labels rounded corners with the given radius.
padding_radius: positive dimension, default 0
Give the drawing area rounded corners. If there is no padding, this
must be set to zero.
Background
----------
background_image: reportlab.graphics.shape.Image
An image to use as the background to the page. This will be
automatically sized to fit the page; make sure it has the correct
aspect ratio.
background_filename: string
Filename of an image to use as a background to the page. If both
this and background_image are given, then background_image will
take precedence.
Raises
------
InvalidDimension
If any given dimension is invalid (i.e., the labels cannot fit on
the sheet).
"""
# Compulsory arguments.
self._sheet_width = Decimal(sheet_width)
self._sheet_height = Decimal(sheet_height)
self._columns = int(columns)
self._rows = int(rows)
self._label_width = Decimal(label_width)
self._label_height = Decimal(label_height)
# Optional arguments; missing ones will be computed later.
self._left_margin = kwargs.pop('left_margin', None)
self._column_gap = kwargs.pop('column_gap', None)
self._right_margin = kwargs.pop('right_margin', None)
self._top_margin = kwargs.pop('top_margin', None)
self._row_gap = kwargs.pop('row_gap', None)
self._bottom_margin = kwargs.pop('bottom_margin', None)
# Optional arguments with default values.
self._left_padding = kwargs.pop('left_padding', 0)
self._right_padding = kwargs.pop('right_padding', 0)
self._top_padding = kwargs.pop('top_padding', 0)
self._bottom_padding = kwargs.pop('bottom_padding', 0)
self._corner_radius = Decimal(kwargs.pop('corner_radius', 0))
self._padding_radius = Decimal(kwargs.pop('padding_radius', 0))
self._background_image = kwargs.pop('background_image', None)
self._background_filename = kwargs.pop('background_filename', None)
# Leftover arguments.
if kwargs:
args = kwargs.keys()
if len(args) == 1:
raise TypeError("Unknown keyword argument {}.".format(args[0]))
else:
raise TypeError("Unknown keyword arguments: {}.".format(', '.join(args)))
# Track which attributes have been automatically set.
self._autoset = set()
# Check all the dimensions etc are valid.
self._calculate() | bcbnz/pylabels | [87, 40, 87, 11, 1352002192] |
def bounding_boxes(self, mode='fraction', output='dict'):
"""Get the bounding boxes of the labels on a page.
Parameters
----------
mode: 'fraction', 'actual'
If 'fraction', the bounding boxes are expressed as a fraction of the
height and width of the sheet. If 'actual', they are the actual
position of the labels in millimetres from the top-left of the
sheet.
output: 'dict', 'json'
If 'dict', a dictionary with label identifier tuples (row, column)
as keys and a dictionary with 'left', 'right', 'top', and 'bottom'
entries as the values.
If 'json', a JSON encoded string which represents a dictionary with
keys of the string format 'rowxcolumn' and each value being a
bounding box dictionary with 'left', 'right', 'top', and 'bottom'
entries.
Returns
-------
The bounding boxes in the format set by the output parameter.
"""
boxes = {}
# Check the parameters.
if mode not in ('fraction', 'actual'):
raise ValueError("Unknown mode {0}.".format(mode))
if output not in ('dict', 'json'):
raise ValueError("Unknown output {0}.".format(output))
# Iterate over the rows.
for row in range(1, self.rows + 1):
# Top and bottom of all labels in the row.
top = self.top_margin + ((row - 1) * (self.label_height + self.row_gap))
bottom = top + self.label_height
# Now iterate over all columns in this row.
for column in range(1, self.columns + 1):
# Left and right position of this column.
left = self.left_margin + ((column - 1) * (self.label_width + self.column_gap))
right = left + self.label_width
# Output in the appropriate mode format.
if mode == 'fraction':
box = {
'top': top / self.sheet_height,
'bottom': bottom / self.sheet_height,
'left': left / self.sheet_width,
'right': right / self.sheet_width,
}
elif mode == 'actual':
box = {'top': top, 'bottom': bottom, 'left': left, 'right': right}
# Add to the collection.
if output == 'json':
boxes['{0:d}x{1:d}'.format(row, column)] = box
box['top'] = float(box['top'])
box['bottom'] = float(box['bottom'])
box['left'] = float(box['left'])
box['right'] = float(box['right'])
else:
boxes[(row, column)] = box
# Done.
if output == 'json':
return json.dumps(boxes)
return boxes | bcbnz/pylabels | [87, 40, 87, 11, 1352002192] |
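A hypothetical end-to-end use of the two methods above (assuming the enclosing class is pylabels' `Specification`, constructed with the signature shown earlier; dimensions are in millimetres and purely illustrative):

```python
# Hypothetical A4 sheet with 2 columns x 4 rows of 90x50 mm labels;
# margins and gaps are left for _calculate() to derive.
spec = Specification(210, 297, 2, 4, 90, 50)
boxes = spec.bounding_boxes(mode='actual')
print(boxes[(1, 1)])  # {'top': ..., 'bottom': ..., 'left': ..., 'right': ...}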
def create_accessor(attr, deletable=False):
# Getter is simple; no processing needed.
@property
def accessor(self):
return getattr(self, attr)
# Setter is more complicated.
@accessor.setter
def accessor(self, value):
# Store the original value in case we need to reset.
original = getattr(self, attr)
# If this was originally autoset or not.
was_autoset = attr in self._autoset
# Discard this attribute from the autoset list.
self._autoset.discard(attr)
# Set the value and see if it is valid.
setattr(self, attr, value)
try:
self._calculate()
except:
# Reset to the original state.
setattr(self, attr, original)
if was_autoset:
self._autoset.add(attr)
# Let the error propagate up.
raise
# Create a deleter if allowable.
if deletable:
@accessor.deleter
def accessor(self):
self._autoset.add(attr)
setattr(self, attr, None)
self._calculate()
# And we now have our accessor.
return accessor | bcbnz/pylabels | [87, 40, 87, 11, 1352002192] |
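`create_accessor` is a property factory: every set attempt is validated through `_calculate()` and rolled back on failure. A minimal sketch of how a class is assumed to wire it up (the `Demo` class and its validation rule are made up):

```python
# Minimal sketch; Demo and its validation rule are illustrative only.
class Demo(object):
    width = create_accessor('_width')

    def __init__(self):
        self._width = 10
        self._autoset = set()

    def _calculate(self):
        if self._width <= 0:
            raise ValueError("width must be positive")

d = Demo()
d.width = 25    # validated by _calculate()
# d.width = -1  would raise and roll _width back to 25
```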
def __init__(self, x, y, vx, vy, word):
self.x = x
self.y = y
self.vx = vx
self.vy = vy
self.word = word
self.size = max(100, 50 + len(word) * 20)
self.color = random.choice(BALLOON_COLORS) | godiard/typing-turtle-activity | [4, 8, 4, 4, 1419121319] |
def __init__(self, lesson, activity):
GObject.GObject.__init__(self) | godiard/typing-turtle-activity | [4, 8, 4, 4, 1419121319] |
def realize_cb(self, widget):
self.activity.add_events(Gdk.EventMask.KEY_PRESS_MASK)
self.key_press_cb_id = self.activity.connect('key-press-event', self.key_cb)
# Clear the mouse cursor.
#pixmap = Gdk.Pixmap(widget.window, 10, 10)
#color = Gdk.Color()
#cursor = Gdk.Cursor.new(pixmap, pixmap, color, color, 5, 5)
#widget.window.set_cursor(cursor) | godiard/typing-turtle-activity | [4, 8, 4, 4, 1419121319] |
def unrealize_cb(self, widget):
self.activity.disconnect(self.key_press_cb_id) | godiard/typing-turtle-activity | [4, 8, 4, 4, 1419121319] |
def stop_cb(self, widget):
# Stop the animation loop.
if self.update_timer:
try:
GObject.source_remove(self.update_timer)
except:
pass # Source may already be removed; ignore if not found | godiard/typing-turtle-activity | [4, 8, 4, 4, 1419121319] |
def key_cb(self, widget, event):
# Ignore hotkeys.
if event.get_state() & (Gdk.ModifierType.CONTROL_MASK | Gdk.ModifierType.MOD1_MASK):
return False
# Extract information about the key pressed.
key = Gdk.keyval_to_unicode(event.keyval)
if key != 0: key = chr(key)
if self.finished:
key_name = Gdk.keyval_name(event.keyval)
if key_name == 'Return':
self.activity.pop_screen()
# Show the new medal if there was one.
if self.medal:
self.activity.push_screen(medalscreen.MedalScreen(self.medal, self.activity))
else:
for b in self.balloons:
if b.word[0] == key:
b.word = b.word[1:]
self.add_score(1)
# Pop the balloon if it's been typed.
if len(b.word) == 0:
self.balloons.remove(b)
self.add_score(100)
self.queue_draw_balloon(b)
break | godiard/typing-turtle-activity | [4, 8, 4, 4, 1419121319] |
def update_balloon(self, b):
b.x += b.vx
b.y += b.vy
if b.x < 100 or b.x >= self.bounds.width - 100:
b.vx = -b.vx
if b.y < -100:
self.balloons.remove(b)
self.queue_draw_balloon(b) | godiard/typing-turtle-activity | [4, 8, 4, 4, 1419121319] |
def tick(self):
if self.finished:
return False
self.bounds = self.area.get_allocation() | godiard/typing-turtle-activity | [4, 8, 4, 4, 1419121319] |
def draw_results(self, cr):
# Draw background.
w = self.bounds.width - 400
h = self.bounds.height - 200
x = self.bounds.width/2 - w/2
y = self.bounds.height/2 - h/2
cr.set_source_rgb(0.762, 0.762, 0.762)
cr.rectangle(x, y, w, h)
cr.fill()
cr.set_source_rgb(0, 0, 0)
cr.rectangle(x, y, w, h)
cr.stroke()
# Draw text
title = _('You finished!') + '\n'
cr.set_source_rgb(0, 0, 0)
pango_layout = PangoCairo.create_layout(cr)
fd = Pango.FontDescription('Serif Bold')
fd.set_size(16 * Pango.SCALE)
pango_layout.set_font_description(fd)
pango_layout.set_text(title,
len(title))
size = pango_layout.get_size()
tx = x + (w / 2) - (size[0] / Pango.SCALE) / 2
ty = y + 100
cr.move_to(tx, ty)
PangoCairo.update_layout(cr, pango_layout)
PangoCairo.show_layout(cr, pango_layout)
report = ''
report += _('Your score was %(score)d.') % { 'score': self.score } + '\n'
if self.medal:
report += _('You earned a %(type)s medal!') % self.medal + '\n'
report += '\n'
report += _('Press the ENTER key to continue.') | godiard/typing-turtle-activity | [4, 8, 4, 4, 1419121319] |
def finish_game(self):
self.finished = True
# Add to the lesson history.
report = {
'lesson': self.lesson['name'],
'score': self.score,
}
self.activity.add_history(report)
# Show the medal screen, if one should be given.
got_medal = None | godiard/typing-turtle-activity | [4, 8, 4, 4, 1419121319] |
def queue_draw_balloon(self, b):
x = int(b.x - b.size/2) - 5
y = int(b.y - b.size/2) - 5
w = int(b.size + 100)
h = int(b.size*1.5 + 10)
self.area.queue_draw_area(x, y, w, h) | godiard/typing-turtle-activity | [4, 8, 4, 4, 1419121319] |
def add_score(self, num):
self.score += num
self.queue_draw_score() | godiard/typing-turtle-activity | [4, 8, 4, 4, 1419121319] |
def draw_score(self, cr):
cr.set_source_rgb(0, 0, 0)
pango_layout = PangoCairo.create_layout(cr)
fd = Pango.FontDescription('Times')
fd.set_size(14 * Pango.SCALE)
pango_layout.set_font_description(fd)
text = _('SCORE: %d') % self.score
pango_layout.set_text(text, len(text))
size = pango_layout.get_size()
x = self.bounds.width - 20 - size[0] / Pango.SCALE
y = 20
cr.move_to(x, y)
PangoCairo.update_layout(cr, pango_layout)
PangoCairo.show_layout(cr, pango_layout) | godiard/typing-turtle-activity | [4, 8, 4, 4, 1419121319] |
def draw(self, cr):
self.bounds = self.area.get_allocation()
# Draw background.
cr.set_source_rgb(0.915, 0.915, 1)
cr.rectangle(0, 0, self.bounds.width, self.bounds.height)
cr.fill()
# Draw the balloons.
for b in self.balloons:
self.draw_balloon(cr, b)
if self.finished:
self.draw_results(cr)
else:
self.draw_instructions(cr)
self.draw_score(cr) | godiard/typing-turtle-activity | [4, 8, 4, 4, 1419121319] |