code | repo_name | path | language | license | size
---|---|---|---|---|---|
'''
Almost exactly the same problem as LeetCode 54 (except much easier)
'''
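# Approach sketch: walk the grid with a direction vector (vert, horiz),
# writing 1..n*n; whenever the walk reaches the current boundary
# (left/right/up/down), rotate the direction clockwise and shrink that
# boundary inward. For n=3 this yields the spiral
# [[1,2,3],[8,9,4],[7,6,5]], matching the tests below.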
def v1(n):
matrix = [[0]*n for _ in range(n)]
left, right = (0, n-1)
up, down = (0, n-1)
vert, horiz = (0, 1)
row, col = (0, 0)
for i in range(1, (n**2)+1):
matrix[row][col] = i
row += vert
col += horiz
if horiz == 1 and col == right:
up += 1
vert, horiz = (1, 0)
elif vert == 1 and row == down:
right -= 1
vert, horiz = (0, -1)
elif horiz == -1 and col == left:
down -= 1
vert, horiz = (-1, 0)
elif vert == -1 and row == up:
left += 1
vert, horiz = (0, 1)
return matrix
def main():
import testSuite
tests = [
([0], []),
([1], [[1]]),
([2], [[1,2], [4,3]]),
([3], [[1,2,3], [8,9,4], [7,6,5]]),
([4], [[1,2,3,4], [12,13,14,5],[11,16,15,6],[10,9,8,7]])
]
t = testSuite.init(tests)
t.test(v1)
main()
| myriasofo/CLRS_exercises | algos/leetcode/59_spiralMatrixV2.py | Python | mit | 1,028 |
#!/usr/bin/env python3
from PIL import Image, ImageTk
import tkinter
from tkinter import ttk
from tkinter import Button, Label, Menu
from tkinter.filedialog import askopenfilename as file_selector
from tkinter.filedialog import asksaveasfilename as file_saver
from tkinter.filedialog import askdirectory
from tkinter.messagebox import showerror, askyesno
import yaml
from FontRuntime import TkFont
from PyFont import Font
MAIN_COLOR = '#FFFFFF'
class MyLabel:
def __init__(self, master, text=""):
self._textvar = tkinter.StringVar()
self._textvar.set(text)
self._label = Label(master, textvariable=self._textvar)
self._label.config(background=MAIN_COLOR, font=('times', '16'))
def set_text(self, text):
self._textvar.set(text)
def place(self, **kwargs):
self._label.place(**kwargs)
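# Minimal usage sketch for MyLabel (assumes a running Tk root window):
#   root = tkinter.Tk()
#   label = MyLabel(root, "hello")
#   label.place(relx=0.5, rely=0.5, anchor=tkinter.CENTER)
#   label.set_text("updated")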
class PyFontMenu:
"""Main class for the Tkinter GUI"""
def __init__(self, master):
"""Master must be a Tk() object"""
self.master = master
# self.master.iconbitmap(default="icon.ico")
self.master.title("Malbonaĵo Skribo")
self.menu = Menu(master)
self.menu.configure(background=MAIN_COLOR)
self.master.config(menu=self.menu)
self.infile = None
self.outfile = None
self.conf_file = None
self.db = None
self.infile_label = MyLabel(self.master, "")
self.infile_label.place(rely=1.0, relx=0.0, x=0, y=0, anchor=tkinter.SW)
self.outfile_label = MyLabel(self.master, "")
self.file_menu = Menu(self.menu)
self.file_menu.configure(background=MAIN_COLOR)
self.file_menu.add_command(label='Convert file',
command=self.convert_file)
self.file_menu.add_command(label='Export SVG', command=self._export)
self.file_menu.add_command(label='Export PNG', command=self._export_png)
self.file_menu.add_command(label='Select database',
command=self.select_db)
self.menu.add_cascade(label='File', menu=self.file_menu)
self.option_menu = Menu(self.menu)
self.option_menu.add_command(label='Configure', command=self._open_options)
self.option_menu.add_command(label='Select configuration file',
command=self.select_conf_file)
self.menu.add_cascade(label='Options', menu=self.option_menu)
def select_infile(self):
self.infile = file_selector(initialdir='.')
self.infile_label.set_text('File to convert: ' + self.infile)
def select_conf_file(self):
self.conf_file = file_selector(initialdir='.')
def select_outfile(self):
self.outfile = file_saver(initialdir='.', defaultextension='.svg')
def convert_file(self):
if not self.infile:
self.select_infile()
        with open(self.infile, 'r') as f:
txt = f.read()
if not self.outfile:
self.outfile = file_saver(initialdir='.', defaultextension='.svg')
# svg = Font().generate_svg(txt)
svg = Font().generate_loadable_svg(txt, self.master)
svg.export(self.outfile)
self.infile_label.set_text('File exported')
def select_db(self):
self.db = askdirectory(initialdir='.')
def _export(self):
self.outfile = file_saver(initialdir='.', defaultextension='.svg')
self.tkfont.export()
def _export_png(self):
self.outfile = file_saver(initialdir='.', defaultextension='.png')
svg = self.tkfont.get_svg()
        if svg:
svg.export_png(self.outfile)
self.infile_label.set_text("Text Exported")
def _open_options(self):
if not self.conf_file:
self.select_conf_file()
toto = GuiSvgConfig(self.master, self.conf_file)
def the_end(self, svg):
        ret = askyesno("Save", "Save your text?")
if ret:
if not self.outfile:
self.select_outfile()
svg.export(self.outfile)
self.infile_label.set_text("Text Exported")
def main():
root = tkinter.Tk()
root.geometry('1280x720')
root.configure(background=MAIN_COLOR)
gui = PyFontMenu(root)
tkfont = TkFont(root, Font(), gui)
gui.tkfont = tkfont
root.bind("<KeyPress>", lambda event: tkfont.handle_char(event.char))
root.bind("<KeyPress-Escape>", lambda event: tkfont.theend())
root.mainloop()
print()
if __name__ == '__main__':
main()
| chichaj/PyFont | PyFont/tkmenu.py | Python | mit | 4,510 |
from django.db import models
from django_extensions.db.fields import AutoSlugField
from core.models import Country
class Constituency(models.Model):
constituency_id = models.CharField(max_length=10, primary_key=True)
name = models.CharField(max_length=765)
country_name = models.CharField(max_length=255)
alternative_name = models.CharField(max_length=765, blank=True)
retired = models.IntegerField(null=True, blank=True)
    slug = AutoSlugField(populate_from='name', max_length=255, separator=u'_')
count = models.IntegerField(null=True)
# Not used anywhere
wikipedia_url = models.CharField(max_length=765, blank=True)
url_id = models.CharField(max_length=300, blank=True)
guardian_aristotle_id = models.IntegerField(null=True, blank=True)
guardian_pa_code = models.IntegerField(null=True, blank=True)
class Meta:
verbose_name_plural = 'Constituencies'
def __unicode__(self):
return self.name
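# Example query (a sketch; assumes migrations for this model are applied):
#   Constituency.objects.filter(retired__isnull=True).order_by('name')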
| electionleaflets/electionleaflets | electionleaflets/apps/constituencies/models.py | Python | mit | 971 |
# https://en.wikipedia.org/wiki/Exponentiation_by_squaring
def pow(a, b, mod):
    # Computes (a ** b) % mod in O(log b) multiplications; note this shadows
    # the builtin pow(), which accepts the same three-argument form.
    res = 1
    while b > 0:
        if b & 1 != 0:
            # Lowest bit of b is set: fold the current power of a into res.
            res = res * a % mod
        # Square a for the next bit and shift the processed bit out of b.
        a = a * a % mod
        b >>= 1
    return res
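# Worked example: pow(2, 10, 1000000007). b = 10 = 0b1010, so the loop folds
# in a^2 and a^8 (4 * 256 = 1024) over four iterations -- O(log b) instead of
# ten sequential multiplications.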
def test():
print(1024 == pow(2, 10, 1000000007))
test()
| indy256/codelibrary | python/binary_exponentiation.py | Python | unlicense | 283 |
from bottle import route, run, static_file, request, post,response,template,hook
import bottle
bottle.TEMPLATE_PATH = ['osmquadtreeutils/static']
import rendertiles, rendersplit
import sys,os
import urllib,json
import postds as ps
import argparse
smalls=[]
def find_small(v):
for i,s in enumerate(smalls):
if v==s:
return i
i=len(smalls)
smalls.append(v)
return i
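# Note: find_small interns small (<1000 byte) rendered tiles -- typically
# repeated blank or near-blank images -- so the per-layer `tiles` caches can
# store an int index into `smalls` instead of duplicating the PNG bytes.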
@route('/tile/<layer>/<zs>/<xs>/<ys>.png')
def tile(layer,zs,xs,ys):
global mp
global smalls
response.content_type = "image/png"
z,x,y = map(int,(zs,xs,ys))
    if (z, x, y) not in tiles[layer]:
mpp = mp[layer]
if type(mpp)==tuple:
for ka,kb,v in rendersplit.render_tile_split(mpp[0],mpp[1],z,x,y):
if len(v)<1000:
v = find_small(v)
tiles['split_'+ka][kb] = v
else:
for k,v in rendertiles.render_tile(mpp,z,x,y):
if len(v)<1000:
v = find_small(v)
tiles[layer][k]=v
tt=tiles[layer][z,x,y]
if type(tt)==int:
return smalls[tt]
return tt
@route('/tilespec.geojson')
def tilespec():
response.content_type='application/json'
return urllib.urlopen('http://localhost:17831/tiles').read()
roadsquery = """(
SELECT * FROM (
(SELECT osm_id,quadtree,name,ref,way,feature,horse,foot,bicycle,tracktype,access,construction,service,oneway,link,layernotnull,prio FROM
(SELECT osm_id,quadtree,name,ref,way, ('highway_' || (CASE WHEN substr(highway, length(highway)-3, 4) = 'link' THEN substr(highway, 0, length(highway)-4) ELSE highway END)) AS feature, horse, foot, bicycle, tracktype, CASE WHEN access IN ('destination') THEN 'destination'::text WHEN access IN ('no', 'private') THEN 'no'::text ELSE NULL END AS access, construction, CASE WHEN service IN ('parking_aisle', 'drive-through', 'driveway') THEN 'INT-minor'::text ELSE 'INT-normal'::text END AS service, CASE WHEN oneway IN ('yes', '-1') THEN oneway ELSE NULL END AS oneway, CASE WHEN substr(highway, length(highway)-3, 4) = 'link' THEN 'yes'::text ELSE 'no'::text END AS link, makeinteger(layer) AS layernotnull FROM planet_osm_line )
as a JOIN (VALUES ('highway_motorway','no',380), ('highway_trunk','no',370), ('highway_primary','no',360), ('highway_secondary','no',350), ('highway_tertiary','no',340), ('highway_residential','no',330), ('highway_unclassified','no',330), ('highway_road','no',330), ('highway_living_street','no',320), ('highway_pedestrian','no',310), ('highway_raceway','no',300), ('highway_motorway','yes',240), ('highway_trunk','yes',230), ('highway_primary','yes',220), ('highway_secondary','yes',210), ('highway_tertiary','yes',200), ('highway_service','no',150), ('highway_track','no',110), ('highway_path','no',100), ('highway_footway','no',100), ('highway_bridleway','no',100), ('highway_cycleway','no',100), ('highway_steps','no',100), ('highway_platform','no',90), ('highway_proposed','no',20), ('highway_construction','no',10) ) AS ordertable (feature, link, prio)
USING (feature,link) )
UNION ALL
(SELECT osm_id,quadtree,name,ref,way, feature,horse,foot,bicycle,tracktype,access,construction,service,oneway,link,layernotnull,prio FROM
(SELECT osm_id,quadtree,name, ref, way, COALESCE(('railway_' ||(CASE WHEN railway = 'preserved' AND service IN ('spur', 'siding', 'yard') THEN 'INT-preserved-ssy'::text WHEN (railway = 'rail' AND service IN ('spur', 'siding', 'yard')) THEN 'INT-spur-siding-yard' ELSE railway end)), ('aeroway_' || aeroway)) AS feature, horse, foot, bicycle, tracktype, CASE WHEN access IN ('destination') THEN 'destination'::text WHEN access IN ('no', 'private') THEN 'no'::text ELSE NULL END AS access, construction, CASE WHEN service IN ('parking_aisle', 'drive-through', 'driveway') THEN 'INT-minor'::text ELSE 'INT-normal'::text END AS service, NULL::text AS oneway, 'no'::text AS link, makeinteger(layer) AS layernotnull FROM planet_osm_line )
as c JOIN (VALUES ('railway_rail', 430), ('railway_spur', 430), ('railway_siding', 430),('railway_INT-spur-siding-yard',430), ('railway_subway', 420), ('railway_narrow_gauge', 420), ('railway_light_rail', 420), ('railway_preserved', 420), ('railway_funicular', 420), ('railway_monorail', 420), ('railway_miniature', 420), ('railway_turntable', 420), ('railway_tram', 410), ('railway_disused', 400), ('railway_construction', 400), ('aeroway_runway', 60), ('aeroway_taxiway', 50), ('railway_platform', 90) ) AS ordertable (feature, prio)
USING (feature) )
) as features ORDER BY prio )
AS roads_casing"""
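# (roadsquery flattens planet_osm_line into a single roads_casing relation:
#  two SELECTs -- highways, then railways/aeroways -- are each joined to a
#  VALUES table that assigns a drawing priority, then UNIONed and ordered
#  by prio.)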
@hook('after_request')
def enable_cors():
response.headers['Access-Control-Allow-Origin'] = '*'
@route('/roads/<z>/<x>/<y>.json')
def getRoads(z,x,y):
response.content_type='application/json'
jj=json.loads(ps.getTile(-1,roadsquery, tup=(x,y,z,)))
jj.pop('properties')
return jj
@route('/')
def fetchindex():
global idxfn
print request.query.items()
lat,lon = '51.39','0.09'
if 'lat' in request.query:
lat=request.query['lat']
if 'lon' in request.query:
lon=request.query['lon']
print "fetchindex",lat,lon
response.content_type = "text/html"
global mp
hasOrig = 'orig' in mp
hasSplit= 'split_BASE' in mp
#return idx % (lat,lon)
return template('index',ln=lon,lt=lat,hasOrig=hasOrig,hasSplit=hasSplit)
#return static_file("index.html",root=staticloc)
#@route('/setloc')
#def setloc():
# return {'x': 0.09, 'y': 51.395, 'z': 15}
@route('/images/<filename>')
def server_js(filename):
global staticloc
return static_file(filename, root=staticloc)
@route('/<filename>')
def server_js(filename):
global staticloc
return static_file(filename, root=staticloc)
if __name__ == "__main__":
mml='project-oqt.mml'
parser = argparse.ArgumentParser(description="setup demo mapnik tileserver")
parser.add_argument("-s","--mapstyle",help="map style (mml) file",default=mml)
parser.add_argument("-o","--origstyle",help="orig style (mml) file",default="")
parser.add_argument("-t", "--origtableprefix",help="orig table prefix",default="")
parser.add_argument("-p", "--split", action="store_true",help="split map style file",default=False)
parser.add_argument("-l", "--staticloc",help="split map style file",default="osmquadtreeutils/static/")
args = parser.parse_args()
orig = None
if args.mapstyle:
mml = args.mapstyle
if not os.path.exists(mml):
print "'%s' doesn not exist: specify -s for style file" % mml
sys.exit(1)
if args.origstyle:
orig = args.origstyle
else:
root,_ = os.path.split(mml)
orig = os.path.join(root, 'project.mml')
if not os.path.exists(orig):
orig=None
grps=None
if args.split:
fn=mml[:-4]+'-tabs.csv'
if not os.path.exists(fn):
print "split file %s does not exist" % fn
sys.exit(1)
grps=rendersplit.load_groups(fn)
print "map style: %s" % mml
print "orig style: %s" % (orig if orig else 'MISSING')
print "groups? %s" % ('yes' if grps else 'no')
print "orig tabpp: %s" % (args.origtableprefix if args.origtableprefix else 'MISSING')
mpa = rendertiles.make_mapnik(mml, avoidEdges=True)
mp = {'alt': mpa }
if orig and os.path.exists(orig):
if args.origtableprefix:
mp['orig'] = rendertiles.make_mapnik(orig,tabpp=args.origtableprefix)
else:
mp['orig'] = rendertiles.make_mapnik(orig)
if grps:
for k in set(a for a,b in grps):
mp['split_'+k] = (mpa, grps)
#tiles['split_'+k] = {}
tiles=dict((k, {}) for k in mp)
print tiles, mp
staticloc=args.staticloc
bottle.TEMPLATE_PATH = [staticloc]
#idx=open(staticloc+"/index.html").read()
idxfn = staticloc+'index.html'
run(host='0.0.0.0', port=8351, debug=True)
| jharris2268/osmquadtreeutils | osmquadtreeutils/__main__.py | Python | gpl-3.0 | 8,064 |
#!/usr/bin/python
"""
BACpypes Test
-------------
"""
import os
import sys
from bacpypes.debugging import bacpypes_debugging, ModuleLogger, xtob
from bacpypes.consolelogging import ArgumentParser
from bacpypes.pdu import Address
from bacpypes.comm import bind
from bacpypes.apdu import APDU, IAmRequest
from bacpypes.app import LocalDeviceObject, Application
from bacpypes.appservice import ApplicationServiceAccessPoint, StateMachineAccessPoint
from tests.state_machine import ServerStateMachine
# some debugging
_debug = 0
_log = ModuleLogger(globals())
# defaults for testing
BACPYPES_TEST = ""
BACPYPES_TEST_OPTION = ""
# parsed test options
test_options = None
#
# TestApplication
#
@bacpypes_debugging
class TestApplication(Application):
def __init__(self, localDevice, localAddress, aseID=None):
if _debug: TestApplication._debug("__init__ %r %r aseID=%r", localDevice, localAddress, aseID)
Application.__init__(self, localDevice, localAddress, aseID)
#
# setUp
#
@bacpypes_debugging
def setUp(argv=None):
global test_options
# create an argument parser
parser = ArgumentParser(description=__doc__)
# add an option
parser.add_argument(
'--option', help="this is an option",
default=os.getenv("BACPYPES_TEST_OPTION") or BACPYPES_TEST_OPTION,
)
# get the debugging args and parse them
arg_str = os.getenv("BACPYPES_TEST") or BACPYPES_TEST
test_options = parser.parse_args(argv or arg_str.split())
if _debug: setUp._debug("setUp")
if _debug: setUp._debug(" - test_options: %r", test_options)
#
# tearDown
#
@bacpypes_debugging
def tearDown():
if _debug: tearDown._debug("tearDown")
#
# main
#
setUp(sys.argv[1:])
# make a device object
test_device = LocalDeviceObject(
objectName='test_device',
objectIdentifier=599,
maxApduLengthAccepted=1024,
segmentationSupported='segmentedBoth',
vendorIdentifier=15,
)
# make a test address
test_address = Address(1)
# create the test application; the service access points and server state
# machine are created and bound together below
test_application = TestApplication(test_device, test_address)
# include an application decoder
test_asap = ApplicationServiceAccessPoint()
# pass the device object to the state machine access point so it
# can know if it should support segmentation
test_smap = StateMachineAccessPoint(test_device)
# state machine
test_server = ServerStateMachine()
# bind everything together
bind(test_application, test_asap, test_smap, test_server)
# ==============================================================================
i_am_request = IAmRequest(
iAmDeviceIdentifier=('device', 100),
maxAPDULengthAccepted=1024,
segmentationSupported='segmentedBoth',
vendorID=15,
)
print("i_am_request")
i_am_request.debug_contents()
print("")
test_apdu = APDU()
i_am_request.encode(test_apdu)
print("test_apdu")
test_apdu.debug_contents()
print("")
print("modify test_apdu")
test_apdu.pduData = test_apdu.pduData[5:]
test_apdu.debug_contents()
print("")
# make a send transition from start to success
test_server.start_state.send(test_apdu).success()
# run the machine
print("running")
test_server.run()
print("")
# ==============================================================================
# check for success
assert not test_server.running
assert test_server.current_state.is_success_state
tearDown()
| JoelBender/bacpypes | sandbox/i_am_reject_test_x.py | Python | mit | 3,401 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import fileinput
import glob
import os
import platform
import psutil
import shutil
import signal
import sys
import telnetlib
import time
import urlparse
import urllib2
from distutils.spawn import find_executable
from mozdevice import DeviceManagerADB, DMError
from mozprocess import ProcessHandler
EMULATOR_HOME_DIR = os.path.join(os.path.expanduser('~'), '.mozbuild', 'android-device')
EMULATOR_AUTH_FILE = os.path.join(os.path.expanduser('~'), '.emulator_console_auth_token')
TOOLTOOL_URL = 'https://raw.githubusercontent.com/mozilla/build-tooltool/master/tooltool.py'
TRY_URL = 'https://hg.mozilla.org/try/raw-file/default'
MANIFEST_PATH = 'testing/config/tooltool-manifests'
verbose_logging = False
class AvdInfo(object):
"""
Simple class to contain an AVD description.
"""
def __init__(self, description, name, tooltool_manifest, extra_args,
port):
self.description = description
self.name = name
self.tooltool_manifest = tooltool_manifest
self.extra_args = extra_args
self.port = port
"""
A dictionary to map an AVD type to a description of that type of AVD.
There is one entry for each type of AVD used in Mozilla automated tests
and the parameters for each reflect those used in mozharness.
"""
AVD_DICT = {
'4.3': AvdInfo('Android 4.3',
'mozemulator-4.3',
'testing/config/tooltool-manifests/androidarm_4_3/releng.manifest',
['-show-kernel', '-debug',
'init,console,gles,memcheck,adbserver,adbclient,adb,avd_config,socket'],
5554),
'6.0': AvdInfo('Android 6.0',
'mozemulator-6.0',
'testing/config/tooltool-manifests/androidarm_6_0/releng.manifest',
['-show-kernel', '-debug',
'init,console,gles,memcheck,adbserver,adbclient,adb,avd_config,socket'],
5554),
'x86': AvdInfo('Android 4.2 x86',
'mozemulator-x86',
'testing/config/tooltool-manifests/androidx86/releng.manifest',
['-debug',
'init,console,gles,memcheck,adbserver,adbclient,adb,avd_config,socket',
'-qemu', '-m', '1024', '-enable-kvm'],
5554)
}
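# Note: 5554 is the default console port of the first emulator instance;
# adb then lists the device as "emulator-5554", which wait_for_start()
# polls for below.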
def verify_android_device(build_obj, install=False, xre=False, debugger=False, verbose=False):
"""
Determine if any Android device is connected via adb.
If no device is found, prompt to start an emulator.
If a device is found or an emulator started and 'install' is
specified, also check whether Firefox is installed on the
device; if not, prompt to install Firefox.
    If 'xre' is specified, also check whether MOZ_HOST_BIN is set
to a valid xre/host-utils directory; if not, prompt to set
one up.
If 'debugger' is specified, also check that JimDB is installed;
if JimDB is not found, prompt to set up JimDB.
Returns True if the emulator was started or another device was
already connected.
"""
device_verified = False
emulator = AndroidEmulator('*', substs=build_obj.substs, verbose=verbose)
devices = emulator.dm.devices()
if (len(devices) > 0) and ('device' in [d[1] for d in devices]):
device_verified = True
elif emulator.is_available():
response = raw_input(
"No Android devices connected. Start an emulator? (Y/n) ").strip()
if response.lower().startswith('y') or response == '':
if not emulator.check_avd():
_log_info("Fetching AVD. This may take a while...")
emulator.update_avd()
_log_info("Starting emulator running %s..." %
emulator.get_avd_description())
emulator.start()
emulator.wait_for_start()
device_verified = True
if device_verified and install:
# Determine if Firefox is installed on the device; if not,
# prompt to install. This feature allows a test command to
# launch an emulator, install Firefox, and proceed with testing
# in one operation. It is also a basic safeguard against other
# cases where testing is requested but Firefox installation has
# been forgotten.
# If Firefox is installed, there is no way to determine whether
# the current build is installed, and certainly no way to
# determine if the installed build is the desired build.
# Installing every time is problematic because:
# - it prevents testing against other builds (downloaded apk)
# - installation may take a couple of minutes.
installed = emulator.dm.shellCheckOutput(['pm', 'list',
'packages', 'org.mozilla.'])
if 'fennec' not in installed and 'firefox' not in installed:
response = raw_input(
"It looks like Firefox is not installed on this device.\n"
"Install Firefox? (Y/n) ").strip()
if response.lower().startswith('y') or response == '':
_log_info("Installing Firefox. This may take a while...")
build_obj._run_make(directory=".", target='install',
ensure_exit_code=False)
if device_verified and xre:
# Check whether MOZ_HOST_BIN has been set to a valid xre; if not,
# prompt to install one.
xre_path = os.environ.get('MOZ_HOST_BIN')
err = None
if not xre_path:
err = "environment variable MOZ_HOST_BIN is not set to a directory" \
"containing host xpcshell"
elif not os.path.isdir(xre_path):
err = '$MOZ_HOST_BIN does not specify a directory'
elif not os.path.isfile(os.path.join(xre_path, 'xpcshell')):
err = '$MOZ_HOST_BIN/xpcshell does not exist'
if err:
xre_path = glob.glob(os.path.join(EMULATOR_HOME_DIR, 'host-utils*'))
for path in xre_path:
if os.path.isdir(path) and os.path.isfile(os.path.join(path, 'xpcshell')):
os.environ['MOZ_HOST_BIN'] = path
err = None
break
if err:
_log_info("Host utilities not found: %s" % err)
response = raw_input(
"Download and setup your host utilities? (Y/n) ").strip()
if response.lower().startswith('y') or response == '':
_log_info("Installing host utilities. This may take a while...")
host_platform = _get_host_platform()
if host_platform:
path = os.path.join(MANIFEST_PATH, host_platform, 'hostutils.manifest')
_get_tooltool_manifest(build_obj.substs, path, EMULATOR_HOME_DIR,
'releng.manifest')
_tooltool_fetch()
xre_path = glob.glob(os.path.join(EMULATOR_HOME_DIR, 'host-utils*'))
for path in xre_path:
if os.path.isdir(path) and os.path.isfile(os.path.join(path, 'xpcshell')):
os.environ['MOZ_HOST_BIN'] = path
err = None
break
if err:
_log_warning("Unable to install host utilities.")
else:
_log_warning(
"Unable to install host utilities -- your platform is not supported!")
if debugger:
# Optionally set up JimDB. See https://wiki.mozilla.org/Mobile/Fennec/Android/GDB.
build_platform = _get_device_platform(build_obj.substs)
jimdb_path = os.path.join(EMULATOR_HOME_DIR, 'jimdb-%s' % build_platform)
jimdb_utils_path = os.path.join(jimdb_path, 'utils')
gdb_path = os.path.join(jimdb_path, 'bin', 'gdb')
err = None
if not os.path.isdir(jimdb_path):
err = '%s does not exist' % jimdb_path
elif not os.path.isfile(gdb_path):
err = '%s not found' % gdb_path
if err:
_log_info("JimDB (%s) not found: %s" % (build_platform, err))
response = raw_input(
"Download and setup JimDB (%s)? (Y/n) " % build_platform).strip()
if response.lower().startswith('y') or response == '':
host_platform = _get_host_platform()
if host_platform:
_log_info(
"Installing JimDB (%s/%s). This may take a while..." % (host_platform,
build_platform))
path = os.path.join(MANIFEST_PATH, host_platform,
'jimdb-%s.manifest' % build_platform)
_get_tooltool_manifest(build_obj.substs, path,
EMULATOR_HOME_DIR, 'releng.manifest')
_tooltool_fetch()
if os.path.isfile(gdb_path):
# Get JimDB utilities from git repository
proc = ProcessHandler(['git', 'pull'], cwd=jimdb_utils_path)
proc.run()
git_pull_complete = False
try:
proc.wait()
if proc.proc.returncode == 0:
git_pull_complete = True
except:
if proc.poll() is None:
proc.kill(signal.SIGTERM)
if not git_pull_complete:
_log_warning("Unable to update JimDB utils from git -- "
"some JimDB features may be unavailable.")
else:
_log_warning("Unable to install JimDB -- unable to fetch from tooltool.")
else:
_log_warning("Unable to install JimDB -- your platform is not supported!")
if os.path.isfile(gdb_path):
# sync gdbinit.local with build settings
_update_gdbinit(build_obj.substs, os.path.join(jimdb_utils_path, "gdbinit.local"))
# ensure JimDB is in system path, so that mozdebug can find it
bin_path = os.path.join(jimdb_path, 'bin')
os.environ['PATH'] = "%s:%s" % (bin_path, os.environ['PATH'])
return device_verified
def run_firefox_for_android(build_obj, params):
"""
Launch Firefox for Android on the connected device.
Optional 'params' allow parameters to be passed to Firefox.
"""
adb_path = _find_sdk_exe(build_obj.substs, 'adb', False)
if not adb_path:
adb_path = 'adb'
dm = DeviceManagerADB(autoconnect=False, adbPath=adb_path, retryLimit=1)
try:
#
# Construct an adb command similar to:
#
# $ adb shell am start -a android.activity.MAIN \
# -n org.mozilla.fennec_$USER \
# -d <url param> \
# --es args "<params>"
#
app = "%s/org.mozilla.gecko.BrowserApp" % build_obj.substs['ANDROID_PACKAGE_NAME']
cmd = ['am', 'start', '-a', 'android.activity.MAIN', '-n', app]
if params:
for p in params:
if urlparse.urlparse(p).scheme != "":
cmd.extend(['-d', p])
params.remove(p)
break
if params:
cmd.extend(['--es', 'args', '"%s"' % ' '.join(params)])
_log_debug(cmd)
output = dm.shellCheckOutput(cmd, timeout=10)
_log_info(output)
except DMError:
_log_warning("unable to launch Firefox for Android")
return 1
return 0
def grant_runtime_permissions(build_obj):
"""
Grant required runtime permissions to the specified app
(typically org.mozilla.fennec_$USER).
"""
app = build_obj.substs['ANDROID_PACKAGE_NAME']
adb_path = _find_sdk_exe(build_obj.substs, 'adb', False)
if not adb_path:
adb_path = 'adb'
dm = DeviceManagerADB(autoconnect=False, adbPath=adb_path, retryLimit=1)
dm.default_timeout = 10
try:
sdk_level = dm.shellCheckOutput(['getprop', 'ro.build.version.sdk'])
if sdk_level and int(sdk_level) >= 23:
_log_info("Granting important runtime permissions to %s" % app)
dm.shellCheckOutput(['pm', 'grant', app, 'android.permission.WRITE_EXTERNAL_STORAGE'])
dm.shellCheckOutput(['pm', 'grant', app, 'android.permission.READ_EXTERNAL_STORAGE'])
dm.shellCheckOutput(['pm', 'grant', app, 'android.permission.ACCESS_FINE_LOCATION'])
dm.shellCheckOutput(['pm', 'grant', app, 'android.permission.CAMERA'])
except DMError:
_log_warning("Unable to grant runtime permissions to %s" % app)
class AndroidEmulator(object):
"""
Support running the Android emulator with an AVD from Mozilla
test automation.
Example usage:
emulator = AndroidEmulator()
if not emulator.is_running() and emulator.is_available():
if not emulator.check_avd():
warn("this may take a while...")
emulator.update_avd()
emulator.start()
emulator.wait_for_start()
emulator.wait()
"""
def __init__(self, avd_type='4.3', verbose=False, substs=None, device_serial=None):
global verbose_logging
self.emulator_log = None
self.emulator_path = 'emulator'
verbose_logging = verbose
self.substs = substs
self.avd_type = self._get_avd_type(avd_type)
self.avd_info = AVD_DICT[self.avd_type]
self.gpu = True
self.restarted = False
adb_path = _find_sdk_exe(substs, 'adb', False)
if not adb_path:
adb_path = 'adb'
self.dm = DeviceManagerADB(autoconnect=False, adbPath=adb_path, retryLimit=1,
deviceSerial=device_serial)
self.dm.default_timeout = 10
_log_debug("Emulator created with type %s" % self.avd_type)
def __del__(self):
if self.emulator_log:
self.emulator_log.close()
def is_running(self):
"""
Returns True if the Android emulator is running.
"""
for proc in psutil.process_iter():
name = proc.name()
# On some platforms, "emulator" may start an emulator with
# process name "emulator64-arm" or similar.
if name and name.startswith('emulator'):
return True
return False
def is_available(self):
"""
Returns True if an emulator executable is found.
"""
found = False
emulator_path = _find_sdk_exe(self.substs, 'emulator', True)
if emulator_path:
self.emulator_path = emulator_path
found = True
return found
def check_avd(self, force=False):
"""
Determine if the AVD is already installed locally.
(This is usually used to determine if update_avd() is likely
to require a download; it is a convenient way of determining
whether a 'this may take a while' warning is warranted.)
Returns True if the AVD is installed.
"""
avd = os.path.join(
EMULATOR_HOME_DIR, 'avd', self.avd_info.name + '.avd')
if force and os.path.exists(avd):
shutil.rmtree(avd)
if os.path.exists(avd):
_log_debug("AVD found at %s" % avd)
return True
return False
def update_avd(self, force=False):
"""
If required, update the AVD via tooltool.
If the AVD directory is not found, or "force" is requested,
download the tooltool manifest associated with the AVD and then
invoke tooltool.py on the manifest. tooltool.py will download the
required archive (unless already present in the local tooltool
cache) and install the AVD.
"""
avd = os.path.join(
EMULATOR_HOME_DIR, 'avd', self.avd_info.name + '.avd')
ini_file = os.path.join(
EMULATOR_HOME_DIR, 'avd', self.avd_info.name + '.ini')
if force and os.path.exists(avd):
shutil.rmtree(avd)
if not os.path.exists(avd):
if os.path.exists(ini_file):
os.remove(ini_file)
path = self.avd_info.tooltool_manifest
_get_tooltool_manifest(self.substs, path, EMULATOR_HOME_DIR, 'releng.manifest')
_tooltool_fetch()
self._update_avd_paths()
def start(self):
"""
Launch the emulator.
"""
if os.path.exists(EMULATOR_AUTH_FILE):
os.remove(EMULATOR_AUTH_FILE)
_log_debug("deleted %s" % EMULATOR_AUTH_FILE)
# create an empty auth file to disable emulator authentication
auth_file = open(EMULATOR_AUTH_FILE, 'w')
auth_file.close()
def outputHandler(line):
self.emulator_log.write("<%s>\n" % line)
if "Invalid value for -gpu" in line or "Invalid GPU mode" in line:
self.gpu = False
env = os.environ
env['ANDROID_AVD_HOME'] = os.path.join(EMULATOR_HOME_DIR, "avd")
command = [self.emulator_path, "-avd",
self.avd_info.name, "-port", "5554"]
if self.gpu:
command += ['-gpu', 'swiftshader']
if self.avd_info.extra_args:
# -enable-kvm option is not valid on OSX
if _get_host_platform() == 'macosx64' and '-enable-kvm' in self.avd_info.extra_args:
self.avd_info.extra_args.remove('-enable-kvm')
command += self.avd_info.extra_args
log_path = os.path.join(EMULATOR_HOME_DIR, 'emulator.log')
self.emulator_log = open(log_path, 'w')
_log_debug("Starting the emulator with this command: %s" %
' '.join(command))
_log_debug("Emulator output will be written to '%s'" %
log_path)
self.proc = ProcessHandler(
command, storeOutput=False, processOutputLine=outputHandler,
env=env)
self.proc.run()
_log_debug("Emulator started with pid %d" %
int(self.proc.proc.pid))
def wait_for_start(self):
"""
Verify that the emulator is running, the emulator device is visible
to adb, and Android has booted.
"""
if not self.proc:
_log_warning("Emulator not started!")
return False
if self.check_completed():
return False
_log_debug("Waiting for device status...")
while(('emulator-5554', 'device') not in self.dm.devices()):
time.sleep(10)
if self.check_completed():
return False
_log_debug("Device status verified.")
_log_debug("Checking that Android has booted...")
complete = False
while(not complete):
output = ''
try:
output = self.dm.shellCheckOutput(
['getprop', 'sys.boot_completed'], timeout=5)
except DMError:
# adb not yet responding...keep trying
pass
if output.strip() == '1':
complete = True
else:
time.sleep(10)
if self.check_completed():
return False
_log_debug("Android boot status verified.")
if not self._verify_emulator():
return False
return True
def check_completed(self):
if self.proc.proc.poll() is not None:
if not self.gpu and not self.restarted:
_log_warning("Emulator failed to start. Your emulator may be out of date.")
_log_warning("Trying to restart the emulator without -gpu argument.")
self.restarted = True
self.start()
return False
_log_warning("Emulator has already completed!")
log_path = os.path.join(EMULATOR_HOME_DIR, 'emulator.log')
_log_warning("See log at %s for more information." % log_path)
return True
return False
def wait(self):
"""
Wait for the emulator to close. If interrupted, close the emulator.
"""
try:
self.proc.wait()
except:
if self.proc.poll() is None:
self.cleanup()
return self.proc.poll()
def cleanup(self):
"""
Close the emulator.
"""
self.proc.kill(signal.SIGTERM)
def get_avd_description(self):
"""
Return the human-friendly description of this AVD.
"""
return self.avd_info.description
def _update_avd_paths(self):
avd_path = os.path.join(EMULATOR_HOME_DIR, "avd")
ini_file = os.path.join(avd_path, "test-1.ini")
ini_file_new = os.path.join(avd_path, self.avd_info.name + ".ini")
os.rename(ini_file, ini_file_new)
avd_dir = os.path.join(avd_path, "test-1.avd")
avd_dir_new = os.path.join(avd_path, self.avd_info.name + ".avd")
os.rename(avd_dir, avd_dir_new)
self._replace_ini_contents(ini_file_new)
def _replace_ini_contents(self, path):
with open(path, "r") as f:
lines = f.readlines()
with open(path, "w") as f:
for line in lines:
if line.startswith('path='):
avd_path = os.path.join(EMULATOR_HOME_DIR, "avd")
f.write('path=%s/%s.avd\n' %
(avd_path, self.avd_info.name))
elif line.startswith('path.rel='):
f.write('path.rel=avd/%s.avd\n' % self.avd_info.name)
else:
f.write(line)
def _telnet_cmd(self, telnet, command):
_log_debug(">>> " + command)
telnet.write('%s\n' % command)
result = telnet.read_until('OK', 10)
_log_debug("<<< " + result)
return result
def _verify_emulator(self):
telnet_ok = False
tn = None
while(not telnet_ok):
try:
tn = telnetlib.Telnet('localhost', self.avd_info.port, 10)
if tn is not None:
tn.read_until('OK', 10)
self._telnet_cmd(tn, 'avd status')
self._telnet_cmd(tn, 'redir list')
self._telnet_cmd(tn, 'network status')
tn.write('quit\n')
tn.read_all()
telnet_ok = True
else:
_log_warning("Unable to connect to port %d" % self.avd_info.port)
except:
_log_warning("Trying again after unexpected exception")
finally:
if tn is not None:
tn.close()
if not telnet_ok:
time.sleep(10)
if self.proc.proc.poll() is not None:
_log_warning("Emulator has already completed!")
return False
return telnet_ok
def _get_avd_type(self, requested):
if requested in AVD_DICT.keys():
return requested
if self.substs:
if not self.substs['TARGET_CPU'].startswith('arm'):
return 'x86'
return '4.3'
def _find_sdk_exe(substs, exe, tools):
if tools:
subdir = 'tools'
else:
subdir = 'platform-tools'
found = False
if not found and substs:
# It's best to use the tool specified by the build, rather
# than something we find on the PATH or crawl for.
try:
exe_path = substs[exe.upper()]
if os.path.exists(exe_path):
found = True
else:
_log_debug(
"Unable to find executable at %s" % exe_path)
except KeyError:
_log_debug("%s not set" % exe.upper())
# Append '.exe' to the name on Windows if it's not present,
# so that the executable can be found.
if (os.name == 'nt' and not exe.lower().endswith('.exe')):
exe += '.exe'
if not found:
# Can exe be found in the Android SDK?
try:
android_sdk_root = os.environ['ANDROID_SDK_ROOT']
exe_path = os.path.join(
android_sdk_root, subdir, exe)
if os.path.exists(exe_path):
found = True
else:
_log_debug(
"Unable to find executable at %s" % exe_path)
except KeyError:
_log_debug("ANDROID_SDK_ROOT not set")
if not found:
# Can exe be found in the default bootstrap location?
mozbuild_path = os.environ.get('MOZBUILD_STATE_PATH',
os.path.expanduser(os.path.join('~', '.mozbuild')))
exe_path = os.path.join(
mozbuild_path, 'android-sdk-linux', subdir, exe)
if os.path.exists(exe_path):
found = True
else:
_log_debug(
"Unable to find executable at %s" % exe_path)
if not found:
# Is exe on PATH?
exe_path = find_executable(exe)
if exe_path:
found = True
else:
_log_debug("Unable to find executable on PATH")
if found:
_log_debug("%s found at %s" % (exe, exe_path))
else:
exe_path = None
return exe_path
def _log_debug(text):
if verbose_logging:
print "DEBUG: %s" % text
def _log_warning(text):
print "WARNING: %s" % text
def _log_info(text):
print "%s" % text
def _download_file(url, filename, path):
f = urllib2.urlopen(url)
if not os.path.isdir(path):
try:
os.makedirs(path)
except Exception, e:
_log_warning(str(e))
return False
local_file = open(os.path.join(path, filename), 'wb')
local_file.write(f.read())
local_file.close()
_log_debug("Downloaded %s to %s/%s" % (url, path, filename))
return True
def _get_tooltool_manifest(substs, src_path, dst_path, filename):
if not os.path.isdir(dst_path):
try:
os.makedirs(dst_path)
except Exception, e:
_log_warning(str(e))
copied = False
if substs and 'top_srcdir' in substs:
src = os.path.join(substs['top_srcdir'], src_path)
if os.path.exists(src):
dst = os.path.join(dst_path, filename)
shutil.copy(src, dst)
copied = True
_log_debug("Copied tooltool manifest %s to %s" % (src, dst))
if not copied:
url = os.path.join(TRY_URL, src_path)
_download_file(url, filename, dst_path)
def _tooltool_fetch():
def outputHandler(line):
_log_debug(line)
_download_file(TOOLTOOL_URL, 'tooltool.py', EMULATOR_HOME_DIR)
command = [sys.executable, 'tooltool.py',
'fetch', '-o', '-m', 'releng.manifest']
proc = ProcessHandler(
command, processOutputLine=outputHandler, storeOutput=False,
cwd=EMULATOR_HOME_DIR)
proc.run()
try:
proc.wait()
except:
if proc.poll() is None:
proc.kill(signal.SIGTERM)
def _get_host_platform():
plat = None
if 'darwin' in str(sys.platform).lower():
plat = 'macosx64'
elif 'linux' in str(sys.platform).lower():
if '64' in platform.architecture()[0]:
plat = 'linux64'
else:
plat = 'linux32'
return plat
def _get_device_platform(substs):
# PIE executables are required when SDK level >= 21 - important for gdbserver
adb_path = _find_sdk_exe(substs, 'adb', False)
if not adb_path:
adb_path = 'adb'
dm = DeviceManagerADB(autoconnect=False, adbPath=adb_path, retryLimit=1)
sdk_level = None
try:
cmd = ['getprop', 'ro.build.version.sdk']
_log_debug(cmd)
output = dm.shellCheckOutput(cmd, timeout=10)
if output:
sdk_level = int(output)
except:
_log_warning("unable to determine Android sdk level")
pie = ''
if sdk_level and sdk_level >= 21:
pie = '-pie'
if substs['TARGET_CPU'].startswith('arm'):
return 'arm%s' % pie
return 'x86%s' % pie
def _update_gdbinit(substs, path):
if os.path.exists(path):
obj_replaced = False
src_replaced = False
# update existing objdir/srcroot in place
for line in fileinput.input(path, inplace=True):
if "feninit.default.objdir" in line and substs and 'MOZ_BUILD_ROOT' in substs:
print("python feninit.default.objdir = '%s'" % substs['MOZ_BUILD_ROOT'])
obj_replaced = True
elif "feninit.default.srcroot" in line and substs and 'top_srcdir' in substs:
print("python feninit.default.srcroot = '%s'" % substs['top_srcdir'])
src_replaced = True
else:
print(line.strip())
# append objdir/srcroot if not updated
if (not obj_replaced) and substs and 'MOZ_BUILD_ROOT' in substs:
with open(path, "a") as f:
f.write("\npython feninit.default.objdir = '%s'\n" % substs['MOZ_BUILD_ROOT'])
if (not src_replaced) and substs and 'top_srcdir' in substs:
with open(path, "a") as f:
f.write("python feninit.default.srcroot = '%s'\n" % substs['top_srcdir'])
else:
# write objdir/srcroot to new gdbinit file
with open(path, "w") as f:
if substs and 'MOZ_BUILD_ROOT' in substs:
f.write("python feninit.default.objdir = '%s'\n" % substs['MOZ_BUILD_ROOT'])
if substs and 'top_srcdir' in substs:
f.write("python feninit.default.srcroot = '%s'\n" % substs['top_srcdir'])
| Yukarumya/Yukarum-Redfoxes | testing/mozbase/mozrunner/mozrunner/devices/android_device.py | Python | mpl-2.0 | 30,367 |
from pytos.securechange.xml_objects.restapi.step.initialize import *
logger = logging.getLogger(XML_LOGGER_NAME)
class AbsNetwork(XML_Object_Base, metaclass=SubclassWithIdentifierRegistry):
"""Base class for parsing all network object"""
@classmethod
def from_xml_node(cls, xml_node):
try:
network_type = xml_node.attrib[Attributes.XSI_NAMESPACE_TYPE]
except KeyError:
msg = 'XML node is missing the XSI attribute "{}"'.format(Attributes.XSI_NAMESPACE_TYPE)
logger.error(msg)
raise ValueError(msg)
else:
try:
return cls.registry[network_type](xml_node)
except KeyError:
logger.error('Unknown violation object type "{}"'.format(network_type))
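# Dispatch note: the SubclassWithIdentifierRegistry metaclass registers each
# subclass under its class_identifier, so from_xml_node can look up the
# node's xsi:type value in cls.registry and instantiate the matching
# subclass without an if/elif chain. AbsService, Binding and AbsSlimRule
# below reuse the same pattern.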
class NetworkObject(AbsNetwork):
"""Base class for all sub type of the network object"""
def __init__(self, xml_node, element):
self.address_book = get_xml_text_value(xml_node, Elements.ADDRESS)
self.type_on_device = get_xml_text_value(xml_node, Elements.TYPE)
self.version_id = get_xml_int_value(xml_node, Elements.VERSION_ID)
self.referenced = get_xml_text_value(xml_node, Elements.REFERENCED)
interface_name = get_xml_text_value(xml_node, Elements.INTERFACE_NAME)
self.nat_info = NatInfo(interface_name)
self.installable_target = get_xml_text_value(xml_node, Elements.INSTALLABLE_TARGET)
self.group_id = get_xml_text_value(xml_node, Elements.GROUP_ID)
self.device_type = get_xml_text_value(xml_node, Elements.DEVICE_TYPE)
self.ip_type = get_xml_text_value(xml_node, Elements.IP_TYPE)
self.id = get_xml_text_value(xml_node, Elements.ID)
zone_node = get_xml_node(xml_node, Elements.ZONE, True)
if zone_node is not None:
self.zone = PolicyZone(zone_node)
else:
self.zone = None
self.device_id = get_xml_int_value(xml_node, Elements.DEVICE_ID)
admin_domain_node = get_xml_node(xml_node, Elements.ADMIN_DOMAIN, True)
if admin_domain_node is not None:
self.admin_domain = AdminDomain.from_xml_node(admin_domain_node)
else:
self.admin_domain = None
self.inDomainElementId = get_xml_text_value(xml_node, Elements.INDOMAINELEMENTID)
self.global_el = Flat_XML_Object_Base(Elements.GLOBAL, None, get_xml_text_value(xml_node, Elements.GLOBAL))
self.origin = get_xml_text_value(xml_node, Elements.ORIGIN)
self.comment = get_xml_text_value(xml_node, Elements.COMMENT)
self.shared = get_xml_text_value(xml_node, Elements.SHARED)
self.name = get_xml_text_value(xml_node, Elements.NAME)
self.implicit = get_xml_text_value(xml_node, Elements.IMPLICIT)
self.class_name = get_xml_text_value(xml_node, Elements.CLASS_NAME)
self.display_name = get_xml_text_value(xml_node, Elements.DISPLAY_NAME)
self.uid = get_xml_text_value(xml_node, Elements.UID)
self.any_zone = get_xml_text_value(xml_node, Elements.ANY_ZONE)
self.management_domain = get_xml_text_value(xml_node, Elements.MANAGEMENT_DOMAIN)
self.domain_id = get_xml_int_value(xml_node, Elements.DOMAIN_ID)
self.application_name = get_xml_text_value(xml_node, Elements.APPLICATION_NAME)
super().__init__(element)
def __str__(self):
return self.display_name
class AnyNetworkObject(NetworkObject):
"""The class represents the any_network_object"""
class_identifier = Attributes.VIOLATION_ANY_NETWORK_OBJECT
def __init__(self, xml_node):
super().__init__(xml_node, xml_node.find('.').tag)
self.set_attrib(Attributes.XSI_TYPE, Attributes.VIOLATION_ANY_NETWORK_OBJECT)
def __str__(self):
return "Any"
class HostNetworkObject(NetworkObject):
"""The class represents the host_network_object"""
class_identifier = Attributes.HOST_NETWORK_OBJECT
def __init__(self, xml_node):
super().__init__(xml_node, xml_node.find('.').tag)
self.ip = get_xml_text_value(xml_node, Elements.IP)
self.subnet_mask = get_xml_text_value(xml_node, Elements.SUBNET_MASK)
self.set_attrib(Attributes.XSI_TYPE, Attributes.HOST_NETWORK_OBJECT)
def __str__(self):
return self.ip
class SubnetNetworkObject(NetworkObject):
"""The class represents the subnet_network_object"""
class_identifier = Attributes.SUBNET_NETWORK_OBJECT
def __init__(self, xml_node):
super().__init__(xml_node, xml_node.find('.').tag)
self.ip = get_xml_text_value(xml_node, Elements.IP)
self.subnet_mask = get_xml_text_value(xml_node, Elements.SUBNET_MASK)
self.set_attrib(Attributes.XSI_TYPE, Attributes.SUBNET_NETWORK_OBJECT)
def __str__(self):
return "{}/{}".format(self.ip, self.subnet_mask)
class RangeNetworkObject(NetworkObject):
class_identifier = Attributes.RANGE_NETWORK_OBJECT
def __init__(self, xml_node):
super().__init__(xml_node, xml_node.find('.').tag)
self.set_attrib(Attributes.XSI_TYPE, Attributes.RANGE_NETWORK_OBJECT)
self.min_ip = get_xml_text_value(xml_node, Elements.MIN_IP)
self.max_ip = get_xml_text_value(xml_node, Elements.MAX_IP)
def __str__(self):
return self.min_ip + '-' + self.max_ip
class NetworkObjectGroup(NetworkObject):
"""The class represents the subnet_network_object"""
class_identifier = Attributes.NETWORK_OBJECT_GROUP
def __init__(self, xml_node):
self.members = []
for member_node in xml_node.iter(tag=Elements.MEMBER):
self.members.append(NetworkObject.from_xml_node(member_node))
self.exclusions = []
for member_node in xml_node.iter(tag=Elements.EXCLUSION):
self.exclusions.append(NetworkObject.from_xml_node(member_node))
super().__init__(xml_node, xml_node.find('.').tag)
self.set_attrib(Attributes.XSI_TYPE, Attributes.NETWORK_OBJECT_GROUP)
class DomainNetworkObject(NetworkObject):
class_identifier = Attributes.DOMAIN_NETWORK_OBJECT
def __init__(self, xml_node):
super().__init__(xml_node, xml_node.find('.').tag)
self.set_attrib(Attributes.XSI_TYPE, Attributes.DOMAIN_NETWORK_OBJECT)
class InstallOnNetworkObject(NetworkObject):
class_identifier = Attributes.INSTALL_ON_NETWORK_OBJECT
def __init__(self, xml_node):
super().__init__(xml_node, xml_node.find('.').tag)
self.set_attrib(Attributes.XSI_TYPE, Attributes.INSTALL_ON_NETWORK_OBJECT)
class HostNetworkObjectWithInterfaces(NetworkObject):
class_identifier = Attributes.HOST_NETWORK_OBJECT_WITH_INTERFACES
def __init__(self, xml_node):
super().__init__(xml_node, xml_node.find('.').tag)
self.set_attrib(Attributes.XSI_TYPE, Attributes.HOST_NETWORK_OBJECT_WITH_INTERFACES)
self.ip = get_xml_text_value(xml_node, Elements.IP)
self.subnet_mask = get_xml_text_value(xml_node, Elements.SUBNET_MASK)
self.interfaces = []
for member_node in xml_node.iter(tag=Elements.INTERFACE_FOR_NETWORK_OBJECT):
self.interfaces.append(NetworkObject.from_xml_node(member_node))
class CloudSecurityGroup(NetworkObject):
class_identifier = Attributes.CLOUD_SECURITY_GROUP_NETWORK_OBJECT
def __init__(self, xml_node):
self.members = []
for member_node in xml_node.iter(tag=Elements.MEMBER):
self.members.append(NetworkObject.from_xml_node(member_node))
self.exclusions = []
for member_node in xml_node.iter(tag=Elements.EXCLUSION):
self.exclusions.append(NetworkObject.from_xml_node(member_node))
super().__init__(xml_node, xml_node.find('.').tag)
self.set_attrib(Attributes.XSI_TYPE, Attributes.CLOUD_SECURITY_GROUP_NETWORK_OBJECT)
class InternetNetworkObject(NetworkObject):
class_identifier = Attributes.INTERNET_NETWORK_OBJECT
def __init__(self, xml_node):
super().__init__(xml_node, xml_node.find('.').tag)
self.set_attrib(Attributes.XSI_TYPE, Attributes.INTERNET_NETWORK_OBJECT)
class AbsService(XML_Object_Base, metaclass=SubclassWithIdentifierRegistry):
"""Base class for parsing all services objects"""
@classmethod
def from_xml_node(cls, xml_node):
if xml_node is None:
return None
try:
service_type = xml_node.attrib[Attributes.XSI_NAMESPACE_TYPE]
except KeyError:
msg = 'XML node is missing the XSI attribute "{}"'.format(Attributes.XSI_NAMESPACE_TYPE)
logger.error(msg)
raise ValueError(msg)
else:
try:
return cls.registry[service_type](xml_node)
except KeyError:
logger.error('Unknown violation object type "{}"'.format(service_type))
class Service(AbsService):
"""Base class for all sub type of the services objects"""
def __init__(self, xml_node, element):
self.version_id = get_xml_text_value(xml_node, Elements.VERSION_ID)
self.referenced = get_xml_text_value(xml_node, Elements.REFERENCED)
self.match_rule = get_xml_text_value(xml_node, Elements.MATCH_RULE)
self.id = get_xml_text_value(xml_node, Elements.ID)
self.device_id = get_xml_int_value(xml_node, Elements.DEVICE_ID)
self.admin_domain = AdminDomain.from_xml_node(xml_node)
self.in_domain_element_id = get_xml_text_value(xml_node, Elements.INDOMAINELEMENTID)
self.global_el = Flat_XML_Object_Base(Elements.GLOBAL, None, get_xml_text_value(xml_node, Elements.GLOBAL))
self.origin = get_xml_text_value(xml_node, Elements.ORIGIN)
self.comment = get_xml_text_value(xml_node, Elements.COMMENT)
self.shared = get_xml_text_value(xml_node, Elements.SHARED)
self.name = get_xml_text_value(xml_node, Elements.NAME)
self.implicit = get_xml_text_value(xml_node, Elements.IMPLICIT)
self.class_name = get_xml_text_value(xml_node, Elements.CLASS_NAME)
self.display_name = get_xml_text_value(xml_node, Elements.DISPLAY_NAME)
self.uid = get_xml_text_value(xml_node, Elements.UID)
super().__init__(element)
def __str__(self):
return self.display_name
class AnyService(Service):
"""The class represents the any_service_object"""
class_identifier = Attributes.VIOLATION_ANY_SERVICE
def __init__(self, xml_node):
self.negate = get_xml_text_value(xml_node, Elements.NEGATE)
self.match_for_any = get_xml_text_value(xml_node, Elements.MATCH_FOR_ANY)
self.timeout = get_xml_text_value(xml_node, Elements.TIMEOUT)
self.min_protocol = get_xml_int_value(xml_node, Elements.MIN_PROTOCOL)
self.max_protocol = get_xml_int_value(xml_node, Elements.MAX_PROTOCOL)
super().__init__(xml_node, xml_node.find('.').tag)
self.set_attrib(Attributes.XSI_TYPE, Attributes.VIOLATION_ANY_SERVICE)
class TransportService(Service):
"""The class represents the transport_service_object"""
class_identifier = Attributes.TRANSPORT_SERVICE
def __init__(self, xml_node):
self.cp_inspect_streaming_name = get_xml_text_value(xml_node, Elements.CP_INSPECT_STREAMING_NAME)
self.min_protocol = get_xml_int_value(xml_node, Elements.MIN_PROTOCOL)
self.max_protocol = get_xml_int_value(xml_node, Elements.MAX_PROTOCOL)
self.min_port = get_xml_int_value(xml_node, Elements.MIN_PORT)
self.max_port = get_xml_int_value(xml_node, Elements.MAX_PORT)
self.protocol = get_xml_int_value(xml_node, Elements.PROTOCOL)
self.min_value_source = get_xml_int_value(xml_node, Elements.MIN_VALUE_SOURCE)
self.max_value_source = get_xml_int_value(xml_node, Elements.MAX_VALUE_SOURCE)
self.cp_prototype_name = get_xml_text_value(xml_node, Elements.CP_PROTOTYPE_NAME)
self.match_for_any = get_xml_text_value(xml_node, Elements.MATCH_FOR_ANY)
self.negate = get_xml_text_value(xml_node, Elements.NEGATE)
self.timeout = get_xml_text_value(xml_node, Elements.TIMEOUT)
self.display_name = get_xml_text_value(xml_node, Elements.DISPLAY_NAME)
super().__init__(xml_node, xml_node.find('.').tag)
self.set_attrib(Attributes.XSI_TYPE, Attributes.TRANSPORT_SERVICE)
class IcmpService(Service):
"""The class represents the icmp_service_object"""
class_identifier = Attributes.ICMP_SERVICE
def __init__(self, xml_node):
super().__init__(xml_node, xml_node.find('.').tag)
self.set_attrib(Attributes.XSI_TYPE, Attributes.ICMP_SERVICE)
self.type_on_device = get_xml_text_value(xml_node, Elements.TYPE_ON_DEVICE)
self.negate = get_xml_text_value(xml_node, Elements.NEGATE)
self.match_for_any = get_xml_text_value(xml_node, Elements.MATCH_FOR_ANY)
self.timeout = get_xml_int_value(xml_node, Elements.TIMEOUT)
self.min_icmp_type = get_xml_int_value(xml_node, Elements.MIN_ICMP_TYPE)
self.max_icmp_type = get_xml_int_value(xml_node, Elements.MAX_ICMP_TYPE)
class IPService(Service):
"""The class represents the ip_service_object"""
class_identifier = Attributes.IP_SERVICE
def __init__(self, xml_node):
self.negate = get_xml_text_value(xml_node, Elements.NEGATE)
self.match_for_any = get_xml_text_value(xml_node, Elements.MATCH_FOR_ANY)
self.timeout = get_xml_text_value(xml_node, Elements.TIMEOUT)
self.min_protocol = get_xml_int_value(xml_node, Elements.MIN_PROTOCOL)
self.max_protocol = get_xml_int_value(xml_node, Elements.MAX_PROTOCOL)
super().__init__(xml_node, xml_node.find('.').tag)
self.set_attrib(Attributes.XSI_TYPE, Attributes.IP_SERVICE)
class ServiceGroup(Service):
"""The class represents the ip_service_object"""
class_identifier = Attributes.SERVICE_GROUP
def __init__(self, xml_node):
super().__init__(xml_node, xml_node.find('.').tag)
self.set_attrib(Attributes.XSI_TYPE, Attributes.SERVICE_GROUP)
self.members = [Service.from_xml_node(node) for node in xml_node.findall('member')]
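# Note: group members are parsed back through Service.from_xml_node, so a
# nested service_group member resolves recursively to a ServiceGroup.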
class Binding(XML_Object_Base, metaclass=SubclassWithIdentifierRegistry):
"""Base Binding Class that handles all Binding sub Binding DTO parsing"""
@classmethod
def from_xml_node(cls, xml_node):
if xml_node is None:
return None
try:
binding_type = xml_node.attrib[Attributes.XSI_NAMESPACE_TYPE]
except KeyError:
msg = 'XML node is missing the XSI attribute "{}"'.format(Attributes.XSI_NAMESPACE_TYPE)
logger.error(msg)
raise ValueError(msg)
else:
try:
return cls.registry[binding_type](xml_node)
except KeyError:
logger.error('Unknown binding object type "{}"'.format(binding_type))
class AclBinding(Binding):
"""The class represents the acl_binding_object which is sub type of Binding_DTO"""
class_identifier = Attributes.ACL__BINDING
def __init__(self, xml_node):
self.acl_name = get_xml_text_value(xml_node, Elements.ACL_NAME)
self.incoming_interface_names = [node.text for node in xml_node.iter(Elements.INCOMING_INTERFACE_NAME)]
self.outgoing_interface_names = [node.text for node in xml_node.iter(Elements.OUTGOING_INTERFACE_NAME)]
super().__init__(Elements.BINDING)
self.set_attrib(Attributes.XSI_TYPE, Attributes.ACL__BINDING)
class ZoneBinding(Binding):
"""The class represents the zone_binding object which is sub type of Binding_DTO"""
class_identifier = Attributes.ZONE__BINDING
def __init__(self, xml_node):
self.from_zone = get_xml_text_value(xml_node, Elements.FROM_ZONE)
self.to_zone = get_xml_text_value(xml_node, Elements.TO_ZONE)
super().__init__(Elements.BINDING)
self.set_attrib(Attributes.XSI_TYPE, Attributes.ZONE__BINDING)
class PolicyBinding(Binding):
class_identifier = Attributes.POLICY__BINDING
def __init__(self, xml_node):
self.policy_name = get_xml_text_value(xml_node, Elements.POLICY_NAME)
self.installed_on_module = get_xml_text_value(xml_node, Elements.INSTALLED_ON_MODULE)
super().__init__(Elements.BINDING)
self.set_attrib(Attributes.XSI_TYPE, Attributes.POLICY__BINDING)
class AbsSlimRule(XML_Object_Base, metaclass=SubclassWithIdentifierRegistry):
"""AbsSlimRule Class that handles all SlimRule sub DTO parsing"""
@classmethod
def from_xml_node(cls, xml_node):
if xml_node is None:
return None
try:
rule_type = xml_node.attrib[Attributes.XSI_NAMESPACE_TYPE]
except KeyError:
msg = 'XML node is missing the XSI attribute "{}"'.format(Attributes.XSI_NAMESPACE_TYPE)
logger.error(msg)
raise ValueError(msg)
else:
try:
return cls.registry[rule_type].from_xml_node(xml_node)
except KeyError:
                logger.error('Unknown rule object type "{}"'.format(rule_type))
class SlimRule(AbsSlimRule):
"""The class represents the SlimRule which is sub type of SlimRule"""
def __init__(self, uid, destination_networks=None, source_networks=None,
destination_services=None, rule_number=None, additional_parameters=None, communities=None,
rule_location=None, applications=None, install_ons=None, users=None, track=None, source_services=None,
from_zone=None, to_zone=None, action=None, comment=None, name=None, is_disabled=None):
self.additional_parameters = additional_parameters
self.communities = communities
self.sourceNetworks = source_networks
self.destinationNetworks = destination_networks
self.destination_services = destination_services
self.install_ons = install_ons
self.track = track
self.rule_location = rule_location
self.source_services = source_services
self.uid = uid
self.rule_number = rule_number
if applications is not None:
self.applications = applications
self.from_zone = from_zone
self.to_zone = to_zone
self.action = action
self.comment = comment
self.name = name
self.is_disabled = is_disabled
self.users = users
super().__init__(Elements.RULE)
self.set_attrib(Attributes.XSI_TYPE, Attributes.SLIM_RULE_WITH_META_DATA)
@classmethod
def from_xml_node(cls, xml_node):
"""
Initialize the object from a XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
rule_uid = get_xml_text_value(xml_node, Elements.UID)
rule_location = get_xml_text_value(xml_node, Elements.RULE_LOCATION)
rule_number = get_xml_int_value(xml_node, Elements.RULENUMBER)
from_zone = get_xml_text_value(xml_node, Elements.FROM_ZONE)
to_zone = get_xml_text_value(xml_node, Elements.TO_ZONE)
action = get_xml_text_value(xml_node, Elements.ACTION)
comment = get_xml_text_value(xml_node, Elements.COMMENT)
name = get_xml_text_value(xml_node, Elements.NAME)
is_disabled = get_xml_text_value(xml_node, Elements.ISDISABLED)
destination_networks = []
for destination_network_node in xml_node.iter(tag=Elements.DESTNETWORKS):
network_object = NetworkObject.from_xml_node(destination_network_node)
destination_networks.append(network_object)
source_networks = []
for source_network_node in xml_node.iter(tag=Elements.SOURCENETWORKS):
network_object = NetworkObject.from_xml_node(source_network_node)
source_networks.append(network_object)
destination_services = []
for destination_service_node in xml_node.iter(Elements.DESTINATIONSERVICES):
service = Service.from_xml_node(destination_service_node)
destination_services.append(service)
additional_parameters = []
parameters_node = get_xml_node(xml_node, Elements.ADDITIONAL_PARAMETERS, True)
if parameters_node is not None:
for parameter_node in parameters_node.iter(Elements.ADDITIONAL_PARAMETER):
additional_parameter = AdditionalParameter.from_xml_node(parameter_node)
additional_parameters.append(additional_parameter)
communities = []
communities_node = get_xml_node(xml_node, Elements.COMMUNITIES, True)
if communities_node is not None:
for community_node in communities_node.iter(Elements.COMMUNITY):
community = VpnCommunity.from_xml_node(community_node)
communities.append(community)
applications = []
applications_node = get_xml_node(xml_node, Elements.APPLICATIONS, True)
if applications_node is not None:
for application_node in applications_node.iter(Elements.APPLICATION):
application = Application.from_xml_node(application_node)
applications.append(application)
install_ons = []
install_ons_node = get_xml_node(xml_node, Elements.INSTALL_ONS, True)
if install_ons_node is not None:
for install_on_node in install_ons_node.iter(tag=Elements.INSTALL_ON):
network_object = NetworkObject.from_xml_node(install_on_node)
install_ons.append(network_object)
users = []
users_node = get_xml_node(xml_node, Elements.USERS, True)
if users_node is not None:
for user_node in users_node.iter(tag=Elements.USER):
user = DeviceUser.from_xml_node(user_node)
users.append(user)
track_node = get_xml_node(xml_node, Elements.TRACK, True)
if track_node is not None:
track = RuleTrack.from_xml_node(track_node)
else:
track = None
source_services = []
for source_service_node in xml_node.iter(Elements.SOURCESERVICES):
service = Service.from_xml_node(source_service_node)
source_services.append(service)
return cls(rule_uid, destination_networks, source_networks, destination_services,
rule_number, additional_parameters, communities, rule_location, applications, install_ons,
users, track, source_services, from_zone, to_zone, action, comment, name, is_disabled)
def to_pretty_str(self):
rule_string = "Rule name: {}\n".format(self.name)
rule_string += "From zone: {}\n".format(self.from_zone)
rule_string += "To zone: {}\n".format(self.to_zone)
rule_string += "Sources: {}\n".format(", ".join(str(src) for src in self.sourceNetworks))
rule_string += "Destinations: {}\n".format(", ".join(str(src) for src in self.destinationNetworks))
rule_string += "Services: {}\n".format(", ".join(str(srv) for srv in self.destination_services))
if self.comment is not None:
rule_string += "Comment: {}\n".format(unescape(self.comment))
return rule_string
class SlimRuleWithMetadata(SlimRule):
"""This class represents the SlimRuleWithMetadata"""
def __init__(self, uid, destination_networks=None, source_networks=None,
destination_services=None, rule_number=None, additional_parameters=None, communities=None,
rule_location=None, applications=None, install_ons=None, users=None, track=None, source_services=None,
from_zone=None, to_zone=None, action=None, comment=None, name=None, is_disabled=None, rule_meta_data=None):
super().__init__(uid, destination_networks, source_networks,
destination_services, rule_number, additional_parameters, communities,
rule_location, applications, install_ons, users, track, source_services,
from_zone, to_zone, action, comment, name, is_disabled)
self.rule_meta_data = rule_meta_data
self.set_attrib(Attributes.XSI_TYPE, Attributes.SLIM_RULE_WITH_META_DATA)
@classmethod
def from_xml_node(cls, xml_node):
"""
Initialize the object from a XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
slim_rule = super().from_xml_node(xml_node)
rule_meta_data_node = get_xml_node(xml_node, Elements.RULE_METADATA, True)
if rule_meta_data_node is not None:
slim_rule.rule_meta_data = RuleMetaData.from_xml_node(rule_meta_data_node)
else:
slim_rule.rule_meta_data = None
return slim_rule
    def to_pretty_str(self):
        rule_string = super().to_pretty_str()
        if self.rule_meta_data is not None:
            rule_string += self.rule_meta_data.to_pretty_str()
        return rule_string
class RuleMetaData(XML_Object_Base):
"""This class represents the RuleMetaData used in rule decommission field"""
def __init__(self, violations, permissiveness_level, legacy_rule, ticket_ids, tech_owner, last_hit,
rule_description, business_owners, last_modified, shadowed_status, applications):
self.violations = violations
self.permissiveness_level = permissiveness_level
self.legacy_rule = legacy_rule
self.ticket_ids = ticket_ids
self.tech_owner = tech_owner
self.last_hit = last_hit
self.rule_description = rule_description
self.business_owners = business_owners
self.last_modified = last_modified
self.shadowed_status = shadowed_status
self.applications = applications
super().__init__(Elements.RULE_METADATA)
@classmethod
def from_xml_node(cls, xml_node):
"""
Initialize the object from a XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
violations = get_xml_text_value(xml_node, Elements.VIOLATIONS)
permissiveness_level = get_xml_text_value(xml_node, Elements.PERMISSIVENESS_LEVEL)
legacy_rule = get_xml_text_value(xml_node, Elements.LEGACY_RULE)
ticket_ids = get_xml_text_value(xml_node, Elements.TICKET_IDS)
last_hit = get_xml_text_value(xml_node, Elements.LAST_HIT)
rule_description = get_xml_text_value(xml_node, Elements.RULE_DESCRIPTION)
business_owners = get_xml_text_value(xml_node, Elements.BUSINESS_OWNERS)
last_modified = get_xml_text_value(xml_node, Elements.LAST_MODIFIED)
shadowed_status = get_xml_text_value(xml_node, Elements.SHADOWED_STATUS)
tech_owner = get_xml_text_value(xml_node, Elements.TECH_OWNER)
applications = []
applications_node = get_xml_node(xml_node, Elements.APPLICATIONS, True)
if applications_node is not None:
for application_node in applications_node.iter(Elements.APPLICATION):
application = SaApplication.from_xml_node(application_node)
applications.append(application)
return cls(violations, permissiveness_level, legacy_rule, ticket_ids, tech_owner, last_hit, rule_description,
business_owners, last_modified, shadowed_status, applications)
def to_pretty_str(self):
meta_data_string = ''
if self.violations is not None:
meta_data_string += "Violations: {}\n".format(self.violations)
if self.permissiveness_level is not None:
meta_data_string += "Permissiveness level: {}\n".format(self.permissiveness_level)
if self.legacy_rule is not None:
meta_data_string += "Legacy rule: {}\n".format(self.legacy_rule)
if self.ticket_ids is not None:
meta_data_string += "Ticket IDs: {}\n".format(self.ticket_ids)
if self.tech_owner is not None:
meta_data_string += "Tech Owner: {}\n".format(self.tech_owner)
if self.last_hit is not None:
meta_data_string += "Last hit: {}\n".format(self.last_hit)
if self.rule_description is not None:
meta_data_string += "Rule description: {}\n".format(self.rule_description)
if self.business_owners is not None:
meta_data_string += "Business owners: {}\n".format(self.business_owners)
if self.last_modified is not None:
meta_data_string += "Last modified: {}\n".format(self.last_modified)
if self.shadowed_status is not None:
meta_data_string += "Shadowed status: {}\n".format(self.shadowed_status)
if self.applications:
meta_data_string += "Applications: {}\n".format(", ".join(str(app) for app in self.applications))
return meta_data_string
class AdditionalParameter(XML_Object_Base):
"""This class represents the Additional Parameter used in rule decommission field"""
def __init__(self, num_id, display_name, class_name, name, uid):
self.id = num_id
self.display_name = display_name
self.class_name = class_name
self.name = name
self.uid = uid
super().__init__(Elements.ADDITIONAL_PARAMETER)
@classmethod
def from_xml_node(cls, xml_node):
"""
Initialize the object from a XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
num_id = get_xml_int_value(xml_node, Elements.ID)
display_name = get_xml_text_value(xml_node, Elements.DISPLAY_NAME)
class_name = get_xml_text_value(xml_node, Elements.CLASS_NAME)
name = get_xml_text_value(xml_node, Elements.NAME)
uid = get_xml_text_value(xml_node, Elements.UID)
return cls(num_id, display_name, class_name, name, uid)
class VpnCommunity(XML_Object_Base):
"""This class represents the VpnCommunity used in rule decommission field"""
def __init__(self, class_name, name, uid):
self.class_name = class_name
self.name = name
self.uid = uid
super().__init__(Elements.COMMUNITIES)
@classmethod
def from_xml_node(cls, xml_node):
"""
Initialize the object from a XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
class_name = get_xml_text_value(xml_node, Elements.CLASS_NAME)
name = get_xml_text_value(xml_node, Elements.NAME)
uid = get_xml_text_value(xml_node, Elements.UID)
return cls(class_name, name, uid)
class Application(XML_Object_Base):
"""This class represents the Application used in rule decommission field"""
def __init__(self, application_name, in_domain_element_id, domain_id, device_id, admin_domain, a_global, origin,
comment, shared, name, implicit, class_name, display_name, uid):
self.application_name = application_name
self.inDomainElementId = in_domain_element_id
self.domain_id = domain_id
self.device_id = device_id
self.admin_domain = admin_domain
self.a_global = a_global
self.origin = origin
self.comment = comment
self.shared = shared
self.name = name
self.implicit = implicit
self.class_name = class_name
self.display_name = display_name
self.uid = uid
super().__init__(Elements.APPLICATION)
@classmethod
def from_xml_node(cls, xml_node):
"""
Initialize the object from a XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
application_name = get_xml_text_value(xml_node, Elements.APPLICATION_NAME)
in_domain_element_id = get_xml_text_value(xml_node, Elements.INDOMAINELEMENTID)
domain_id = get_xml_int_value(xml_node, Elements.DOMAIN_ID)
device_id = get_xml_int_value(xml_node, Elements.DEVICE_ID)
admin_domain_node = get_xml_node(xml_node, Elements.ADMIN_DOMAIN, True)
if admin_domain_node is not None:
admin_domain = AdminDomain.from_xml_node(admin_domain_node)
else:
admin_domain = None
a_global = get_xml_text_value(xml_node, Elements.GLOBAL)
origin = get_xml_text_value(xml_node, Elements.ORIGIN)
comment = get_xml_text_value(xml_node, Elements.COMMENT)
shared = get_xml_text_value(xml_node, Elements.SHARED)
name = get_xml_text_value(xml_node, Elements.NAME)
implicit = get_xml_text_value(xml_node, Elements.IMPLICIT)
class_name = get_xml_text_value(xml_node, Elements.CLASS_NAME)
display_name = get_xml_text_value(xml_node, Elements.DISPLAY_NAME)
uid = get_xml_text_value(xml_node, Elements.UID)
return cls(application_name, in_domain_element_id, domain_id, device_id, admin_domain, a_global, origin,
comment, shared, name, implicit, class_name, display_name, uid)
class AdminDomain(XML_Object_Base):
"""This class represents the AdminDomain used in rule decommission field"""
def __init__(self, name, uid):
self.name = name
self.uid = uid
super().__init__(Elements.ADMIN_DOMAIN)
@classmethod
def from_xml_node(cls, xml_node):
"""
Initialize the object from a XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
name = get_xml_text_value(xml_node, Elements.NAME)
uid = get_xml_text_value(xml_node, Elements.UID)
return cls(name, uid)
class NatInfo(XML_Object_Base):
def __init__(self, interface_name):
self.interface_name = interface_name
super().__init__(Elements.NAT_INFO)
@classmethod
def from_xml_node(cls, xml_node):
"""
Initialize the object from a XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
interface_name = get_xml_text_value(xml_node, Elements.INTERFACE_NAME)
return cls(interface_name)
class PolicyZone(XML_Object_Base):
"""The class represents the PolicyZoneDTO"""
def __init__(self, xml_node):
self.zone_name_in_parent = get_xml_text_value(xml_node, Elements.ZONE_NAME_IN_PARENT)
self.address_book = get_xml_text_value(xml_node, Elements.ADDRESS_BOOK)
self.version_id = get_xml_int_value(xml_node, Elements.VERSION_ID)
self.admin_domain = AdminDomain.from_xml_node(xml_node)
self.global_el = Flat_XML_Object_Base(Elements.GLOBAL, None, get_xml_text_value(xml_node, Elements.GLOBAL))
self.name = get_xml_text_value(xml_node, Elements.NAME)
super().__init__(Elements.ZONE)
class DeviceUser(XML_Object_Base):
"""This class represents the DeviceUser used in rule decommission field"""
def __init__(self, class_name, name, uid):
self.class_name = class_name
self.name = name
self.uid = uid
super().__init__(Elements.USERS)
@classmethod
def from_xml_node(cls, xml_node):
"""
Initialize the object from a XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
class_name = get_xml_text_value(xml_node, Elements.CLASS_NAME)
name = get_xml_text_value(xml_node, Elements.NAME)
uid = get_xml_text_value(xml_node, Elements.UID)
return cls(class_name, name, uid)
class RuleTrack(XML_Object_Base):
"""This class represents the RuleTrack used in rule decommission field"""
def __init__(self, track_interval, track_level):
self.track_interval = track_interval
self.track_level = track_level
super().__init__(Elements.TRACK)
@classmethod
def from_xml_node(cls, xml_node):
"""
Initialize the object from a XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
track_interval = get_xml_text_value(xml_node, Elements.TRACK_INTERVAL)
track_level = get_xml_text_value(xml_node, Elements.TRACK_LEVEL)
return cls(track_interval, track_level)
class SaApplication(XML_Object_Base):
"""This class represents the aApplication used in rule decommission field"""
def __init__(self, num_id, domain_id, name, owner):
self.id = num_id
self.domain_id = domain_id
self.name = name
self.owner = owner
super().__init__(Elements.APPLICATIONS)
@classmethod
def from_xml_node(cls, xml_node):
"""
Initialize the object from a XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
num_id = get_xml_int_value(xml_node, Elements.ID)
name = get_xml_text_value(xml_node, Elements.NAME)
domain_id = get_xml_int_value(xml_node, Elements.DOMAIN_ID)
owner = get_xml_text_value(xml_node, Elements.OWNER)
        # Argument order must match __init__(num_id, domain_id, name, owner)
        return cls(num_id, domain_id, name, owner)
def __str__(self):
return self.name
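# Usage sketch (illustrative, not part of the library): these DTOs are
# normally built from XML parsed with xml.etree.ElementTree, and AbsSlimRule
# dispatches on the node's xsi:type attribute via the subclass registry. The
# payload variable below is an assumption about the SecureChange API, shown
# only to indicate the call pattern:
#
#     import xml.etree.ElementTree as ET
#
#     node = ET.fromstring(xml_payload)       # xml_payload: a <rule> element
#     rule = AbsSlimRule.from_xml_node(node)  # returns a SlimRule subclass
#     if rule is not None:
#         print(rule.to_pretty_str())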
| Tufin/pytos | pytos/securechange/xml_objects/restapi/step/step.py | Python | apache-2.0 | 37,444 |
"""Unit tests for the Robot Framework Jenkins plugin source up-to-dateness collector."""
from ..jenkins_plugin_test_case import JenkinsPluginSourceUpToDatenessMixin
from .base import RobotFrameworkJenkinsPluginTestCase
class RobotFrameworkJenkinsPluginSourceUpToDatenessTest( # skipcq: PTC-W0046
JenkinsPluginSourceUpToDatenessMixin, RobotFrameworkJenkinsPluginTestCase
):
"""Unit tests for the Robot Framework Jenkins plugin source up-to-dateness collector."""
| ICTU/quality-time | components/collector/tests/source_collectors/robot_framework_jenkins_plugin/test_source_up_to_dateness.py | Python | apache-2.0 | 475 |
#!/usr/bin/env python3
# See [1] https://pubs.acs.org/doi/pdf/10.1021/j100247a015
# Banerjee, 1985
# [2] https://aip.scitation.org/doi/abs/10.1063/1.2104507
# Heyden, 2005
# [3] https://onlinelibrary.wiley.com/doi/abs/10.1002/jcc.540070402
# Baker, 1985
# [4] 10.1007/s002140050387
# Bofill, 1998, Restricted-Step-RFO
# [5] https://link.springer.com/article/10.1007/s00214-016-1847-3
# Birkholz, 2016
import numpy as np
from pysisyphus.optimizers.HessianOptimizer import HessianOptimizer
class RSRFOptimizer(HessianOptimizer):
"""Optimizer to find first-order saddle points."""
rfo_dict = {
"min": (0, "min"),
"max": (-1, "max"),
}
def __init__(self, geometry, max_micro_cycles=50, **kwargs):
super().__init__(geometry, **kwargs)
self.max_micro_cycles = int(max_micro_cycles)
assert max_micro_cycles >= 1
self.alpha0 = 1
self.alpha_max = 1e8
def solve_rfo(self, rfo_mat, kind="min"):
# So if I use eig instead of eigh here it even works ...
# my bad, ahhh! The unscaled RFO matrix may be symmetric,
# but the scaled ones aren't anymore.
eigenvalues, eigenvectors = np.linalg.eig(rfo_mat)
eigenvalues = eigenvalues.real
eigenvectors = eigenvectors.real
sorted_inds = np.argsort(eigenvalues)
# Depending on wether we want to minimize (maximize) along
# the mode(s) in the rfo mat we have to select the smallest
# (biggest) eigenvalue and corresponding eigenvector.
first_or_last, verbose = self.rfo_dict[kind]
ind = sorted_inds[first_or_last]
# Given sorted eigenvalue-indices (sorted_inds) use the first
# (smallest eigenvalue) or the last (largest eigenvalue) index.
step_nu = eigenvectors.T[ind]
nu = step_nu[-1]
self.log(f"nu_{verbose}={nu:.4e}")
        # Scale the eigenvector so that its last element equals 1. The final
        # step is the scaled eigenvector without its last element.
step = step_nu[:-1] / nu
eigval = eigenvalues[ind]
self.log(f"eigenvalue_{verbose}={eigval:.4e}")
return step, eigval, nu
def optimize(self):
forces = self.geometry.forces
self.forces.append(forces)
self.energies.append(self.geometry.energy)
if self.cur_cycle > 0:
self.update_trust_radius()
self.update_hessian()
H = self.H
if self.geometry.internal:
H = self.geometry.internal.project_hessian(self.H)
eigvals, eigvecs = np.linalg.eigh(H)
# Transform to eigensystem of hessian
forces_trans = eigvecs.T.dot(forces)
# Minimize energy along all modes
min_mat = np.asarray(np.bmat((
(np.diag(eigvals), -forces_trans[:,None]),
(-forces_trans[None,:], [[0]])
)))
alpha = self.alpha0
min_diag_indices = np.diag_indices(eigvals.size)
for mu in range(self.max_micro_cycles):
assert alpha > 0, "alpha should not be negative"
self.log(f"RS-RFO micro cycle {mu:02d}, alpha={alpha:.6f}")
# We only have to update one eigenvalue
min_mat_scaled = min_mat.copy()
min_mat_scaled[min_diag_indices] /= alpha
min_mat_scaled[:-1,-1] /= alpha
rfo_step, eigval_min, nu_min = self.solve_rfo(min_mat_scaled, "min")
# As of Eq. (8a) of [4] max_eigval and min_eigval also
# correspond to:
# eigval_min_ = -forces_trans.dot(rfo_step)
# np.testing.assert_allclose(eigval_min, eigval_min_)
# Create the full PRFO step
rfo_norm = np.linalg.norm(rfo_step)
self.log(f"rfo_norm={rfo_norm:.6f}")
inside_trust = rfo_norm < self.trust_radius + 1e-3
if inside_trust:
self.log("step is inside trust radius. breaking.")
break
elif alpha > self.alpha_max:
print("alpha > alpha_max. breaking.")
break
# Derivative of the squared step w.r.t. alpha
            numer = forces_trans**2
            denom = (eigvals - eigval_min * alpha)**3
            quot = np.sum(numer / denom)
            self.log(f"quot={quot:.6f}")
dstep2_dalpha = (2*eigval_min/(1+rfo_norm**2 * alpha)
* np.sum(forces_trans**2
/ ((eigvals - eigval_min * alpha)**3)
)
)
self.log(f"analytic deriv.={dstep2_dalpha:.6f}")
# Update alpha
alpha_step = (2*(self.trust_radius*rfo_norm - rfo_norm**2)
/ dstep2_dalpha
)
self.log(f"alpha_step={alpha_step:.4f}")
alpha += alpha_step
self.log("")
# Right now the step is still given in the Hessians eigensystem. We
# transform it back now.
step = eigvecs.dot(rfo_step)
step_norm = np.linalg.norm(step)
# This would correspond to "pure" RFO without the iterative
# step-restriction. Here we will just scale down the step, if it
# is too big.
if self.max_micro_cycles == 1 and step_norm > self.trust_radius:
self.log("Scaled down step")
step = step / step_norm * self.trust_radius
step_norm = np.linalg.norm(step)
self.log(f"norm(step)={np.linalg.norm(step):.6f}")
# Calculating the energy change from eigval_min and nu_min seems to give
# big problems.
# predicted_energy_change = 1/2 * eigval_min / nu_min**2
predicted_change = step.dot(-forces) + 0.5 * step.dot(self.H).dot(step)
self.predicted_energy_changes.append(predicted_change)
self.log("")
return step
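if __name__ == "__main__":
    # Minimal self-contained sketch (not part of pysisyphus): build the
    # augmented RFO matrix [[H, g], [g^T, 0]] for a toy 2D quadratic PES and
    # extract the minimization step exactly as solve_rfo() does above. The
    # numbers are made up purely for illustration.
    H = np.array([[2.0, 0.3],
                  [0.3, 1.0]])
    gradient = np.array([0.4, -0.2])
    forces = -gradient
    rfo_mat = np.asarray(np.bmat((
        (H, -forces[:, None]),
        (-forces[None, :], [[0.0]]),
    )))
    # The unscaled RFO matrix is symmetric, so eigh is safe here.
    eigenvalues, eigenvectors = np.linalg.eigh(rfo_mat)
    step_nu = eigenvectors[:, eigenvalues.argmin()]  # mode w/ smallest eigenvalue
    step = step_nu[:-1] / step_nu[-1]                # scale so last element is 1
    print(f"RFO step: {step}, norm: {np.linalg.norm(step):.6f}")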
| eljost/pysisyphus | deprecated/optimizers/RSRFOptimizer.py | Python | gpl-3.0 | 5,961 |
def make_album(name, album='L.P', number=''):
album_list = {}
    polling_active = True
    # note: ``while polling_active = True:`` is a syntax error; test the flag directly
    while polling_active:
        name = input("\nWhat's the name?")
        album = input('\nPlease input the album')
        album_list[name] = album    # map each name to the album entered
        repet = input("\nWould you like to quit? (type 'quit' to stop)")
        if repet == 'quit':
            polling_active = False
    # print a summary of everything collected
    for name, album in album_list.items():
        print(name + '\t' + album + '\n')
return album_list
#care=make_album('bqb', 'bb')
#print(care)
xiaoxin = make_album('')
#print(xiaoxin)
#the return value is a dict, so it can be operated on further below
#zhu=make_album('zdmx','xzc','8')
#print(zhu)
#print (album_list)
#the dict inside the function can't be used by the caller; only what the
#function returns is usable outside
#the dict is per-call, independent for each set of arguments
#forgot how return values are used, need to go over that again
#without clearing the dict, repeated runs accumulate entries, and the format
#had problems too
#the dict output was messy, since it gets iterated over
#debugged n times: formatting, alignment, how to write the loop switch --
#still need more practice
| lluxury/pcc_exercise | 08/user_album.py | Python | mit | 1,146 |
import _plotly_utils.basevalidators
class TicklabelpositionValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self, plotly_name="ticklabelposition", parent_name="contour.colorbar", **kwargs
):
super(TicklabelpositionValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
values=kwargs.pop(
"values",
[
"outside",
"inside",
"outside top",
"inside top",
"outside left",
"inside left",
"outside right",
"inside right",
"outside bottom",
"inside bottom",
],
),
**kwargs
)
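# Usage sketch (an assumption about plotly internals, not generated code):
# validators like this one are instantiated by the generated graph_objects
# classes to coerce and validate property assignments, e.g.
#
#     TicklabelpositionValidator().validate_coerce("inside top")
#
# returns the value when it is one of the enumerated options above and
# raises a ValueError otherwise.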
| plotly/plotly.py | packages/python/plotly/plotly/validators/contour/colorbar/_ticklabelposition.py | Python | mit | 916 |
from scipy.linalg import norm
import numpy as np
from menpo.lucaskanade.appearance.base import AppearanceLucasKanade
class AdaptiveForwardAdditive(AppearanceLucasKanade):
type = 'AdaFA'
def _align(self, lk_fitting, max_iters=20, project=True):
# Initial error > eps
error = self.eps + 1
image = lk_fitting.image
lk_fitting.weights = []
n_iters = 0
# Initial appearance weights
if project:
# Obtained weights by projection
IWxp = image.warp_to(self.template.mask, self.transform,
interpolator=self._interpolator)
weights = self.appearance_model.project(IWxp)
# Reset template
self.template = self.appearance_model.instance(weights)
else:
# Set all weights to 0 (yielding the mean)
weights = np.zeros(self.appearance_model.n_active_components)
lk_fitting.weights.append(weights)
# Forward Additive Algorithm
while n_iters < max_iters and error > self.eps:
# Compute warped image with current parameters
IWxp = image.warp_to(self.template.mask, self.transform,
interpolator=self._interpolator)
# Compute warp Jacobian
dW_dp = self.transform.jacobian(
self.template.mask.true_indices)
# Compute steepest descent images, VI_dW_dp
J_aux = self.residual.steepest_descent_images(
image, dW_dp, forward=(self.template, self.transform,
self._interpolator))
# Project out appearance model from VT_dW_dp
self._J = self.appearance_model.project_out_vectors(J_aux.T).T
# Compute Hessian and inverse
self._H = self.residual.calculate_hessian(self._J)
# Compute steepest descent parameter updates
sd_delta_p = self.residual.steepest_descent_update(
self._J, self.template, IWxp)
# Compute gradient descent parameter updates
delta_p = np.real(self._calculate_delta_p(sd_delta_p))
# Update warp parameters
parameters = self.transform.as_vector() + delta_p
self.transform.from_vector_inplace(parameters)
lk_fitting.parameters.append(parameters)
# Update appearance weights
error_img = self.template.from_vector(
self.residual._error_img - np.dot(J_aux, delta_p))
weights -= self.appearance_model.project(error_img)
self.template = self.appearance_model.instance(weights)
lk_fitting.weights.append(weights)
# Test convergence
error = np.abs(norm(delta_p))
n_iters += 1
lk_fitting.fitted = True
return lk_fitting
class AdaptiveForwardCompositional(AppearanceLucasKanade):
type = 'AdaFC'
def _precompute(self):
# Compute warp Jacobian
self._dW_dp = self.transform.jacobian(
self.template.mask.true_indices)
def _align(self, lk_fitting, max_iters=20, project=True):
# Initial error > eps
error = self.eps + 1
image = lk_fitting.image
lk_fitting.weights = []
n_iters = 0
# Initial appearance weights
if project:
# Obtained weights by projection
IWxp = image.warp_to(self.template.mask, self.transform,
interpolator=self._interpolator)
weights = self.appearance_model.project(IWxp)
# Reset template
self.template = self.appearance_model.instance(weights)
else:
# Set all weights to 0 (yielding the mean)
weights = np.zeros(self.appearance_model.n_active_components)
lk_fitting.weights.append(weights)
        # Forward Compositional Algorithm
while n_iters < max_iters and error > self.eps:
# Compute warped image with current parameters
IWxp = image.warp_to(self.template.mask, self.transform,
interpolator=self._interpolator)
# Compute steepest descent images, VI_dW_dp
J_aux = self.residual.steepest_descent_images(IWxp, self._dW_dp)
# Project out appearance model from VT_dW_dp
self._J = self.appearance_model.project_out_vectors(J_aux.T).T
# Compute Hessian and inverse
self._H = self.residual.calculate_hessian(self._J)
# Compute steepest descent parameter updates
sd_delta_p = self.residual.steepest_descent_update(
self._J, self.template, IWxp)
# Compute gradient descent parameter updates
delta_p = np.real(self._calculate_delta_p(sd_delta_p))
# Update warp parameters
self.transform.compose_after_from_vector_inplace(delta_p)
lk_fitting.parameters.append(self.transform.as_vector())
# Update appearance weights
error_img = self.template.from_vector(
self.residual._error_img - np.dot(J_aux, delta_p))
weights -= self.appearance_model.project(error_img)
self.template = self.appearance_model.instance(weights)
lk_fitting.weights.append(weights)
# Test convergence
error = np.abs(norm(delta_p))
n_iters += 1
lk_fitting.fitted = True
return lk_fitting
class AdaptiveInverseCompositional(AppearanceLucasKanade):
type = 'AdaIC'
def _precompute(self):
# Compute warp Jacobian
self._dW_dp = self.transform.jacobian(
self.template.mask.true_indices)
def _align(self, lk_fitting, max_iters=20, project=True):
# Initial error > eps
error = self.eps + 1
image = lk_fitting.image
lk_fitting.weights = []
n_iters = 0
# Initial appearance weights
if project:
# Obtained weights by projection
IWxp = image.warp_to(self.template.mask, self.transform,
interpolator=self._interpolator)
weights = self.appearance_model.project(IWxp)
# Reset template
self.template = self.appearance_model.instance(weights)
else:
# Set all weights to 0 (yielding the mean)
weights = np.zeros(self.appearance_model.n_active_components)
lk_fitting.weights.append(weights)
# Baker-Matthews, Inverse Compositional Algorithm
while n_iters < max_iters and error > self.eps:
# Compute warped image with current parameters
IWxp = image.warp_to(self.template.mask, self.transform,
interpolator=self._interpolator)
# Compute steepest descent images, VT_dW_dp
J_aux = self.residual.steepest_descent_images(self.template,
self._dW_dp)
# Project out appearance model from VT_dW_dp
self._J = self.appearance_model.project_out_vectors(J_aux.T).T
# Compute Hessian and inverse
self._H = self.residual.calculate_hessian(self._J)
# Compute steepest descent parameter updates
sd_delta_p = self.residual.steepest_descent_update(
self._J, IWxp, self.template)
# Compute gradient descent parameter updates
delta_p = np.real(self._calculate_delta_p(sd_delta_p))
            # Request the pseudoinverse vector from the transform
inv_delta_p = self.transform.pseudoinverse_vector(delta_p)
# Update warp parameters
self.transform.compose_after_from_vector_inplace(inv_delta_p)
lk_fitting.parameters.append(self.transform.as_vector())
# Update appearance parameters
error_img = self.template.from_vector(
self.residual._error_img - np.dot(J_aux, delta_p))
weights -= self.appearance_model.project(error_img)
self.template = self.appearance_model.instance(weights)
lk_fitting.weights.append(weights)
# Test convergence
error = np.abs(norm(delta_p))
n_iters += 1
lk_fitting.fitted = True
return lk_fitting
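if __name__ == "__main__":
    # Illustrative sketch (not part of menpo): the update shared by the three
    # variants above is a Gauss-Newton step. Given steepest descent images J
    # (n_pixels x n_params) and an error image e (n_pixels,), the update
    # solves H * delta_p = J^T * e with H = J^T * J. Random data stands in
    # for real warped-image residuals.
    rng = np.random.RandomState(0)
    J = rng.randn(100, 6)           # e.g. 6 parameters of an affine warp
    e = rng.randn(100)              # template minus warped image
    H = J.T.dot(J)                  # Gauss-Newton approximation to the Hessian
    sd_delta_p = J.T.dot(e)         # steepest descent parameter updates
    delta_p = np.linalg.solve(H, sd_delta_p)
    print("delta_p: {0}".format(delta_p))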
| ikassi/menpo | menpo/lucaskanade/appearance/adaptive.py | Python | bsd-3-clause | 8,415 |
import unittest
import mock
import github
from github import Requester
from prboard import utils, filters, settings, hub
class TestGithub(unittest.TestCase):
def setUp(self):
pass
def test_github_init(self):
""" Test if Github gets instantiated with addditional methods """
g = hub.Github()
self.assertTrue(hasattr(g, 'get_user_repos'))
self.assertTrue(hasattr(g, 'get_org_repos'))
@mock.patch.object(github.PaginatedList, "PaginatedList")
def test_github_get_user_repos_raises_assert_error(self, mock_paginated_list):
""" Test if Github.get_user_repos raises assertion error if since is not a valid value """
g = hub.Github()
self.assertRaises(AssertionError, g.get_user_repos, "kumar", "a")
@mock.patch.object(github.PaginatedList, "PaginatedList")
def test_github_get_user_repos_pass(self, mock_paginated_list):
""" Test if Github.get_user_repos raises assertion error if since is not a valid value """
args = [mock.MagicMock(), "", "", ""]
data = [github.Repository.Repository(*args), github.Repository.Repository(*args), github.Repository.Repository(*args)]
mock_paginated_list.return_value = data
g = hub.Github()
repos = g.get_user_repos("kumar")
# Cannot use assert_called_once_with as the requester object gets an instance
self.assertEqual(mock_paginated_list.call_args[0][0], github.Repository.Repository)
self.assertEqual(mock_paginated_list.call_args[0][2], "/users/{0}/repos".format("kumar"))
self.assertEqual(repos, data)
@mock.patch.object(github.PaginatedList, "PaginatedList")
def test_github_get_org_repos_pass(self, mock_paginated_list):
""" Test if Github.get_org_repos raises assertion error if since is not a valid value """
args = [mock.MagicMock(), "", "", ""]
data = [github.Repository.Repository(*args), github.Repository.Repository(*args), github.Repository.Repository(*args)]
mock_paginated_list.return_value = data
g = hub.Github()
repos = g.get_org_repos("kumar")
# Cannot use assert_called_once_with as the requester object gets an instance
self.assertEqual(mock_paginated_list.call_args[0][0], github.Repository.Repository)
self.assertEqual(mock_paginated_list.call_args[0][2], "orgs/{0}/repositories".format("kumar"))
self.assertEqual(repos, data)
| kumarvaradarajulu/prboard | prboard/tests/unit/test_hub.py | Python | gpl-3.0 | 2,446 |
"""
csv.py - read/write/investigate CSV files
"""
import re
from _csv import Error, __version__, writer, reader, register_dialect, \
unregister_dialect, get_dialect, list_dialects, \
field_size_limit, \
QUOTE_MINIMAL, QUOTE_ALL, QUOTE_NONNUMERIC, QUOTE_NONE, \
__doc__
from _csv import Dialect as _Dialect
from io import StringIO
__all__ = [ "QUOTE_MINIMAL", "QUOTE_ALL", "QUOTE_NONNUMERIC", "QUOTE_NONE",
"Error", "Dialect", "__doc__", "excel", "excel_tab",
"field_size_limit", "reader", "writer",
"register_dialect", "get_dialect", "list_dialects", "Sniffer",
"unregister_dialect", "__version__", "DictReader", "DictWriter" ]
class Dialect:
"""Describe a CSV dialect.
This must be subclassed (see csv.excel). Valid attributes are:
delimiter, quotechar, escapechar, doublequote, skipinitialspace,
lineterminator, quoting.
"""
_name = ""
_valid = False
# placeholders
delimiter = None
quotechar = None
escapechar = None
doublequote = None
skipinitialspace = None
lineterminator = None
quoting = None
def __init__(self):
if self.__class__ != Dialect:
self._valid = True
self._validate()
def _validate(self):
try:
_Dialect(self)
except TypeError as e:
# We do this for compatibility with py2.3
raise Error(str(e))
class excel(Dialect):
"""Describe the usual properties of Excel-generated CSV files."""
delimiter = ','
quotechar = '"'
doublequote = True
skipinitialspace = False
lineterminator = '\r\n'
quoting = QUOTE_MINIMAL
register_dialect("excel", excel)
class excel_tab(excel):
"""Describe the usual properties of Excel-generated TAB-delimited files."""
delimiter = '\t'
register_dialect("excel-tab", excel_tab)
class unix_dialect(Dialect):
"""Describe the usual properties of Unix-generated CSV files."""
delimiter = ','
quotechar = '"'
doublequote = True
skipinitialspace = False
lineterminator = '\n'
quoting = QUOTE_ALL
register_dialect("unix", unix_dialect)
class DictReader:
def __init__(self, f, fieldnames=None, restkey=None, restval=None,
dialect="excel", *args, **kwds):
self._fieldnames = fieldnames # list of keys for the dict
self.restkey = restkey # key to catch long rows
self.restval = restval # default value for short rows
self.reader = reader(f, dialect, *args, **kwds)
self.dialect = dialect
self.line_num = 0
def __iter__(self):
return self
@property
def fieldnames(self):
if self._fieldnames is None:
try:
self._fieldnames = next(self.reader)
except StopIteration:
pass
self.line_num = self.reader.line_num
return self._fieldnames
@fieldnames.setter
def fieldnames(self, value):
self._fieldnames = value
def __next__(self):
if self.line_num == 0:
# Used only for its side effect.
self.fieldnames
row = next(self.reader)
self.line_num = self.reader.line_num
# unlike the basic reader, we prefer not to return blanks,
# because we will typically wind up with a dict full of None
# values
while row == []:
row = next(self.reader)
d = dict(zip(self.fieldnames, row))
lf = len(self.fieldnames)
lr = len(row)
if lf < lr:
d[self.restkey] = row[lf:]
elif lf > lr:
for key in self.fieldnames[lr:]:
d[key] = self.restval
return d
class DictWriter:
def __init__(self, f, fieldnames, restval="", extrasaction="raise",
dialect="excel", *args, **kwds):
self.fieldnames = fieldnames # list of keys for the dict
self.restval = restval # for writing short dicts
if extrasaction.lower() not in ("raise", "ignore"):
raise ValueError("extrasaction (%s) must be 'raise' or 'ignore'"
% extrasaction)
self.extrasaction = extrasaction
self.writer = writer(f, dialect, *args, **kwds)
def writeheader(self):
header = dict(zip(self.fieldnames, self.fieldnames))
self.writerow(header)
def _dict_to_list(self, rowdict):
if self.extrasaction == "raise":
wrong_fields = [k for k in rowdict if k not in self.fieldnames]
if wrong_fields:
raise ValueError("dict contains fields not in fieldnames: "
+ ", ".join(wrong_fields))
return [rowdict.get(key, self.restval) for key in self.fieldnames]
def writerow(self, rowdict):
return self.writer.writerow(self._dict_to_list(rowdict))
def writerows(self, rowdicts):
rows = []
for rowdict in rowdicts:
rows.append(self._dict_to_list(rowdict))
return self.writer.writerows(rows)
# Guard Sniffer's type checking against builds that exclude complex()
try:
complex
except NameError:
complex = float
class Sniffer:
'''
"Sniffs" the format of a CSV file (i.e. delimiter, quotechar)
Returns a Dialect object.
'''
def __init__(self):
# in case there is more than one possible delimiter
self.preferred = [',', '\t', ';', ' ', ':']
def sniff(self, sample, delimiters=None):
"""
Returns a dialect (or None) corresponding to the sample
"""
quotechar, doublequote, delimiter, skipinitialspace = \
self._guess_quote_and_delimiter(sample, delimiters)
if not delimiter:
delimiter, skipinitialspace = self._guess_delimiter(sample,
delimiters)
if not delimiter:
raise Error("Could not determine delimiter")
class dialect(Dialect):
_name = "sniffed"
lineterminator = '\r\n'
quoting = QUOTE_MINIMAL
# escapechar = ''
dialect.doublequote = doublequote
dialect.delimiter = delimiter
# _csv.reader won't accept a quotechar of ''
dialect.quotechar = quotechar or '"'
dialect.skipinitialspace = skipinitialspace
return dialect
def _guess_quote_and_delimiter(self, data, delimiters):
"""
Looks for text enclosed between two identical quotes
(the probable quotechar) which are preceded and followed
by the same character (the probable delimiter).
For example:
,'some text',
The quote with the most wins, same with the delimiter.
If there is no quotechar the delimiter can't be determined
this way.
"""
matches = []
        for restr in (r'(?P<delim>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?P=delim)', # ,".*?",
                      r'(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?P<delim>[^\w\n"\'])(?P<space> ?)',   # ".*?",
                      r'(?P<delim>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?:$|\n)',   # ,".*?"
                      r'(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?:$|\n)'):                            # ".*?" (no delim, no space)
regexp = re.compile(restr, re.DOTALL | re.MULTILINE)
matches = regexp.findall(data)
if matches:
break
if not matches:
# (quotechar, doublequote, delimiter, skipinitialspace)
return ('', False, None, 0)
quotes = {}
delims = {}
spaces = 0
for m in matches:
n = regexp.groupindex['quote'] - 1
key = m[n]
if key:
quotes[key] = quotes.get(key, 0) + 1
try:
n = regexp.groupindex['delim'] - 1
key = m[n]
except KeyError:
continue
if key and (delimiters is None or key in delimiters):
delims[key] = delims.get(key, 0) + 1
try:
n = regexp.groupindex['space'] - 1
except KeyError:
continue
if m[n]:
spaces += 1
quotechar = max(quotes, key=quotes.get)
if delims:
delim = max(delims, key=delims.get)
skipinitialspace = delims[delim] == spaces
if delim == '\n': # most likely a file with a single column
delim = ''
else:
# there is *no* delimiter, it's a single column of quoted data
delim = ''
skipinitialspace = 0
# if we see an extra quote between delimiters, we've got a
# double quoted format
dq_regexp = re.compile(r"((%(delim)s)|^)\W*%(quote)s[^%(delim)s\n]*%(quote)s[^%(delim)s\n]*%(quote)s\W*((%(delim)s)|$)" % \
{'delim':delim, 'quote':quotechar}, re.MULTILINE)
if dq_regexp.search(data):
doublequote = True
else:
doublequote = False
return (quotechar, doublequote, delim, skipinitialspace)
def _guess_delimiter(self, data, delimiters):
"""
The delimiter /should/ occur the same number of times on
each row. However, due to malformed data, it may not. We don't want
an all or nothing approach, so we allow for small variations in this
number.
1) build a table of the frequency of each character on every line.
2) build a table of frequencies of this frequency (meta-frequency?),
e.g. 'x occurred 5 times in 10 rows, 6 times in 1000 rows,
7 times in 2 rows'
3) use the mode of the meta-frequency to determine the /expected/
frequency for that character
4) find out how often the character actually meets that goal
5) the character that best meets its goal is the delimiter
For performance reasons, the data is evaluated in chunks, so it can
try and evaluate the smallest portion of the data possible, evaluating
additional chunks as necessary.
"""
data = list(filter(None, data.split('\n')))
ascii = [chr(c) for c in range(127)] # 7-bit ASCII
# build frequency tables
chunkLength = min(10, len(data))
iteration = 0
charFrequency = {}
modes = {}
delims = {}
start, end = 0, min(chunkLength, len(data))
while start < len(data):
iteration += 1
for line in data[start:end]:
for char in ascii:
metaFrequency = charFrequency.get(char, {})
# must count even if frequency is 0
freq = line.count(char)
# value is the mode
metaFrequency[freq] = metaFrequency.get(freq, 0) + 1
charFrequency[char] = metaFrequency
for char in charFrequency.keys():
items = list(charFrequency[char].items())
if len(items) == 1 and items[0][0] == 0:
continue
# get the mode of the frequencies
if len(items) > 1:
modes[char] = max(items, key=lambda x: x[1])
# adjust the mode - subtract the sum of all
# other frequencies
items.remove(modes[char])
modes[char] = (modes[char][0], modes[char][1]
- sum(item[1] for item in items))
else:
modes[char] = items[0]
# build a list of possible delimiters
modeList = modes.items()
total = float(chunkLength * iteration)
# (rows of consistent data) / (number of rows) = 100%
consistency = 1.0
# minimum consistency threshold
threshold = 0.9
while len(delims) == 0 and consistency >= threshold:
for k, v in modeList:
if v[0] > 0 and v[1] > 0:
if ((v[1]/total) >= consistency and
(delimiters is None or k in delimiters)):
delims[k] = v
consistency -= 0.01
if len(delims) == 1:
delim = list(delims.keys())[0]
skipinitialspace = (data[0].count(delim) ==
data[0].count("%c " % delim))
return (delim, skipinitialspace)
# analyze another chunkLength lines
start = end
end += chunkLength
if not delims:
return ('', 0)
# if there's more than one, fall back to a 'preferred' list
if len(delims) > 1:
for d in self.preferred:
if d in delims.keys():
skipinitialspace = (data[0].count(d) ==
data[0].count("%c " % d))
return (d, skipinitialspace)
# nothing else indicates a preference, pick the character that
# dominates(?)
items = [(v,k) for (k,v) in delims.items()]
items.sort()
delim = items[-1][1]
skipinitialspace = (data[0].count(delim) ==
data[0].count("%c " % delim))
return (delim, skipinitialspace)
def has_header(self, sample):
# Creates a dictionary of types of data in each column. If any
# column is of a single type (say, integers), *except* for the first
# row, then the first row is presumed to be labels. If the type
# can't be determined, it is assumed to be a string in which case
# the length of the string is the determining factor: if all of the
# rows except for the first are the same length, it's a header.
# Finally, a 'vote' is taken at the end for each column, adding or
# subtracting from the likelihood of the first row being a header.
rdr = reader(StringIO(sample), self.sniff(sample))
header = next(rdr) # assume first row is header
columns = len(header)
columnTypes = {}
for i in range(columns): columnTypes[i] = None
checked = 0
for row in rdr:
# arbitrary number of rows to check, to keep it sane
if checked > 20:
break
checked += 1
if len(row) != columns:
continue # skip rows that have irregular number of columns
for col in list(columnTypes.keys()):
for thisType in [int, float, complex]:
try:
thisType(row[col])
break
except (ValueError, OverflowError):
pass
else:
# fallback to length of string
thisType = len(row[col])
if thisType != columnTypes[col]:
if columnTypes[col] is None: # add new column type
columnTypes[col] = thisType
else:
# type is inconsistent, remove column from
# consideration
del columnTypes[col]
# finally, compare results against first row and "vote"
# on whether it's a header
hasHeader = 0
for col, colType in columnTypes.items():
if type(colType) == type(0): # it's a length
if len(header[col]) != colType:
hasHeader += 1
else:
hasHeader -= 1
else: # attempt typecast
try:
colType(header[col])
except (ValueError, TypeError):
hasHeader += 1
else:
hasHeader -= 1
return hasHeader > 0
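if __name__ == "__main__":
    # Small usage sketch (not part of the module proper): sniff the dialect
    # of an in-memory sample, check for a header row, then parse the sample
    # with DictReader using the sniffed dialect.
    sample = "name,age\nalice,30\nbob,25\n"
    sniffer = Sniffer()
    dialect = sniffer.sniff(sample)
    print("delimiter:", repr(dialect.delimiter))
    print("has header:", sniffer.has_header(sample))
    for row in DictReader(StringIO(sample), dialect=dialect):
        print(row)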
| lfcnassif/MultiContentViewer | release/modules/ext/libreoffice/program/python-core-3.3.0/lib/csv.py | Python | lgpl-3.0 | 16,123 |
from typing import Any
from flask import g
from psycopg2 import connect, extras
def open_connection(config: dict[str, Any]) -> None:
g.db = connect(
database=config['DATABASE_NAME'],
user=config['DATABASE_USER'],
password=config['DATABASE_PASS'],
port=config['DATABASE_PORT'],
host=config['DATABASE_HOST'])
g.db.autocommit = True
g.cursor = g.db.cursor(cursor_factory=extras.DictCursor)
def close_connection() -> None:
if hasattr(g, 'db'):
g.db.close()
class Transaction:
@staticmethod
def begin() -> None:
g.cursor.execute('BEGIN')
@staticmethod
def commit() -> None:
g.cursor.execute('COMMIT')
@staticmethod
def rollback() -> None:
g.cursor.execute('ROLLBACK')
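# Usage sketch (assumptions, not from the project: table and config names are
# hypothetical): inside a Flask request, open the connection once, then wrap
# multi-statement writes in an explicit transaction even though autocommit is
# enabled:
#
#     open_connection(app.config)
#     Transaction.begin()
#     try:
#         g.cursor.execute('INSERT INTO entity (name) VALUES (%s)', ['demo'])
#         Transaction.commit()
#     except Exception:
#         Transaction.rollback()
#         raise
#     finally:
#         close_connection()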
| craws/OpenAtlas | openatlas/database/connect.py | Python | gpl-2.0 | 785 |
import Pyro4
class Server(object):
def welcomeMessage(self, name):
return ("Hi welcome " + str (name))
def startServer():
server = Server()
# make a Pyro daemon
daemon = Pyro4.Daemon()
# locate the name server running
ns = Pyro4.locateNS()
# register the server as a Pyro object
uri = daemon.register(server)
# register the object with a name in the name server
ns.register("server", uri)
# print the uri so we can use it in the client later
print("Ready. Object uri =", uri)
# start the event loop of the server to wait for calls
daemon.requestLoop()
if __name__ == "__main__":
startServer()
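# Companion client sketch (assumes the Pyro4 name server and the server above
# are already running; note that newer Pyro4 releases require @Pyro4.expose
# on the Server class, while older ones expose all methods by default):
#
#     import Pyro4
#
#     server = Pyro4.Proxy("PYRONAME:server")
#     print(server.welcomeMessage("world"))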
| IdiosyncraticDragon/Reading-Notes | Python Parallel Programming Cookbook_Code/Chapter 5/Pyro4/First Example/server.py | Python | apache-2.0 | 726 |
from pycipher import PolybiusSquare
import unittest
class TestPolybius(unittest.TestCase):
def test_encipher(self):
keys = (('phqgiumeaylnofdxkrcvstzwb',5,'ABCDE'),
('uqfigkydlvmznxephrswaotcb',5,'BCDEF'))
plaintext = ('abcdefghiiklmnopqrstuvwxyzabcdefghiiklmnopqrstuvwxyz',
'abcdefghiiklmnopqrstuvwxyzabcdefghiiklmnopqrstuvwxyz')
ciphertext = ('BDEEDDCEBCCDADABAEAEDBCABBCBCCAAACDCEAEBBADEEDDABEECBDEEDDCEBCCDADABAEAEDBCABBCBCCAAACDCEAEBBADEEDDABEEC',
'FBFFFECDDFBDBFECBEBECBCEDBDDFCEBBCEDEEFDBBCFEFDECCDCFBFFFECDDFBDBFECBEBECBCEDBDDFCEBBCEDEEFDBBCFEFDECCDC')
for i,key in enumerate(keys):
enc = PolybiusSquare(*key).encipher(plaintext[i])
self.assertEqual(enc.upper(), ciphertext[i].upper())
def test_decipher(self):
keys = (('phqgiumeaylnofdxkrcvstzwb',5,'ABCDE'),
('uqfigkydlvmznxephrswaotcb',5,'BCDEF'))
plaintext= ('abcdefghiiklmnopqrstuvwxyzabcdefghiiklmnopqrstuvwxyz',
'abcdefghiiklmnopqrstuvwxyzabcdefghiiklmnopqrstuvwxyz')
ciphertext = ('BDEEDDCEBCCDADABAEAEDBCABBCBCCAAACDCEAEBBADEEDDABEECBDEEDDCEBCCDADABAEAEDBCABBCBCCAAACDCEAEBBADEEDDABEEC',
'FBFFFECDDFBDBFECBEBECBCEDBDDFCEBBCEDEEFDBBCFEFDECCDCFBFFFECDDFBDBFECBEBECBCEDBDDFCEBBCEDEEFDBBCFEFDECCDC')
for i,key in enumerate(keys):
dec = PolybiusSquare(*key).decipher(ciphertext[i])
self.assertEqual(dec.upper(), plaintext[i].upper())
if __name__ == '__main__':
unittest.main()
| jameslyons/pycipher | tests/test_polybius.py | Python | mit | 1,597 |
# -*- coding: utf-8 -*-
import os
import sys
import shutil
import optparse
import unittest
TESTDIR = os.path.dirname(os.path.abspath(__file__))
SRCDIR = os.path.abspath(os.path.join(TESTDIR, os.path.pardir))
sys.path.insert(0, SRCDIR)
from arachne.error import EmptyQueue
from arachne.result import CrawlResult, ResultQueue
from arachne.task import CrawlTask
from arachne.url import URL
class TestResultQueue(unittest.TestCase):
def setUp(self):
self._db_home = os.path.join(TESTDIR, 'testresultqueue')
os.mkdir(self._db_home)
self._sites_info = {
'a78e6853355ad5cdc751ad678d15339382f9ed21':
{'url': URL('ftp://atlantis.uh.cu/')},
'7e019d6f671d336a0cc31f137ba034efb13fc327':
{'url': URL('ftp://andromeda.uh.cu/')},
'aa958756e769188be9f76fbdb291fe1b2ddd4777':
{'url': URL('ftp://deltha.uh.cu/')},
'd4af25db08f5fb6e768db027d51b207cd1a7f5d0':
{'url': URL('ftp://anduin.uh.cu/')},
'886b46f54bcd45d4dd5732e290c60e9639b0d101':
{'url': URL('ftp://tigris.uh.cu/')},
'ee5b017839d97507bf059ec91f1e5644a30b2fa6':
{'url': URL('ftp://lara.uh.cu/')},
'341938200f949daa356e0b62f747580247609f5a':
{'url': URL('ftp://nimbo.uh.cu/')},
'd64f2fc98d015a43da3be34668341e3ee6f79133':
{'url': URL('ftp://liverpool.reduh.uh.cu/')},
'0d3465f2b9fd5cf55748797c590ea621e3017a29':
{'url': URL('ftp://london.reduh.uh.cu/')},
'c5bcce5953866b673054f8927648d634a7237a9b':
{'url': URL('ftp://bristol.reduh.uh.cu/')},
}
self._results = []
self._results_per_site = 10
for site_id, info in self._sites_info.iteritems():
for name in (str(n) for n in xrange(self._results_per_site)):
task = CrawlTask(site_id, info['url'].join(name))
self._results.append(CrawlResult(task, True))
self._queue = ResultQueue(self._sites_info, self._db_home)
def test_length(self):
self.assertEquals(len(self._queue), 0)
for i, result in enumerate(self._results):
self._queue.put(result)
self.assertEquals(len(self._queue), i + 1)
num_results = len(self._results)
for i in xrange(num_results):
result = self._queue.get()
self._queue.report_done(result)
self.assertEquals(len(self._queue), num_results - i - 1)
def test_populate(self):
self.assertRaises(EmptyQueue, self._queue.get)
self._populate_queue()
for result in self._results:
returned = self._queue.get()
self.assertEquals(str(returned.task.url), str(result.task.url))
self._queue.report_done(result)
self.assertRaises(EmptyQueue, self._queue.get)
def test_persistence(self):
self._populate_queue()
for i, result in enumerate(self._results):
if i % (self._results_per_site / 2) == 0:
# When a few results have been removed close the database to
# write all the results to disk and open it again.
self._queue.close()
self._queue = ResultQueue(self._sites_info, self._db_home)
returned = self._queue.get()
self.assertEquals(str(returned.task.url), str(result.task.url))
self._queue.report_done(returned)
def test_remove_site(self):
self._populate_queue()
self._queue.close()
# Remove a site. It should not return results from this site but it
# should keep the order of the other results in the queue.
del self._sites_info[self._sites_info.keys()[0]]
self._queue = ResultQueue(self._sites_info, self._db_home)
for result in self._results:
if result.task.site_id in self._sites_info:
returned = self._queue.get()
self.assertEquals(str(returned.task.url), str(result.task.url))
self._queue.report_done(returned)
self.assertEquals(len(self._queue), 0)
def test_report_done(self):
self._populate_queue()
self._clear_queue(remain=1)
result = self._queue.get()
self._queue.report_done(result)
self.assertEquals(len(self._queue), 0)
def test_report_error_one_result(self):
self._populate_queue()
self._clear_queue(remain=1)
result = self._queue.get()
self._queue.report_error(result)
returned = self._queue.get()
self.assertEquals(str(result.task.url), str(returned.task.url))
self._queue.report_done(returned)
def test_report_error_two_results(self):
self._populate_queue()
self._clear_queue(remain=2)
result = self._queue.get()
self._queue.report_error(result)
returned = self._queue.get()
self.assertTrue(str(result.task.url) != str(returned.task.url))
self._queue.report_done(returned)
returned = self._queue.get()
self.assertEquals(str(result.task.url), str(returned.task.url))
self._queue.report_done(returned)
def _clear_queue(self, remain=0):
# Remove results from the queue until the specified number of results
# (default 0) remains in the queue.
for i in xrange(len(self._queue) - remain):
self._queue.report_done(self._queue.get())
self.assertEquals(len(self._queue), remain)
def _populate_queue(self):
for result in self._results:
self._queue.put(result)
def tearDown(self):
if os.path.isdir(self._db_home):
self._queue.close()
shutil.rmtree(self._db_home)
def main():
parser = optparse.OptionParser()
parser.add_option('-v', dest='verbosity', default='2',
type='choice', choices=['0', '1', '2'],
help='verbosity level: 0 = minimal, 1 = normal, 2 = all')
options = parser.parse_args()[0]
module = os.path.basename(__file__)[:-3]
suite = unittest.TestLoader().loadTestsFromName(module)
runner = unittest.TextTestRunner(verbosity=int(options.verbosity))
result = runner.run(suite)
sys.exit(not result.wasSuccessful())
if __name__ == '__main__':
main()
| yasserglez/arachne | tests/testresultqueue.py | Python | gpl-3.0 | 6,369 |
#!/usr/local/bin/python3
# Python 3
class Country:
index = {'cname':0,'population':1,'capital':2,'citypop':3,'continent':4,
'ind_date':5,'currency':6,'religion':7,'language':8}
def __init__(self, row):
self.__attr = row.split(',')
# Added to support + and -
self.__attr[Country.index['population']] = \
int(self.__attr[Country.index['population']])
def __str__(self):
return "{:<10} {:<10} {:>010}".format(self.cname, self.capital, self.population)
def __add__(self,amount):
self.__attr[Country.index['population']] += amount
return self
def __sub__(self,amount):
self.__attr[Country.index['population']] -= amount
return self
def __eq__(self, key):
return (key == self.cname)
# TODO: implement an attribute get function
def __getattr__(self, attr):
if attr in Country.index:
return self.__attr[Country.index[attr]]
else:
raise(AttributeError)
# TODO: implement an attribute delete function
def __delattr__(self, attr):
if attr in Country.index:
if isinstance(self.__attr[Country.index[attr]], int):
self.__attr[Country.index[attr]] = 0
else:
self.__attr[Country.index[attr]] = ""
else:
raise(AttributeError)
# JSON exercise
def convert_to_std_type(self, obj):
retn = {}
for k, v in Country.index.items():
retn[k] = obj.__attr[v]
print(retn)
return retn
def dict_to_country(inp):
rlist = list(range(0, 9))
print(inp)
print(rlist)
for k, v in inp.items():
field_pos = Country.index[k]
rlist[int(field_pos)] = str(v)
return Country(','.join(rlist))
######################################################################################
if __name__ == "__main__":
belgium = Country("Belgium,10445852,Brussels,737966,Europe,1830,Euro,Catholicism,Dutch,French,German")
japan = Country("Japan,127920000,Tokyo,31139900,Orient,-660,Yen,Shinto;Buddhism,Japanese")
myanmar = Country("Myanmar,42909464,Yangon,4344100,Asia,1948,Kyat,Buddhism,Burmese")
sweden = Country("Sweden,9001774,Stockholm,1622300,Europe,1523,Swedish Krona,Lutheran,Swedish")
import json
fo = open('belgium.json', 'w')
json.dump(belgium, fo, default=belgium.convert_to_std_type)
fo.close()
fi = open('belgium.json', 'r')
obj = json.load(fi, object_hook=dict_to_country)
    print(obj)
| rbprogrammer/advanced_python_topics | course-material/py3/solutions/07 XML Processing/country.py | Python | apache-2.0 | 2,626 |
""" Solves a MMS problem with smooth control """
from firedrake import *
from firedrake_adjoint import *
import pytest
try:
from petsc4py import PETSc
except ImportError:
pass
def solve_pde(u, V, m):
v = TestFunction(V)
F = (inner(grad(u), grad(v)) - m*v)*dx
bc = DirichletBC(V, 0.0, "on_boundary")
solve(F == 0, u, bc)
@pytest.mark.skipif("petsc4py.PETSc" not in sys.modules or not hasattr(PETSc, "TAO"),
reason="PETSc bindings with TAO support unavailable")
def test_optimization_tao():
n = 100
mesh = UnitSquareMesh(n, n)
V = FunctionSpace(mesh, "CG", 1)
u = Function(V, name='State')
W = FunctionSpace(mesh, "DG", 0)
m = Function(W, name='Control')
x = SpatialCoordinate(mesh)
u_d = 1/(2*pi**2)*sin(pi*x[0])*sin(pi*x[1])
J = Functional((inner(u-u_d, u-u_d))*dx*dt[FINISH_TIME])
# Run the forward model once to create the annotation
solve_pde(u, V, m)
# Run the optimisation
rf = ReducedFunctional(J, FunctionControl(m, value=m))
problem = MinimizationProblem(rf)
opts = PETSc.Options()
opts["tao_monitor"] = None
opts["tao_view"] = None
opts["tao_nls_ksp_type"] = "gltr"
opts["tao_nls_pc_type"] = "none"
opts["tao_ntr_pc_type"] = "none"
parameters = {'method': 'nls',
'max_it': 20,
'fatol': 0.0,
'frtol': 0.0,
'gatol': 1e-9,
'grtol': 0.0
}
solver = TAOSolver(problem, parameters=parameters)
m_opt = solver.solve()
solve_pde(u, V, m_opt)
x, y = SpatialCoordinate(mesh)
# Define the analytical expressions
m_analytic = sin(pi*x)*sin(pi*y)
u_analytic = 1.0/(2*pi*pi)*sin(pi*x)*sin(pi*y)
# Compute the error
control_error = sqrt(assemble((m_analytic - m_opt)**2*dx))
state_error = sqrt(assemble((u_analytic - u)**2*dx))
assert control_error < 0.01
assert state_error < 1e-5
# Check that values are below the threshold
tao_p = solver.get_tao()
assert tao_p.gnorm < 1e-9
assert tao_p.getIterationNumber() <= 20
| live-clones/dolfin-adjoint | tests_firedrake/optimization_tao/test_optimization_tao.py | Python | lgpl-3.0 | 2,124 |
from django.core.exceptions import ValidationError as DjangoValidationError
from django.shortcuts import get_object_or_404
from rest_framework import fields, status
from rest_framework.exceptions import ValidationError
from rest_framework.response import Response
from rest_framework.serializers import Serializer
from wagtail.core.actions.move_page import MovePageAction
from wagtail.core.models import Page
from .base import APIAction
class MovePageAPIActionSerializer(Serializer):
destination_page_id = fields.IntegerField(required=True)
position = fields.ChoiceField(
required=False,
choices=[
"left",
"right",
"first-child",
"last-child",
"first-sibling",
"last-sibling",
],
)
class MovePageAPIAction(APIAction):
serializer = MovePageAPIActionSerializer
def _action_from_data(self, instance, data):
destination_page_id = data["destination_page_id"]
target = get_object_or_404(Page, id=destination_page_id)
return MovePageAction(
page=instance,
target=target,
pos=data.get("position"),
user=self.request.user,
)
def execute(self, instance, data):
action = self._action_from_data(instance, data)
try:
action.execute()
except DjangoValidationError as e:
raise ValidationError(e.message_dict)
instance.refresh_from_db()
serializer = self.view.get_serializer(instance)
return Response(serializer.data, status=status.HTTP_200_OK)
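# Example request body this action accepts (values are illustrative):
#
#     {"destination_page_id": 42, "position": "last-child"}
#
# When "position" is omitted, data.get("position") yields None and
# MovePageAction falls back to its own default placement.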
| wagtail/wagtail | wagtail/admin/api/actions/move.py | Python | bsd-3-clause | 1,612 |
from django.conf.urls import patterns, url
from chatting.views import mainPage, addUser, userHome, joinChat, quitChat, getChat
urlpatterns = patterns('',
url(r'^main/$', mainPage.as_view(), name='home'),
url(r'^add_user/$', addUser.as_view(), name='add_user'),
url(r'^logged_user/$', userHome.as_view(), name='logged_user'),
url(r'^join_chat/$', joinChat.as_view(), name='join_chat'),
url(r'^quit_chat/$', quitChat.as_view(), name='quit_chat'),
url(r'^getChat/$', getChat, name='get_chat'),
)
| botchat/sampleGroupChat | chatting/urls.py | Python | gpl-2.0 | 518 |
# Hello, django! Please, load my template tags.
| kmike/django-mootools-behavior | mootools_behavior/models.py | Python | mit | 48 |
# coding: utf-8
#
# drums-backend a simple interactive audio sampler that plays vorbis samples
# Copyright (C) 2009 C.D. Immanuel Albrecht
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from Tix import *
import tkFileDialog
import Pmw
import sys
import os
import errno
import subprocess
import tkMessageBox
from pysqlite2 import dbapi2 as sqlite
from df_global import *
from ScrollableMultiSelectTree import *
from math import *
from biquad_calculations import *
def get_filter_list(filters):
"""get_filter_list(filters) turn filters string into a list of filter values to be passed to the backend"""
filters = filters.replace('\n',' ')
f = [x.strip().split('(') for x in filters.split(')')]
vals = []
for filter in f:
if len(filter)==2:
type = filter[0]
type = type.strip().lower()
try:
params = [float(x) for x in filter[1].split(",")]
nbr = 3
if type.endswith("pass"):
nbr = 2
if len(params) != nbr:
print "Error in filter ",type,"(",filter[1],"): Should have ", nbr, "parameters!"
elif type=="band":
a,b = band_biquad(params[0],params[1],params[2],DfGlobal()["sample-rate"])
vals += [(a,b)]
elif type=="low":
a,b = lowshelf_biquad(params[0],params[1],params[2],DfGlobal()["sample-rate"])
vals += [(a,b)]
elif type=="high":
a,b = highshelf_biquad(params[0],params[1],params[2],DfGlobal()["sample-rate"])
vals += [(a,b)]
elif type=="bandpass":
a,b = bandpass_biquad(params[0],params[1],DfGlobal()["sample-rate"])
vals += [(a,b)]
elif type=="lowpass":
a,b = bandpass_biquad(params[0],params[1],DfGlobal()["sample-rate"])
vals += [(a,b)]
elif type=="highpass":
a,b = bandpass_biquad(params[0],params[1],DfGlobal()["sample-rate"])
vals += [(a,b)]
else:
print "Error in filter ",type,"(",filter[1],"): Unknown filter type"
except ValueError, msg:
print "Error in filter ",type,"(",filter[1],"): ", msg
elif filter != ['']:
print "Error in filter list: Parenthesis mismatch: ", filter
return vals
def generate_command_dyn(channel,filters):
"""generate_command_dyn(channel,filters) generate dynamic filter command for channel from filters"""
filters = filters.replace('\n',' ')
f = [x.strip().split('(') for x in filters.split(')')]
cmd = ""
for filter in f:
if len(filter)==2:
type = filter[0]
type = type.strip().lower()
params = filter[1]
if type=="meter":
cmd += "CONSOLE:ssetdynmeter " + str(channel)+ " "+params+"\n"
elif type=="gain":
cmd += "CONSOLE:ssetdyn " + str(channel) + " "+ params+ "\n"
else:
print "Error in filter list: Unknown type: ", type
else:
print "Error in filter list: Parenthesis mismatch: ", filter
return cmd
def generate_command(type,channel,filters):
"""generate_command(type,channel,filters) generate command for channel and filters, using "sset"+type command"""
cmd = "sset"+type+" " + str(channel)
for f in filters:
a,b = f
cmd += " ( "
for x in a:
cmd += str(x) + " "
cmd += "| "
for x in b:
cmd += str(x) + " "
cmd += ")"
return cmd
def create_channel_panel(num,notebook,root):
"""create_channel_panel(num,notebook,root) creates the panel window for channel num in notebook, creating shortcut in root"""
global chan_page
    num += 1
pname = "#"+str(num)
print "UI: #",str(num)
page = notebook.insert(pname,'control')
DfGlobal()["notebook"] = notebook
if (num < 10):
shortcut = "<Control-Key-"+str(num)+">"
elif (num==10):
shortcut = "<Control-Key-0>"
if num <= 10:
root.bind(shortcut,eval("lambda x: DfGlobal()[\"notebook\"].selectpage('"+pname+"')"))
Label(page,text="Pre-dynamic filter equalizer settings:",justify=LEFT,anchor="w").pack(side=TOP)
prefr = Frame(page)
prefr.pack(fill=X,side=TOP)
pre = Text(prefr,width=60,height=6)
pre.pack(side=LEFT,fill=X,expand=True)
pre_r = Frame(prefr)
pre_r.pack(side=RIGHT)
def help_callback():
tkMessageBox.showinfo(message=\
"Enter the codes for the subsequent equalizer-filters, where\n\n\
band(freq, gain, width)\t... adjust gain to band around freq with bandwidth width\n\n\
low(freq, gain, slope)\t... adjust gain to low-shelf with cutoff freq and slope\n\n\
high(freq, gain, slope)\t... adjust gain to high-shelf with cutoff freq and slope\n\n\
bandpass(freq, width)\t... apply band-pass filter around freq with bandwidth width\n\n\
lowpass(freq, slope)\t... apply low-pass filter with cutoff freq and slope\n\n\
highpass(freq, slope)\t... apply high-pass filter with cutoff freq and slope\n\n")
def apply_callback():
filters = get_filter_list(pre.get(1.0,END))
DfGlobal()[pname+".pre"] = pre.get(1.0,END)
o = DfGlobal()["UI_O"]
o.write("CONSOLE:"+generate_command("pre",num-1,filters)+"\n")
o.flush()
Button(pre_r,text="Help",command=help_callback).pack(side=TOP,fill=X)
Button(pre_r,text="Set/Apply",command=apply_callback).pack(side=TOP,fill=X)
Label(page,text="Dynamic filter settings:",justify=LEFT,anchor="w").pack(side=TOP,pady=10)
dynfr = Frame(page)
dynfr.pack(fill=X,side=TOP)
dyn = Text(dynfr,width=60,height=6)
dyn.pack(side=LEFT,fill=X,expand=True)
dyn_r = Frame(dynfr)
dyn_r.pack(side=RIGHT)
def help_callback2():
tkMessageBox.showinfo(message=\
"Enter the following setup codes:\n\n\
meter(AVG_BLOCK_SIZE METER_DECAY)\twhere AVG_BLOCK_SIZE is the size of the blocks that are\
used to determine the current volume and METER_DECAY is the influence factor of the old meter value\n\n\
gain([METER_LEVEL GAIN ATTACK_FACTOR RELEASE_FACTOR](n times) METER_LEVEL GAIN )\t set the dynamic filter behaviour\n\n")
def apply_callback3():
filters = dyn.get(1.0,END)
DfGlobal()[pname+".dyn"] = dyn.get(1.0,END)
o = DfGlobal()["UI_O"]
o.write(generate_command_dyn(num-1,filters))
o.flush()
Button(dyn_r,text="Help",command=help_callback2).pack(side=TOP,fill=X)
Button(dyn_r,text="Set/Apply",command=apply_callback3).pack(side=TOP,fill=X)
Label(page,text="Post-dynamic filter equalizer settings:",justify=LEFT,anchor="w").pack(side=TOP,pady=10)
postfr = Frame(page)
postfr.pack(fill=X,side=TOP)
post = Text(postfr,width=60,height=6)
post.pack(side=LEFT,fill=X,expand=True)
post_r = Frame(postfr)
post_r.pack(side=RIGHT)
def apply_callback2():
filters = get_filter_list(post.get(1.0,END))
DfGlobal()[pname+".post"] = post.get(1.0,END)
o = DfGlobal()["UI_O"]
o.write("CONSOLE:"+generate_command("post",num-1,filters)+"\n")
o.flush()
Button(post_r,text="Help",command=help_callback).pack(side=TOP,fill=X)
Button(post_r,text="Set/Apply",command=apply_callback2).pack(side=TOP,fill=X)
Label(page,text="Master volume gain factor:",justify=LEFT,anchor="w").pack(side=TOP,pady=10)
fr = Frame(page)
fr.pack(fill=X,side=TOP)
DfGlobal()[pname+".volume"] = StringVar()
DfGlobal()[pname+".volume"].set("1.0")
entry = Entry(fr,width=20,justify=RIGHT,textvariable=DfGlobal()[pname+".volume"])
entry.pack(side=RIGHT)
scale = Scale(fr,from_=-10.0,to=log(10.0),resolution=0.025,showvalue=0,orient=HORIZONTAL)
scale.pack(side=LEFT,fill=X,expand=True)
scale.set(log(1.0))
DfGlobal()[pname+".volume.slider"] = log(1.0)
def callback_fn(*args):
value = DfGlobal()[pname+".volume"].get()
o = DfGlobal()["UI_O"]
print "SCALE CALLBACK"
try:
newval = float(value)
cmd = "svol " + str(num-1) + " " + str(newval)
o.write("CONSOLE:"+cmd+"\n")
o.flush()
if newval > 0:
if DfGlobal()[pname+".volume.slider"] != log(newval):
scale.set(log(newval))
            else:
                # log(-10.0) would raise a math domain error (silently eaten
                # by the ValueError handler below); the slider floor is -10.0.
                if DfGlobal()[pname+".volume.slider"] != -10.0:
                    scale.set(-10.0)
except ValueError:
pass
def scale_callback(event):
newslider = float(scale.get())
oldslider = float(DfGlobal()[pname+".volume.slider"])
if (newslider != oldslider):
DfGlobal()[pname+".volume.slider"] = newslider
entry.delete(0,END)
if newslider <= -10.0:
entry.insert(0,str(0.0))
else:
entry.insert(0,str(exp(newslider)))
callback_fn(0)
DfGlobal()[pname+".volume"].trace("w",callback_fn)
scale.config(command=scale_callback)
Label(page,text="Input database:",justify=LEFT,anchor="w").pack(side=TOP,pady=10)
fr = Frame(page)
fr.pack(fill=X,side=TOP)
def load_db():
in_db = sqlite.connect(DfGlobal()["in_db_name"])
cur = in_db.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS channel_"+str(num-1)+"_filter_settings ( name TEXT, value TEXT ) ")
settings = cur.execute("SELECT * FROM channel_"+str(num-1)+"_filter_settings")
for s in settings:
if s[0] == "post":
post.delete(1.0, END)
post.insert(END, s[1])
elif s[0] == "pre":
pre.delete(1.0, END)
pre.insert(END, s[1])
elif s[0] == "dyn":
dyn.delete(1.0, END)
dyn.insert(END, s[1])
elif s[0] == "master":
DfGlobal()[pname+".volume"].set(s[1])
apply_callback()
apply_callback2()
apply_callback3()
def clear_db():
in_db = sqlite.connect(DfGlobal()["in_db_name"])
cur = in_db.cursor()
cur.execute("DROP TABLE IF EXISTS channel_"+str(num-1)+"_filter_settings")
in_db.commit()
def save_db():
in_db = sqlite.connect(DfGlobal()["in_db_name"])
cur = in_db.cursor()
cur.execute("DROP TABLE IF EXISTS channel_"+str(num-1)+"_filter_settings")
cur.execute("CREATE TABLE IF NOT EXISTS channel_"+str(num-1)+"_filter_settings ( name TEXT, value TEXT ) ")
cur.execute("INSERT INTO channel_"+str(num-1)+"_filter_settings VALUES (?,?)", ("post",post.get(1.0,END),))
cur.execute("INSERT INTO channel_"+str(num-1)+"_filter_settings VALUES (?,?)", ("pre",pre.get(1.0,END),))
cur.execute("INSERT INTO channel_"+str(num-1)+"_filter_settings VALUES (?,?)", ("dyn",dyn.get(1.0,END),))
cur.execute("INSERT INTO channel_"+str(num-1)+"_filter_settings VALUES (?,?)", ("master",DfGlobal()[pname+".volume"].get()))
in_db.commit()
Button(fr,text="Load database settings (Reset current)",command=load_db).pack(side=TOP,fill=X)
Button(fr,text="Clear database settings",command=clear_db).pack(side=TOP,fill=X)
Button(fr,text="Save settings to database",command=save_db).pack(side=TOP,fill=X)
load_db()
| immo/pyTOM | df/df_ui_channels.py | Python | gpl-3.0 | 11,639 |
from dotmailer.templates import Template
def test_get_all(connection):
"""
:param connection:
:return:
"""
templates = Template.get_all()
for template in templates:
assert template.id is not None
| Mr-F/dotmailer | tests/templates/test_get_all.py | Python | mit | 231 |
# -*- coding: utf-8 -*-
# Copyright(C) 2010-2011 Jocelyn Jaubert
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
import hashlib
from PIL import Image
from weboob.tools.log import getLogger
class TileError(Exception):
def __init__(self, msg, tile=None):
Exception.__init__(self, msg)
self.tile = tile
class Captcha(object):
def __init__(self, file, infos):
self.inim = Image.open(file)
self.infos = infos
self.nbr = int(infos["nbrows"])
self.nbc = int(infos["nbcols"])
(self.nx, self.ny) = self.inim.size
self.inmat = self.inim.load()
self.map = {}
self.tiles = [[Tile(y * self.nbc + x) for y in xrange(4)] for x in xrange(4)]
def __getitem__(self, coords):
x, y = coords
return self.inmat[x % self.nx, y % self.ny]
def all_coords(self):
for y in xrange(self.ny):
for x in xrange(self.nx):
yield x, y
def get_codes(self, code):
s = ''
num = 0
for c in code:
index = self.map[int(c)].id
keycode = str(self.infos["grid"][num * self.nbr * self.nbc + index])
s += keycode
if num < 5:
s += ','
num += 1
return s
def build_tiles(self):
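        # The captcha image is a grid of 24x23-pixel tiles; collect each
        # tile's raw pixel data so it can be matched against known checksums.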
for ty in xrange(0, self.nbc):
y = ty * 23
for tx in xrange(0, self.nbr):
x = tx * 24
tile = self.tiles[tx][ty]
for yy in xrange(y, y + 23):
for xx in xrange(x, x + 24):
tile.map.append(self[xx, yy])
num = tile.get_num()
if num > -1:
tile.valid = True
self.map[num] = tile
class Tile(object):
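    # md5 digests of a tile's raw pixel data, mapped to the digit the tile
    # shows (-1 marks an empty tile); the two digest sets cover two known
    # rendering variants.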
hash = {'ff1441b2c5f90703ef04e688e399aca5': 1,
'53d7f3dfd64f54723b231fc398b6be57': 2,
'5bcba7fa2107ba9a606e8d0131c162eb': 3,
'9db6e7ed063e5f9a69ab831e6cc0d721': 4,
'30ebb75bfa5077f41ccfb72e8c9cc15b': 5,
'61e27275e494038e524bc9fbbd0be130': 6,
'0e0816f1b743f320ca561f090df0fbb1': 7,
'11e7d4a6d447e66a5a112c1d9f7fc442': 8,
'2ea3c82768030d91571d360acf7a0f75': 9,
'28a834ebbf0238b46d3fffae1a0b781b': 0,
'04211db029ce488e07010f618a589c71': -1,
'9a1bdf493d4067e98d3f364586c81e9d': 1,
'932032493860463bb4a3df7c99a900ad': 2,
'59cd90f1fa0b416ecdb440bc16d0b8e7': 3,
'53fe822c5efebe5f6fdef0f272c29638': 4,
'2082a9c830c0c7c9c22e9c809c6cadf7': 5,
'7f24aa97f0037bddcf2a4c8c2dbf5948': 6,
'725b6f11f44ecc2e9f6e79e86e3a82a5': 7,
'61d57da23894b96fab11f7b83c055bba': 8,
'18f6290c1cfaecadc5992e7ef6047a49': 9,
'1ce77709ec1d7475685d7b50d6f1c89e': 0,
'6718858a509fff4b86604f3096cf65e1': -1,
}
def __init__(self, _id):
self.id = _id
self.valid = False
self.logger = getLogger('societegenerale.captcha')
self.map = []
def __repr__(self):
return "<Tile(%02d) valid=%s>" % (self.id, self.valid)
def checksum(self):
s = ''
for pxls in self.map:
for pxl in pxls:
s += '%02d' % pxl
return hashlib.md5(s).hexdigest()
def get_num(self):
sum = self.checksum()
try:
return self.hash[sum]
except KeyError:
self.display()
raise TileError('Tile not found ' + sum, self)
def display(self):
self.logger.debug(self.checksum())
#im = Image.new('RGB', (24, 23))
#im.putdata(self.map)
#im.save('/tmp/%s.png' % self.checksum())
| sputnick-dev/weboob | modules/societegenerale/captcha.py | Python | agpl-3.0 | 4,384 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ownmusicweb.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| Lightshadow244/OwnMusicWeb | ownmusicweb/manage.py | Python | apache-2.0 | 809 |
__author__ = 'kaef'
import uinput
import time
from pymouse import PyMouse
class Clicker:
def __init__(self):
# self.device = uinput.Device([
# uinput.ABS_X + (0, 1920, 0, 0),
# uinput.ABS_Y + (0, 1080, 0, 0),
# # uinput.REL_X,
# # uinput.REL_Y,
# uinput.BTN_LEFT,
# uinput.BTN_RIGHT])
self.m = PyMouse()
def set_next_button_pos(self, x, y):
self.next_button_pos = [x, y]
def set_size_button_pos(self, x, y):
self.size_button_pos = [x, y]
def set_blank_page_pos(self, x, y):
self.blank_page_pos = [x, y]
def set_vertical_resize_button_pos(self, x, y):
self.vertical_resize_button_pos = [x, y]
def click(self, pos):
print pos
self.m.click(pos[0], pos[1], 1)
time.sleep(1)
def click_next_button(self):
print "NB ",
self.click(self.next_button_pos)
def click_size_button(self):
print "SB ",
self.click(self.size_button_pos)
def click_blank_page(self):
print "BP ",
self.click(self.blank_page_pos)
def click_vertical_resize_button_page(self):
print "VRB ",
self.click(self.vertical_resize_button_pos)
def switch_page(self):
self.click_blank_page()
self.click_next_button()
self.click_size_button()
self.click_vertical_resize_button_page()
self.click_blank_page()
if __name__ == "__main__":
i = 3
c = Clicker()
c.set_blank_page_pos(1698, 417)
c.set_next_button_pos(1902, 641)
c.set_size_button_pos(1898, 177)
c.set_vertical_resize_button_pos(1647, 184)
c.switch_page()
| bkolada/ibuk_scraper | Tool/Clicker.py | Python | gpl-3.0 | 1,699 |
from copy import copy
from decimal import Decimal
from typing import TYPE_CHECKING, Any, List, Optional, Union
from django.db.models import QuerySet
from django_countries.fields import Country
from prices import Money, MoneyRange, TaxedMoney, TaxedMoneyRange
from . import ConfigurationTypeField
from .models import PluginConfiguration
if TYPE_CHECKING:
# flake8: noqa
from ..core.taxes import TaxType
from ..checkout.models import Checkout, CheckoutLine
from ..discount import DiscountInfo
from ..product.models import Product, ProductType
from ..account.models import Address, User
from ..order.models import OrderLine, Order
from ..payment.interface import GatewayResponse, PaymentData, CustomerSource
class BasePlugin:
"""Abstract class for storing all methods available for any plugin.
    All methods take a previous_value parameter.
    previous_value contains the value calculated by the previous plugin in the queue.
    If the plugin is first, it will use the default value calculated by the manager.
"""
PLUGIN_NAME = ""
CONFIG_STRUCTURE = None
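    # A minimal sketch of a concrete plugin (hypothetical names, not part of
    # this module): a subclass overrides only the hooks it needs and returns
    # previous_value untouched when it is inactive, so the plugin chain keeps
    # working.
    #
    #     class ExampleTaxPlugin(BasePlugin):
    #         PLUGIN_NAME = "Example Tax"
    #
    #         def apply_taxes_to_product(self, product, price, country,
    #                                    previous_value):
    #             self._initialize_plugin_configuration()
    #             if not self.active:
    #                 return previous_value
    #             gross = price * Decimal("1.23")  # flat 23% rate, illustration only
    #             return TaxedMoney(net=price, gross=gross)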
def __init__(self, *args, **kwargs):
self._cached_config = None
self.active = None
def __str__(self):
return self.PLUGIN_NAME
def _initialize_plugin_configuration(self):
"""Initialize plugin by fetching configuration from internal cache or DB."""
plugin_config_qs = PluginConfiguration.objects.filter(name=self.PLUGIN_NAME)
plugin_config = self._cached_config or plugin_config_qs.first()
if plugin_config:
self._cached_config = plugin_config
self.active = plugin_config.active
def change_user_address(
self,
address: "Address",
address_type: Optional[str],
user: Optional["User"],
previous_value: "Address",
) -> "Address":
return NotImplemented
def calculate_checkout_total(
self,
checkout: "Checkout",
discounts: List["DiscountInfo"],
previous_value: TaxedMoney,
) -> TaxedMoney:
"""Calculate the total for checkout.
Overwrite this method if you need to apply specific logic for the calculation
of a checkout total. Return TaxedMoney.
"""
return NotImplemented
def calculate_checkout_subtotal(
self,
checkout: "Checkout",
discounts: List["DiscountInfo"],
previous_value: TaxedMoney,
) -> TaxedMoney:
"""Calculate the subtotal for checkout.
Overwrite this method if you need to apply specific logic for the calculation
of a checkout subtotal. Return TaxedMoney.
"""
return NotImplemented
def calculate_checkout_shipping(
self,
checkout: "Checkout",
discounts: List["DiscountInfo"],
previous_value: TaxedMoney,
) -> TaxedMoney:
"""Calculate the shipping costs for checkout.
Overwrite this method if you need to apply specific logic for the calculation
of shipping costs. Return TaxedMoney.
"""
return NotImplemented
def calculate_order_shipping(
self, order: "Order", previous_value: TaxedMoney
) -> TaxedMoney:
"""Calculate the shipping costs for the order.
Update shipping costs in the order in case of changes in shipping address or
changes in draft order. Return TaxedMoney.
"""
return NotImplemented
def calculate_checkout_line_total(
self,
checkout_line: "CheckoutLine",
discounts: List["DiscountInfo"],
previous_value: TaxedMoney,
) -> TaxedMoney:
"""Calculate checkout line total.
Overwrite this method if you need to apply specific logic for the calculation
of a checkout line total. Return TaxedMoney.
"""
return NotImplemented
def calculate_order_line_unit(
self, order_line: "OrderLine", previous_value: TaxedMoney
) -> TaxedMoney:
"""Calculate order line unit price.
Update order line unit price in the order in case of changes in draft order.
Return TaxedMoney.
Overwrite this method if you need to apply specific logic for the calculation
of an order line unit price.
"""
return NotImplemented
def get_tax_rate_type_choices(
self, previous_value: List["TaxType"]
) -> List["TaxType"]:
"""Return list of all tax categories.
The returned list will be used to provide staff users with the possibility to
assign tax categories to a product. It can be used by tax plugins to properly
calculate taxes for products.
Overwrite this method in case your plugin provides a list of tax categories.
"""
return NotImplemented
def show_taxes_on_storefront(self, previous_value: bool) -> bool:
"""Define if storefront should add info about taxes to the price.
It is used only by the old storefront. The returned value determines if
storefront should append info to the price about "including/excluding X% VAT".
"""
return NotImplemented
def apply_taxes_to_shipping_price_range(
self, prices: MoneyRange, country: Country, previous_value: TaxedMoneyRange
) -> TaxedMoneyRange:
"""Provide the estimation of shipping costs based on country.
It is used only by the old storefront in the cart view.
"""
return NotImplemented
def apply_taxes_to_shipping(
self, price: Money, shipping_address: "Address", previous_value: TaxedMoney
) -> TaxedMoney:
"""Apply taxes to the shipping costs based on the shipping address.
Overwrite this method if you want to show available shipping methods with
taxes.
"""
return NotImplemented
def apply_taxes_to_product(
self,
product: "Product",
price: Money,
country: Country,
previous_value: TaxedMoney,
) -> TaxedMoney:
"""Apply taxes to the product price based on the customer country.
Overwrite this method if you want to show products with taxes.
"""
return NotImplemented
def preprocess_order_creation(
self, checkout: "Checkout", discounts: List["DiscountInfo"], previous_value: Any
):
"""Trigger directly before order creation.
Overwrite this method if you need to trigger specific logic before an order is
created.
"""
return NotImplemented
def order_created(self, order: "Order", previous_value: Any):
"""Trigger when order is created.
Overwrite this method if you need to trigger specific logic after an order is
created.
"""
return NotImplemented
def assign_tax_code_to_object_meta(
self, obj: Union["Product", "ProductType"], tax_code: str, previous_value: Any
):
"""Assign tax code dedicated to plugin."""
return NotImplemented
def get_tax_code_from_object_meta(
self, obj: Union["Product", "ProductType"], previous_value: "TaxType"
) -> "TaxType":
"""Return tax code from object meta."""
return NotImplemented
def get_tax_rate_percentage_value(
self, obj: Union["Product", "ProductType"], country: Country, previous_value
) -> Decimal:
"""Return tax rate percentage value for a given tax rate type in a country.
It is used only by the old storefront.
"""
return NotImplemented
def customer_created(self, customer: "User", previous_value: Any) -> Any:
"""Trigger when user is created.
Overwrite this method if you need to trigger specific logic after a user is
created.
"""
return NotImplemented
def product_created(self, product: "Product", previous_value: Any) -> Any:
"""Trigger when product is created.
Overwrite this method if you need to trigger specific logic after a product is
created.
"""
return NotImplemented
def order_fully_paid(self, order: "Order", previous_value: Any) -> Any:
"""Trigger when order is fully paid.
Overwrite this method if you need to trigger specific logic when an order is
fully paid.
"""
return NotImplemented
def order_updated(self, order: "Order", previous_value: Any) -> Any:
"""Trigger when order is updated.
Overwrite this method if you need to trigger specific logic when an order is
changed.
"""
return NotImplemented
def order_cancelled(self, order: "Order", previous_value: Any) -> Any:
"""Trigger when order is cancelled.
Overwrite this method if you need to trigger specific logic when an order is
canceled.
"""
return NotImplemented
def order_fulfilled(self, order: "Order", previous_value: Any) -> Any:
"""Trigger when order is fulfilled.
Overwrite this method if you need to trigger specific logic when an order is
fulfilled.
"""
return NotImplemented
def authorize_payment(
self, payment_information: "PaymentData", previous_value
) -> "GatewayResponse":
return NotImplemented
def capture_payment(
self, payment_information: "PaymentData", previous_value
) -> "GatewayResponse":
return NotImplemented
def refund_payment(
self, payment_information: "PaymentData", previous_value
) -> "GatewayResponse":
return NotImplemented
def confirm_payment(
self, payment_information: "PaymentData", previous_value
) -> "GatewayResponse":
return NotImplemented
def process_payment(
self, payment_information: "PaymentData", previous_value
) -> "GatewayResponse":
return NotImplemented
def list_payment_sources(
self, customer_id: str, previous_value
) -> List["CustomerSource"]:
return NotImplemented
def get_client_token(self, token_config, previous_value):
return NotImplemented
def get_payment_config(self, previous_value):
return NotImplemented
@classmethod
def _update_config_items(
cls, configuration_to_update: List[dict], current_config: List[dict]
):
config_structure = (
cls.CONFIG_STRUCTURE if cls.CONFIG_STRUCTURE is not None else {}
)
for config_item in current_config:
for config_item_to_update in configuration_to_update:
config_item_name = config_item_to_update.get("name")
if config_item["name"] == config_item_name:
new_value = config_item_to_update.get("value")
item_type = config_structure.get(config_item_name, {}).get("type")
if item_type == ConfigurationTypeField.BOOLEAN and not isinstance(
new_value, bool
):
new_value = new_value.lower() == "true"
config_item.update([("value", new_value)])
@classmethod
def validate_plugin_configuration(cls, plugin_configuration: "PluginConfiguration"):
"""Validate if provided configuration is correct.
Raise django.core.exceptions.ValidationError otherwise.
"""
return
@classmethod
def save_plugin_configuration(
cls, plugin_configuration: "PluginConfiguration", cleaned_data
):
current_config = plugin_configuration.configuration
configuration_to_update = cleaned_data.get("configuration")
if configuration_to_update:
cls._update_config_items(configuration_to_update, current_config)
if "active" in cleaned_data:
plugin_configuration.active = cleaned_data["active"]
cls.validate_plugin_configuration(plugin_configuration)
plugin_configuration.save()
if plugin_configuration.configuration:
# Let's add a translated descriptions and labels
cls._append_config_structure(plugin_configuration.configuration)
return plugin_configuration
@classmethod
def _get_default_configuration(cls):
"""Return default configuration for plugin.
Each configurable plugin has to provide the default structure of the
configuration. If plugin configuration is not found in DB, the manager will use
the config structure to create a new one in DB.
"""
defaults = None
return defaults
@classmethod
def _append_config_structure(cls, configuration):
"""Append configuration structure to config from the database.
Database stores "key: value" pairs, the definition of fields should be declared
inside of the plugin. Based on this, the plugin will generate a structure of
configuration with current values and provide access to it via API.
"""
config_structure = getattr(cls, "CONFIG_STRUCTURE") or {}
for configuration_field in configuration:
structure_to_add = config_structure.get(configuration_field.get("name"))
if structure_to_add:
configuration_field.update(structure_to_add)
@classmethod
def _update_configuration_structure(cls, configuration):
config_structure = getattr(cls, "CONFIG_STRUCTURE") or {}
desired_config_keys = set(config_structure.keys())
config = configuration.configuration or []
configured_keys = set(d["name"] for d in config)
missing_keys = desired_config_keys - configured_keys
if not missing_keys:
return
default_config = cls._get_default_configuration()
if not default_config:
return
update_values = [
copy(k)
for k in default_config["configuration"]
if k["name"] in missing_keys
]
config.extend(update_values)
configuration.configuration = config
configuration.save(update_fields=["configuration"])
@classmethod
def get_plugin_configuration(
cls, queryset: QuerySet = None
) -> "PluginConfiguration":
if not queryset:
queryset = PluginConfiguration.objects.all()
defaults = cls._get_default_configuration()
configuration = queryset.get_or_create(name=cls.PLUGIN_NAME, defaults=defaults)[
0
]
cls._update_configuration_structure(configuration)
if configuration.configuration:
# Let's add a translated descriptions and labels
cls._append_config_structure(configuration.configuration)
return configuration
| maferelo/saleor | saleor/extensions/base_plugin.py | Python | bsd-3-clause | 14,742 |
# import asyncio
# import pytest
# from ai.backend.agent.server import (
# AgentRPCServer,
# )
# TODO: rewrite
'''
@pytest.fixture
async def agent(request, tmpdir, event_loop):
config = argparse.Namespace()
config.namespace = os.environ.get('BACKEND_NAMESPACE', 'testing')
config.agent_host = '127.0.0.1'
config.agent_port = 6001 # default 6001
config.stat_port = 6002
config.kernel_host_override = '127.0.0.1'
etcd_addr = os.environ.get('BACKEND_ETCD_ADDR', '127.0.0.1:2379')
redis_addr = os.environ.get('BACKEND_REDIS_ADDR', '127.0.0.1:6379')
config.etcd_addr = host_port_pair(etcd_addr)
config.redis_addr = host_port_pair(redis_addr)
config.event_addr = '127.0.0.1:5000' # dummy value
config.docker_registry = 'lablup'
config.debug = True
config.debug_kernel = None
config.kernel_aliases = None
config.scratch_root = Path(tmpdir)
config.limit_cpus = None
config.limit_gpus = None
config.debug_kernel = None
config.debug_hook = None
config.debug_jail = None
config.debug_skip_container_deletion = False
agent = None
config.instance_id = await identity.get_instance_id()
config.inst_type = await identity.get_instance_type()
config.region = await identity.get_instance_region()
print(f'serving test agent: {config.instance_id} ({config.inst_type}),'
f' ip: {config.agent_host}')
agent = AgentRPCServer(config, loop=event_loop)
await agent.init(skip_detect_manager=True)
await asyncio.sleep(0)
yield agent
print('shutting down test agent...')
if agent:
await agent.shutdown()
await asyncio.sleep(3)
@pytest.mark.asyncio
async def test_get_extra_volumes(docker):
# No extra volumes
mnt_list = await get_extra_volumes(docker, 'python:latest')
assert len(mnt_list) == 0
# Create fake deeplearning sample volume and check it will be returned
vol = None
try:
config = {'Name': 'deeplearning-samples'}
vol = await docker.volumes.create(config)
mnt_list = await get_extra_volumes(docker, 'python-tensorflow:latest')
finally:
if vol:
await vol.delete()
assert len(mnt_list) == 1
assert mnt_list[0].name == 'deeplearning-samples'
@pytest.mark.asyncio
async def test_get_kernel_id_from_container(docker, container):
container_list = await docker.containers.list()
kid = await get_kernel_id_from_container(container_list[0])
assert kid == 'test-container' # defined as in the fixture
@pytest.fixture
async def kernel_info(agent, docker):
kernel_id = str(uuid.uuid4())
config = {
'lang': 'lua:5.3-alpine',
'limits': {'cpu_slot': 1, 'gpu_slot': 0, 'mem_slot': 1, 'tpu_slot': 0},
'mounts': [],
'environ': {},
}
kernel_info = await agent.create_kernel(kernel_id, config)
try:
yield kernel_info
finally:
if kernel_info['id'] in agent.container_registry:
# Container id may be changed (e.g. restarting kernel), so we
# should not rely on the initial value of the container_id.
container_info = agent.container_registry[kernel_info['id']]
container_id = container_info['container_id']
else:
# If fallback to initial container_id if kernel is deleted.
container_id = kernel_info['container_id']
try:
container = docker.containers.container(container_id)
cinfo = await container.show() if container else None
except aiodocker.exceptions.DockerError:
cinfo = None
if cinfo and cinfo['State']['Status'] != 'removing':
await container.delete(force=True)
@pytest.mark.integration
def test_ping(agent):
ret = agent.ping('ping~')
assert ret == 'ping~'
@pytest.mark.integration
@pytest.mark.asyncio
async def test_scan_running_containers(agent, kernel_info, docker):
agent.container_registry.clear()
assert kernel_info['id'] not in agent.container_registry
await agent.scan_running_containers()
assert agent.container_registry[kernel_info['id']]
@pytest.mark.integration
@pytest.mark.asyncio
async def test_create_kernel(agent, docker):
kernel_id = str(uuid.uuid4())
config = {
'lang': 'lablup/lua:5.3-alpine',
'limits': {'cpu_slot': 1, 'gpu_slot': 0, 'mem_slot': 1, 'tpu_slot': 0},
'mounts': [],
'environ': {},
}
kernel_info = container_info = None
try:
kernel_info = await agent.create_kernel(kernel_id, config)
container_info = agent.container_registry[kernel_id]
finally:
container = docker.containers.container(kernel_info['container_id'])
await container.delete(force=True)
assert kernel_info
assert container_info
assert kernel_info['id'] == kernel_id
# TODO: rewrite using resource_spec:
# assert len(kernel_info['cpu_set']) == 1
assert container_info['lang'] == config['lang']
assert container_info['container_id'] == kernel_info['container_id']
# TODO: rewrite using resource_spec:
# assert container_info['limits'] == config['limits']
# assert container_info['mounts'] == config['mounts']
@pytest.mark.integration
@pytest.mark.asyncio
async def test_destroy_kernel(agent, kernel_info):
stat = await agent.destroy_kernel(kernel_info['id'])
assert stat
assert 'cpu_used' in stat
assert 'mem_max_bytes' in stat
assert 'mem_cur_bytes' in stat
assert 'net_rx_bytes' in stat
assert 'net_tx_bytes' in stat
assert 'io_read_bytes' in stat
assert 'io_write_bytes' in stat
assert 'io_max_scratch_size' in stat
assert 'io_cur_scratch_size' in stat
@pytest.mark.integration
@pytest.mark.asyncio
async def test_restart_kernel(agent, kernel_info):
kernel_id = kernel_info['id']
container_id = kernel_info['container_id']
new_config = {
'lang': 'lablup/lua:5.3-alpine',
'limits': {'cpu_slot': 1, 'gpu_slot': 0, 'mem_slot': 1, 'tpu_slot': 0},
'mounts': [],
}
ret = await agent.restart_kernel(kernel_id, new_config)
assert container_id != ret['container_id']
@pytest.mark.integration
@pytest.mark.asyncio
async def test_restart_kernel_cancel_code_execution(
agent, kernel_info, event_loop):
async def execute_code():
nonlocal kernel_info
api_ver = 2
kid = kernel_info['id']
runid = 'test-run-id'
mode = 'query'
code = ('local clock = os.clock\n'
'function sleep(n)\n'
' local t0 = clock()\n'
' while clock() - t0 <= n do end\n'
'end\n'
'sleep(10)\nprint("code executed")')
while True:
ret = await agent.execute(api_ver, kid, runid, mode, code, {})
if ret is None:
break
elif ret['status'] == 'finished':
break
elif ret['status'] == 'continued':
            mode = 'continue'
code = ''
else:
raise Exception('Invalid execution status')
return ret
async def restart_kernel():
nonlocal kernel_info
kernel_id = kernel_info['id']
new_config = {
'lang': 'lablup/lua:5.3-alpine',
'limits': {'cpu_slot': 1, 'gpu_slot': 0, 'mem_slot': 1, 'tpu_slot': 0},
'mounts': [],
}
await agent.restart_kernel(kernel_id, new_config)
t1 = asyncio.ensure_future(execute_code(), loop=event_loop)
start = datetime.now()
await asyncio.sleep(1)
t2 = asyncio.ensure_future(restart_kernel(), loop=event_loop)
results = await asyncio.gather(t1, t2)
end = datetime.now()
assert results[0] is None # no execution result
assert (end - start).total_seconds() < 10
@pytest.mark.integration
@pytest.mark.asyncio
async def test_execute(agent, kernel_info):
# Test with lua:5.3-alpine image only
api_ver = 2
kid = kernel_info['id']
runid = 'test-run-id'
mode = 'query'
code = 'print(17)'
while True:
ret = await agent.execute(api_ver, kid, runid, mode, code, {})
if ret['status'] == 'finished':
break
elif ret['status'] == 'continued':
            mode = 'continue'
code = ''
else:
raise Exception('Invalid execution status')
assert ret['console'][0][0] == 'stdout'
assert ret['console'][0][1] == '17\n'
@pytest.mark.integration
@pytest.mark.asyncio
async def test_execute_batch_mode(agent, kernel_info):
# Test with lua:5.3-alpine image only
api_ver = 2
kid = kernel_info['id']
runid = 'test-run-id'
mode = 'batch'
code = ''
opt = {'clean': '*',
'build': '*',
'exec': '*'}
# clean_finished = False
build_finished = False
await agent.upload_file(kid, 'main.lua', b'print(17)')
while True:
ret = await agent.execute(api_ver, kid, runid, mode, code, opt)
if ret['status'] == 'finished':
# assert clean_finished and build_finished
assert build_finished
break
# elif ret['status'] == 'clean-finished':
# assert not clean_finished and not build_finished
# clean_finished = True
# mode = 'continue'
elif ret['status'] == 'build-finished':
# assert clean_finished and not build_finished
assert not build_finished
build_finished = True
mode = 'continue'
elif ret['status'] == 'continued':
mode = 'continue'
else:
raise Exception('Invalid execution status')
assert ret['console'][0][0] == 'stdout'
assert ret['console'][0][1] == '17\n'
@pytest.mark.integration
@pytest.mark.asyncio
async def test_upload_file(agent, kernel_info):
fname = 'test.txt'
await agent.upload_file(kernel_info['id'], fname, b'test content')
uploaded_to = agent.config.scratch_root / kernel_info['id'] / '.work' / fname
assert uploaded_to.exists()
@pytest.mark.integration
@pytest.mark.asyncio
async def test_reset(agent, docker):
kernel_ids = []
container_ids = []
config = {
'lang': 'lablup/lua:5.3-alpine',
'limits': {'cpu_slot': 1, 'gpu_slot': 0, 'mem_slot': 1, 'tpu_slot': 0},
'mounts': [],
}
try:
# Create two kernels
for i in range(2):
kid = str(uuid.uuid4())
kernel_ids.append(kid)
info = await agent.create_kernel(kid, config)
container_ids.append(info['container_id'])
# 2 containers are created
assert docker.containers.container(container_ids[0])
assert docker.containers.container(container_ids[1])
await agent.reset()
# Containers are destroyed
with pytest.raises(aiodocker.exceptions.DockerError):
c1 = docker.containers.container(container_ids[0])
c1info = await c1.show()
if c1info['State']['Status'] == 'removing':
raise aiodocker.exceptions.DockerError(
404, {'message': 'success'})
with pytest.raises(aiodocker.exceptions.DockerError):
c2 = docker.containers.container(container_ids[1])
c2info = await c2.show()
if c2info['State']['Status'] == 'removing':
raise aiodocker.exceptions.DockerError(
404, {'message': 'success'})
finally:
for cid in container_ids:
try:
container = docker.containers.container(cid)
cinfo = await container.show() if container else None
except aiodocker.exceptions.DockerError:
cinfo = None
if cinfo and cinfo['State']['Status'] != 'removing':
await container.delete(force=True)
'''
| lablup/sorna-agent | tests/test_server.py | Python | lgpl-3.0 | 11,911 |
# Author: Jose G Perez
# Version 1.0
# Last Modified: January 31, 2018
import pylab as plt
import numpy as np
def imshow(im, title=''):
figure = plt.figure()
plt.axis('off')
plt.tick_params(axis='both',
left='off', top='off', right='off', bottom='off',
labelleft='off', labeltop='off', labelright='off', labelbottom='off')
plt.title(title)
plt.imshow(im)
return figure
def imshow_matches__(im, title):
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlabel('PW Level')
ax.set_ylabel('S Level')
ax.set_title(title)
plt.set_cmap(plt.get_cmap('hot'))
plt.imshow(im)
def imshow_matches(im, title):
fig = plt.figure()
ax = fig.add_subplot(111)
# ax.set_xlabel('PW Level')
# ax.set_ylabel('S Level')
# ax.set_xticks(s_label)
# ax.set_yticks(pw_label)
ax.set_title(title)
plt.set_cmap(plt.get_cmap('hot'))
plt.imshow(im)
| DeveloperJose/Vision-Rat-Brain | feature_matching_v3/util_im.py | Python | mit | 950 |
"""
Account-related URLs.
"""
from django.conf.urls import patterns, url
from django.views.generic import TemplateView
from session_csrf import anonymous_csrf
from . import views
urlpatterns = patterns(
"moztrap.view.users.views",
# auth -------------------------------------------------------------------
url(r"^login/", "login", name="auth_login"),
url(r"^logout/", "logout", name="auth_logout"),
url(r"^password/change/$", "password_change", name="auth_password_change"),
url(r"^password/reset/$", "password_reset", name="auth_password_reset"),
url(r"^reset/(?P<uidb64>[0-9A-Za-z]{1,13})-(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$",
"password_reset_confirm",
name="auth_password_reset_confirm"),
url(r"^set_name/$", "set_username", name="auth_set_username"),
url(r"^(?P<user_id>\d+)/apikey/$", "create_apikey", name="auth_create_apikey"),
# registration -----------------------------------------------------------
# Activation keys get matched by \w+ instead of the more specific
# [a-fA-F0-9]{40} because a bad activation key should still get to the view;
# that way it can return a sensible "invalid key" message instead of a
# confusing 404.
url(r"^activate/(?P<activation_key>\w+)/$",
anonymous_csrf(views.ActivationView.as_view()),
name="registration_activate"),
url(r"^register/$",
anonymous_csrf(views.RegistrationView.as_view()),
name="registration_register"),
url(r"^register/closed/$",
TemplateView.as_view(template_name="users/registration_closed.html"),
name="registration_disallowed"),
)
| mozilla/moztrap | moztrap/view/users/urls.py | Python | bsd-2-clause | 1,655 |
import ctypes
import configparser
import os
import sys
from PyQt5 import QtCore
from PyQt5 import QtGui
from PyQt5 import QtWidgets
from PyQt5.QtPrintSupport import QPrintPreviewDialog
from PyQt5.QtPrintSupport import QPrinter
CONFIG_FILE_PATH = "notepad.ini"
ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID("notepad")
QtCore.QTextCodec.setCodecForLocale(QtCore.QTextCodec.codecForName("utf-8"))
class Notepad(QtWidgets.QMainWindow):
def __init__(self):
self.judgeConfig()
self.cur_file = ''
self.default_dir = ''
self.clipboard = QtWidgets.QApplication.clipboard()
self.last_search = ''
self.font_family = 'Consolas'
self.font_size = '16'
self.font_bold = 'False'
self.font_italic = 'False'
self.font_strikeOut = 'False'
self.font_underline = 'False'
self.config = configparser.ConfigParser()
self.config.read(CONFIG_FILE_PATH, 'utf-8')
super(QtWidgets.QMainWindow, self).__init__()
self.initUI()
def initUI(self):
self.setWindowTitle('Без названия - Блокнот')
self.setWindowIcon(QtGui.QIcon('resource/notepad.png'))
self.statusBar().showMessage('Ready')
self.createEditText()
self.createActions()
self.createMenubar()
self.createToolbar()
self.readSettings()
self.cutAction.setEnabled(False)
self.copyAction.setEnabled(False)
self.undoAction.setEnabled(False)
self.redoAction.setEnabled(False)
self.text.copyAvailable.connect(self.cutAction.setEnabled)
self.text.copyAvailable.connect(self.copyAction.setEnabled)
self.text.undoAvailable.connect(self.undoAction.setEnabled)
self.text.redoAvailable.connect(self.redoAction.setEnabled)
self.text.textChanged.connect(self.findEnable)
def findEnable(self):
if self.text.toPlainText():
self.findAction.setEnabled(True)
else:
self.findAction.setEnabled(False)
self.findNextAction.setEnabled(False)
def createEditText(self):
self.text = QtWidgets.QPlainTextEdit()
self.text.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.text.customContextMenuRequested.connect(self.showContextMenu)
self.setCentralWidget(self.text)
def showContextMenu(self):
menu = QtWidgets.QMenu(self)
menu.addAction(self.undoAction)
menu.addAction(self.redoAction)
menu.addSeparator()
menu.addAction(self.cutAction)
menu.addAction(self.copyAction)
menu.addAction(self.pasteAction)
menu.addSeparator()
menu.addAction(self.selectAllAction)
menu.exec_(QtGui.QCursor.pos())
def judgeConfig(self):
if not os.path.exists(CONFIG_FILE_PATH):
f = open(CONFIG_FILE_PATH, 'w', encoding='utf-8')
f.close()
def readSettings(self):
        # restore window geometry from the config file
        width = self.getConfig('Display', 'width', 800)
        height = self.getConfig('Display', 'height', 600)
        px = self.getConfig('Display', 'x', 0)
        py = self.getConfig('Display', 'y', 0)
        self.move(int(px), int(py))
        self.resize(int(width), int(height))
self.default_dir = self.getConfig('Setting', 'dir', '')
self.font_family = self.getConfig('Font', 'family', 'Consolas')
self.font_size = self.getConfig('Font', 'size', '10')
self.font_bold = self.getConfig('Font', 'bold', '0')
self.font_italic = self.getConfig('Font', 'italic', '0')
self.font_strikeOut = self.getConfig('Font', 'strikeOut', '0')
self.font_underline = self.getConfig('Font', 'underline', '0')
font = QtGui.QFont(self.font_family, int(self.font_size))
font.setBold(int(self.font_bold))
font.setItalic(int(self.font_italic))
font.setStrikeOut(int(self.font_strikeOut))
font.setUnderline(int(self.font_underline))
self.text.setFont(font)
def writeSetting(self):
self.writeConfig('Display', 'width', str(self.size().width()))
self.writeConfig('Display', 'height', str(self.size().height()))
self.writeConfig('Display', 'x', str(self.pos().x()))
self.writeConfig('Display', 'y', str(self.pos().y()))
self.writeConfig('Setting', 'dir', self.default_dir)
self.writeConfig('Font', 'family', self.text.font().family())
self.writeConfig('Font', 'size', str(self.text.font().pointSize()))
self.writeConfig('Font', 'bold', int(self.text.font().bold()))
self.writeConfig('Font', 'italic', int(self.text.font().italic()))
self.writeConfig('Font', 'strikeOut', int(
self.text.font().strikeOut()))
self.writeConfig('Font', 'underline', int(
self.text.font().underline()))
self.config.write(open(CONFIG_FILE_PATH, 'w', encoding='utf-8'))
def createMenubar(self):
fileMenu = QtWidgets.QMenu('ФАЙЛ', self)
fileMenu.addAction(self.newAction)
fileMenu.addAction(self.openAction)
fileMenu.addAction(self.saveAction)
fileMenu.addAction(self.saveAsAction)
fileMenu.addSeparator()
fileMenu.addAction(self.printAction)
fileMenu.addAction(self.printReviewAction)
fileMenu.addSeparator()
fileMenu.addAction(self.quitAction)
editMenu = QtWidgets.QMenu('РЕДАКТИРОВАТЬ', self)
editMenu.addAction(self.undoAction)
editMenu.addAction(self.redoAction)
editMenu.addSeparator()
editMenu.addAction(self.cutAction)
editMenu.addAction(self.copyAction)
editMenu.addAction(self.pasteAction)
editMenu.addSeparator()
self.findAction.setEnabled(False)
self.findNextAction.setEnabled(False)
editMenu.addAction(self.findAction)
editMenu.addAction(self.findNextAction)
editMenu.addAction(self.replaceAction)
styleMenu = QtWidgets.QMenu('ФОРМАТ', self)
styleMenu.addAction(self.lineWrapAction)
styleMenu.addAction(self.fontAction)
helpMenu = QtWidgets.QMenu('ПОМОЩЬ', self)
helpMenu.addAction(self.aboutAction)
self.menuBar().addMenu(fileMenu)
self.menuBar().addMenu(editMenu)
self.menuBar().addMenu(styleMenu)
self.menuBar().addMenu(helpMenu)
def createToolbar(self):
toolbar = self.addToolBar('показать меню')
toolbar.addAction(self.newAction)
toolbar.addAction(self.openAction)
toolbar.addAction(self.saveAction)
toolbar.addSeparator()
toolbar.addAction(self.cutAction)
toolbar.addAction(self.copyAction)
toolbar.addAction(self.pasteAction)
def createActions(self):
self.undoAction = QtWidgets.QAction(QtGui.QIcon(
'resource/undo.png'), "ОТМЕНИТЬ", self, shortcut=QtGui.QKeySequence.Undo, statusTip="ОТМЕНИТЬ ПОСЛЕДНЕЕ ДЕЙСТВИЕ", triggered=self.text.undo)
self.redoAction = QtWidgets.QAction(QtGui.QIcon(
'resource/redo.png'), 'ВЕРНУТЬ', self, shortcut=QtGui.QKeySequence.Redo, statusTip='ВЕРНУТЬ ОТМЕНЕННОЕ ДЕЙСТВИЕ', triggered=self.text.redo)
self.cutAction = QtWidgets.QAction(QtGui.QIcon(
'resource/cut.png'), "ВЫРЕЗАТЬ", self, shortcut=QtGui.QKeySequence.Cut, statusTip="ВЫРЕЗАТЬ", triggered=self.text.cut)
self.copyAction = QtWidgets.QAction(QtGui.QIcon(
'resource/copy.png'), "КОПИРОВАТЬ", self, shortcut=QtGui.QKeySequence.Copy, statusTip="КОПИРОВАТЬ В БУФЕР ОБМЕНА", triggered=self.text.copy)
self.pasteAction = QtWidgets.QAction(QtGui.QIcon(
'resource/paste.png'), "ВСТАВИТЬ", self, shortcut=QtGui.QKeySequence.Paste, statusTip="ВСТАВИТЬ ИЗ БУФЕРА ОБМЕНА", triggered=self.text.paste)
self.selectAllAction = QtWidgets.QAction(QtGui.QIcon(
'resource/SelectAll.png'), "ВЫБРАТЬ ВСЁ", self, shortcut=QtGui.QKeySequence.SelectAll, statusTip="ВЫБРАТЬ ВСЁ", triggered=self.text.selectAll)
self.newAction = QtWidgets.QAction(QtGui.QIcon(
'resource/new.png'), 'НОВЫЙ', self, shortcut=QtGui.QKeySequence.New, statusTip='НОВЫЙ ФАЙЛ', triggered=self.newFile)
self.openAction = QtWidgets.QAction(QtGui.QIcon(
'resource/open.png'), 'ОТКРЫТЬ', self, shortcut=QtGui.QKeySequence.Open, statusTip='ОТКРЫТЬ ФАЙЛ', triggered=self.openFile)
self.saveAction = QtWidgets.QAction(QtGui.QIcon(
'resource/save.png'), 'СОХРАНИТЬ', self, shortcut=QtGui.QKeySequence.Save, statusTip='СОХРАНИТЬ ФАЙЛ', triggered=self.saveFile)
self.saveAsAction = QtWidgets.QAction(QtGui.QIcon(
'resource/save.png'), 'СОХРАНИТЬ КАК', self, shortcut=QtGui.QKeySequence.SaveAs, statusTip='СОХРАНИТЬ ФАЙЛ', triggered=self.saveAsFile)
self.quitAction = QtWidgets.QAction(QtGui.QIcon(
'resource/exit.png'), 'ВЫХОД', self, shortcut="Ctrl+Q", statusTip='ЗАКРЫТЬ ПРОГРАММУ', triggered=self.close)
self.lineWrapAction = QtWidgets.QAction(QtGui.QIcon(
'resource/check.png'), 'ПРОВЕРИТЬ', self, triggered=self.setLineWrap)
self.fontAction = QtWidgets.QAction(QtGui.QIcon(
'resource/font.png'), 'ШРИФТ', self, statusTip='ИЗМЕНЕНИЕ ШРИФТА', triggered=self.setFont)
self.aboutAction = QtWidgets.QAction(QtGui.QIcon(
'resource/about.png'), 'О ПРОГРАММЕ', self, statusTip='О ПРОГРАММЕ', triggered=self.about)
self.findAction = QtWidgets.QAction(QtGui.QIcon(
'resource/find.png'), 'НАЙТИ', self, statusTip='НАЙТИ', shortcut='Ctrl+F', triggered=self.findText)
self.findNextAction = QtWidgets.QAction(QtGui.QIcon(
'resource/find.png'), 'НАЙТИ ДАЛЕЕ', self, statusTip='НАЙТИ ДАЛЕЕ', shortcut='F3', triggered=self.searchText)
self.replaceAction = QtWidgets.QAction(QtGui.QIcon(
'resource/replace.png'), 'ЗАМЕНИТЬ', self, statusTip='ЗАМЕНИТЬ', shortcut='Ctrl+H', triggered=self.replace)
self.printAction = QtWidgets.QAction(QtGui.QIcon(
'resource/print.png'), 'ПЕЧАТЬ', self, statusTip='ПЕЧАТЬ', shortcut='Ctrl+P', triggered=self.printDocument)
self.printReviewAction = QtWidgets.QAction(QtGui.QIcon(
'resource/print.png'), 'ПРЕДВАРИТЕЛЬНЫЙ ПРОСМОТР', self, statusTip='ПРЕДВАРИТЕЛЬНЫЙ ПРОСМОТР', triggered=self.printReview)
def closeEvent(self, event):
if self.maybeSave():
self.writeSetting()
event.accept()
else:
event.ignore()
def newFile(self):
if self.maybeSave():
self.text.clear()
def openFile(self):
if self.maybeSave():
filename, _ = QtWidgets.QFileDialog.getOpenFileName(
self, '', self.default_dir, 'Текст (*.txt);;Все файлы(*.*)')
file = QtCore.QFile(filename)
if not file.open(QtCore.QFile.ReadOnly | QtCore.QFile.Text):
return
inf = QtCore.QTextStream(file)
self.text.setPlainText(inf.readAll())
self.setCurrentFile(filename)
def saveFile(self):
if not self.cur_file:
return self.saveAsFile()
writer = QtGui.QTextDocumentWriter(self.cur_file)
success = writer.write(self.text.document())
self.setCurrentFile(self.cur_file)
if success:
self.statusBar().showMessage('Сохранено', 1000)
return success
def saveAsFile(self):
filename, _ = QtWidgets.QFileDialog.getSaveFileName(
self, '', self.default_dir + 'Без названия', 'текст (*.txt);;Все файлы(*.*)')
if not filename:
return False
self.setCurrentFile(filename)
return self.saveFile()
def getConfig(self, section, key, default):
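        # Read a value from the parsed INI config, falling back to the given
        # default when the section or key is missing.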
try:
return self.config[section][key]
        except KeyError:
return default
def findText(self):
self.find_dialog = QtWidgets.QDialog(self)
self.find_dialog.setWindowTitle('Найти')
search_label = QtWidgets.QLabel('Найти:')
self.search_text = QtWidgets.QLineEdit(self.last_search)
search_label.setBuddy(self.search_text)
self.search_btn = QtWidgets.QPushButton('Найти далее')
self.search_btn.setDefault(True)
layout = QtWidgets.QHBoxLayout()
layout.addWidget(search_label)
layout.addWidget(self.search_text)
layout.addWidget(self.search_btn)
self.search_btn.clicked.connect(self.searchText)
self.find_dialog.setLayout(layout)
self.find_dialog.show()
def searchText(self):
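        # Search forward from the current cursor anchor; on a hit, move the
        # cursor just past the match and select backwards so the found text
        # is highlighted in the editor.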
cursor = self.text.textCursor()
start = cursor.anchor()
text = self.search_text.text()
self.last_search = text
if self.last_search:
self.findNextAction.setEnabled(True)
text_len = len(text)
context = self.text.toPlainText()
index = context.find(text, start)
if -1 == index:
QtWidgets.QMessageBox.information(
self.find_dialog, 'Блокнот', 'Не найдено\"%s\"' % text)
else:
start = index
cursor = self.text.textCursor()
cursor.clearSelection()
cursor.movePosition(QtGui.QTextCursor.Start,
QtGui.QTextCursor.MoveAnchor)
cursor.movePosition(QtGui.QTextCursor.Right,
QtGui.QTextCursor.MoveAnchor, start + text_len)
cursor.movePosition(QtGui.QTextCursor.Left,
QtGui.QTextCursor.KeepAnchor, text_len)
cursor.selectedText()
self.text.setTextCursor(cursor)
def replaceText(self):
cursor = self.text.textCursor()
start = cursor.anchor()
text = self.search_text.text()
text_len = len(text)
context = self.text.toPlainText()
index = context.find(text, start)
sender = self.sender()
if sender is self.replace_button:
if text == cursor.selectedText():
position = cursor.anchor()
cursor.removeSelectedText()
replace_text = self.replace_text.text()
cursor.insertText(replace_text)
self.replaceText()
return
if -1 == index:
QtWidgets.QMessageBox.information(
self.replace_dialog, 'Блокнот', 'Не найдено\"%s\"' % text)
else:
start = index
cursor = self.text.textCursor()
cursor.clearSelection()
cursor.movePosition(QtGui.QTextCursor.Start,
QtGui.QTextCursor.MoveAnchor)
cursor.movePosition(QtGui.QTextCursor.Right,
QtGui.QTextCursor.MoveAnchor, start + text_len)
cursor.movePosition(QtGui.QTextCursor.Left,
QtGui.QTextCursor.KeepAnchor, text_len)
cursor.selectedText()
self.text.setTextCursor(cursor)
def replaceAll(self):
context = self.text.toPlainText()
search_word = self.search_text.text()
replace_word = self.replace_text.text()
new_context = context.replace(search_word, replace_word)
doc = self.text.document()
curs = QtGui.QTextCursor(doc)
curs.select(QtGui.QTextCursor.Document)
curs.insertText(new_context)
def printDocument(self):
document = self.text.document()
printer = QPrinter()
dlg = QPrintPreviewDialog(printer, self)
if dlg.exec_() != QtWidgets.QDialog.Accepted:
return
document.print_(printer)
self.statusBar().showMessage("Печать успешна", 2000)
def printReview(self):
printer = QPrinter(QPrinter.HighResolution)
review = QPrintPreviewDialog(printer, self)
review.setWindowFlags(QtCore.Qt.Window)
review.paintRequested.connect(self.print)
review.exec_()
def print(self, printer):
self.text.print_(printer)
def replace(self):
self.replace_dialog = QtWidgets.QDialog(self)
self.replace_dialog.setWindowTitle('Заменить')
search_label = QtWidgets.QLabel('Поиск текста:')
self.search_text = QtWidgets.QLineEdit()
search_label.setBuddy(self.search_text)
replace_label = QtWidgets.QLabel('Заменить на:')
self.replace_text = QtWidgets.QLineEdit()
replace_label.setBuddy(self.replace_text)
self.find_button = QtWidgets.QPushButton('Найти далее')
self.replace_button = QtWidgets.QPushButton('Заменить')
self.replace_all_button = QtWidgets.QPushButton('Заменить всё')
self.replace_button.setEnabled(False)
self.replace_all_button.setEnabled(False)
self.find_button.clicked.connect(self.replaceText)
self.replace_button.clicked.connect(self.replaceText)
self.replace_all_button.clicked.connect(self.replaceAll)
self.search_text.textChanged.connect(self.replaceEnable)
layout = QtWidgets.QGridLayout()
layout.addWidget(search_label, 0, 0)
layout.addWidget(self.search_text, 0, 1)
layout.addWidget(self.find_button, 0, 2)
layout.addWidget(replace_label, 1, 0)
layout.addWidget(self.replace_text, 1, 1)
layout.addWidget(self.replace_button, 1, 2)
layout.addWidget(self.replace_all_button, 2, 2)
self.replace_dialog.setLayout(layout)
self.replace_dialog.show()
def replaceEnable(self):
if not self.search_text.text():
self.replace_button.setEnabled(False)
self.replace_all_button.setEnabled(False)
else:
self.replace_button.setEnabled(True)
self.replace_all_button.setEnabled(True)
def maybeSave(self):
if self.text.document().isModified():
alert = QtWidgets.QMessageBox(self)
alert.setWindowTitle('Блокнот')
alert.setText('Сохранить изменения %s ?' % self.cur_file)
saveButton = alert.addButton(
'Да', QtWidgets.QMessageBox.ActionRole)
unSaveButton = alert.addButton(
'Нет', QtWidgets.QMessageBox.ActionRole)
cancelButton = alert.addButton(
'Отмена', QtWidgets.QMessageBox.ActionRole)
alert.exec_()
ret = alert.clickedButton()
if ret == saveButton:
return self.saveFile()
elif ret == unSaveButton:
return True
elif ret == cancelButton:
return False
return True
def about(self):
QtWidgets.QMessageBox.about(
self, 'О программе', r'<h2>КП по ОП</h2><p> <b>Выполнил студент</b> <br> <b>Группы P3175</b> <br>Головатый А.Д. <br>при использовании PYQT5 и Python3.4</p>')
def setLineWrap(self):
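        # Toggle between widget-width word wrap and no wrap; the check icon
        # on the menu action mirrors the current state.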
if not self.text.lineWrapMode():
self.text.setLineWrapMode(QtWidgets.QPlainTextEdit.WidgetWidth)
self.lineWrapAction.setIcon(QtGui.QIcon('resource/check.png'))
else:
self.text.setLineWrapMode(QtWidgets.QPlainTextEdit.NoWrap)
self.lineWrapAction.setIcon(QtGui.QIcon(''))
def setFont(self):
font, ok = QtWidgets.QFontDialog.getFont(self.text.font(), self, 'Выбор шрифта')
if ok:
self.text.setFont(QtGui.QFont(font))
def setCurrentFile(self, filename):
self.cur_file = filename
path, _ = os.path.split(filename)
self.default_dir = path + '/'
        if not filename:
            self.setWindowTitle('Untitled - Notepad')
        else:
            self.setWindowTitle('%s - Notepad' % filename)
self.text.document().setModified(False)
def writeConfig(self, section, key, value):
if not self.config.has_section(section):
self.config.add_section(section)
self.config.set(section, key, str(value))
if __name__ == '__main__':
app = QtWidgets.QApplication(sys.argv)
if len(sys.argv) > 1:
locale = sys.argv[1]
else:
locale = QtCore.QLocale.system().name()
notepad = Notepad()
notepad.show()
app.exec_()
| golovatyi/1Term_Project | notepad.py | Python | mit | 21,004 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2011 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Coded by: [email protected]
#
##############################################################################
import calificacion
| pato-kun/SSPP | Calificacion/__init__.py | Python | agpl-3.0 | 1,179 |
"""
@created_at 2014-07-15
@author Exequiel Fuentes <[email protected]>
@author Brian Keith <[email protected]>
"""
# It is recommended to follow these standards:
# 1. For coding style: PEP 8 - Style Guide for Python Code (http://legacy.python.org/dev/peps/pep-0008/)
# 2. For documentation: PEP 257 - Docstring Conventions (http://legacy.python.org/dev/peps/pep-0257/)
import os
import traceback
import sys
from lib import *
def check_version():
    """Python v2.7 is required by the course, so we verify the version"""
    if sys.version_info[:2] != (2, 7):
        raise Exception("It seems that python v2.7 is not installed on the system")
def db_path():
    """Returns the path of the databases"""
pathfile = os.path.dirname(os.path.abspath(__file__))
return os.path.join(pathfile, "db")
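# Illustrative note (paths are assumptions, not taken from this project): if
# this file lives at /home/user/pca/main.py, db_path() returns "/home/user/pca/db".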
if __name__ == "__main__":
try:
        # Verify the python version
        check_version()
        # Load the data
        my_pca_lda = FKSkLearn(os.path.join(db_path(), "datos_diabetes.npz"))
        # Prepare the data for validation
        my_pca_lda.fk_train_test_split()
        # Train the PCA + LDA classifier with the optimal dimension.
        my_pca_lda.fk_pca_lda()
        # Build the Bayes classifier using the sklearn library
my_pca_lda.fk_bayes_classifier()
print("**************")
print("sklearn_Bayes:")
print("Number of mislabeled points : %d" % (my_pca_lda.fk_get_y_test() != my_pca_lda.fk_get_y_pred()).sum())
print("Accuracy: ", my_pca_lda.fk_score())
print("**************")
        # Our own implementation of the classifier.
fknb = FKNaiveBayesClassifier()
fknb.fit(my_pca_lda.fk_get_lda_train(), my_pca_lda.fk_get_y_train())
y_pred_FK = fknb.predict(my_pca_lda.fk_get_lda_test())
print("FK_Bayes")
print("Number of mislabeled points : %d" % (my_pca_lda.fk_get_y_test() != y_pred_FK).sum())
print("Accuracy: ", fknb.score(my_pca_lda.fk_get_lda_test(), my_pca_lda.fk_get_y_test()))
print("**************")
        # This is to verify that the predictions are equal; it should yield an empty list.
        print("...testing equality...")
y_pred_SK = [int(i) for i in my_pca_lda.fk_get_y_pred()]
#print y_pred_SK
#print y_pred_FK
        # Check whether the list is empty.
        if y_pred_SK == y_pred_FK:
            print "The two methods are equal!"
        else:
            print "They are not equal. :("
        # Plot the information.
graph = Graph(my_pca_lda.fk_get_lda_train(), my_pca_lda.fk_get_y_train())
graph.frequencies_histogram()
graph.probability_density_functions()
graph.conditional_probability(my_pca_lda.fk_get_lda_train(), my_pca_lda.fk_get_y_prob())
graph.show_graphs()
except Exception, err:
print traceback.format_exc()
finally:
sys.exit()
| efulet/pca | pca/main.py | Python | mit | 3,043 |
config = {
"interfaces": {
"google.spanner.admin.database.v1.DatabaseAdmin": {
"retry_codes": {
"idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"],
"non_idempotent": [],
},
"retry_params": {
"default": {
"initial_retry_delay_millis": 1000,
"retry_delay_multiplier": 1.3,
"max_retry_delay_millis": 32000,
"initial_rpc_timeout_millis": 60000,
"rpc_timeout_multiplier": 1.0,
"max_rpc_timeout_millis": 60000,
"total_timeout_millis": 600000,
}
},
"methods": {
"ListDatabases": {
"timeout_millis": 30000,
"retry_codes_name": "idempotent",
"retry_params_name": "default",
},
"CreateDatabase": {
"timeout_millis": 3600000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default",
},
"GetDatabase": {
"timeout_millis": 30000,
"retry_codes_name": "idempotent",
"retry_params_name": "default",
},
"UpdateDatabaseDdl": {
"timeout_millis": 3600000,
"retry_codes_name": "idempotent",
"retry_params_name": "default",
},
"DropDatabase": {
"timeout_millis": 3600000,
"retry_codes_name": "idempotent",
"retry_params_name": "default",
},
"GetDatabaseDdl": {
"timeout_millis": 30000,
"retry_codes_name": "idempotent",
"retry_params_name": "default",
},
"SetIamPolicy": {
"timeout_millis": 30000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default",
},
"GetIamPolicy": {
"timeout_millis": 30000,
"retry_codes_name": "idempotent",
"retry_params_name": "default",
},
"TestIamPermissions": {
"timeout_millis": 30000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default",
},
},
}
}
}
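# A minimal consumption sketch (assumed caller code, not part of this module):
# GAPIC clients of this generation typically parse these settings via
# google.api_core, roughly like this:
#
#     from google.api_core import gapic_v1
#     interface = config["interfaces"][
#         "google.spanner.admin.database.v1.DatabaseAdmin"]
#     method_configs = gapic_v1.config.parse_method_configs(interface)
#     # method_configs["ListDatabases"].timeout and .retry now reflect the
#     # timeout_millis / retry_params values declared above.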
| tseaver/google-cloud-python | spanner/google/cloud/spanner_admin_database_v1/gapic/database_admin_client_config.py | Python | apache-2.0 | 2,633 |
import json
from collections import (
Counter,
defaultdict as deft
)
from copy import deepcopy as cp
# from cPickle import (
# dump as to_pickle,
# load as from_pickle
# )
from StringIO import StringIO
from TfIdfMatrix import TfIdfMatrix
from Tools import from_csv
class CategoryTree:
def __init__(self, categories_by_concept, terms,
categories, tfidf, max_depth=5, min_df=20
):
self.min_df = min_df
self.path_categories_by_concept = categories_by_concept
self.path_categories = categories
self.path_terms = terms
self.max_depth = max_depth
self.observed_category = deft(bool)
self.id_by_concept = dict([])
self.concept_by_id = dict([])
self.term_is_category = deft(bool)
self.parents_by_category = dict([])
self.parents_by_concept = deft(list)
self.id_by_term = dict([])
self.term_by_id = dict([])
self.has_parents = deft(bool)
self.tfidf = tfidf
self.pulling = set([])
self.vector_by_category = deft(Counter)
self.contributors_by_category = deft(set)
self.projected = Counter()
def build(self):
for i, c in enumerate(self.concept_by_id.values()):
self(c)
if not i % 100:
t = float(len(self.concept_by_id.keys()))
print i, int(t), round(i / t, 2)
# if i >= 5000:
# break
def dump(self):
        # Write one record per projected category vector to a plain text file
out = open('vector.dump.txt', 'wb')
for i, (_id, projections) in enumerate(self.projected.items()):
if not i % 100:
print i, len(self.projected.keys())
if not projections:
continue
features = [
(self.tfidf.word_by_id[wid], round(weight, 4))
for wid, weight in self.vector_by_category[_id].most_common()
if round(weight, 4)
]
record = (
_id,
self.concept_by_id[_id],
features
)
out.write('%s\n' % str(record))
out.close()
def __call__(self, category):
self.pulling = set([])
return self.__pull(None, 0, category, dict([]))
def __get_parents(self, _id):
parents = []
name = self.concept_by_id[_id]
if (
not self.observed_category[name] or
not self.observed_category[_id] or
not self.has_parents[_id]
):
return []
else:
for i in self.parents_by_category[_id]:
if not self.observed_category[i]:
continue
_name = self.concept_by_id[i]
parents.append(_name)
return set(parents) - self.pulling
def __pull(self, vector, depth, category, tree):
_id = self.id_by_concept[category]
if not self.pulling:
# print
# print
# print category, _id
# print [self.term_by_id[x] for x in self.contributors_by_category[_id]]
# print self.vector_by_category[_id].most_common(20)
vector = self.vector_by_category[_id]
if not self.observed_category[category]:
return dict([])
parents = self.__get_parents(_id)
if not parents or depth >= self.max_depth:
tree[category] = dict([])
else:
subtree = dict([])
self.pulling.update(parents)
for parent in parents:
subtree = self.__pull(vector, depth + 1, parent, subtree)
tree[category] = subtree
self.__project(vector, tree)
return tree
def __project(self, vector, tree):
if not tree.keys():
return
else:
for key, subtree in tree.items():
_id = self.id_by_concept[key]
self.projected[_id] += 1
self.__add2vec(vector, _id)
self.__project(vector, subtree)
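    # Note on the recursion above (added commentary): __project visits every
    # ancestor category reachable from a concept and calls __add2vec for each,
    # so a category's vector accumulates the weighted tf-idf vectors of all
    # concepts that sit anywhere below it in the tree.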
def __add2vec(self, vector, _id):
# for w, weight in vector.items():
# __id = self.tfidf.id_by_word[w]
for __id, weight in vector.items():
self.vector_by_category[_id][__id] += weight
def load(self):
self.__load_terms()
self.__load_categories()
self.__load_assignments()
def __load_categories(self):
for concept, _id in from_csv(self.path_categories):
_id = int(_id)
self.id_by_concept[concept] = _id
self.concept_by_id[_id] = concept
self.observed_category[_id] = True
self.observed_category[concept] = True
# print concept, _id, len(self.id_by_concept.keys())
# exit()
def __load_terms(self):
for term, _id in from_csv(self.path_terms):
_id = int(_id)
self.term_by_id[_id] = term
self.id_by_term[term] = _id
if not term.startswith('Category:'):
continue
self.term_is_category[term] = True
self.term_is_category[_id] = True
def __load_assignments(self):
for row in from_csv(self.path_categories_by_concept):
ints = [int(field) for field in row]
term_id = ints[0]
term = self.term_by_id[term_id]
if self.term_is_category[term_id] and \
self.observed_category[term]:
term = self.term_by_id[term_id]
cat_id = self.id_by_concept[term]
assignments = [i for i in ints[1:] if self.observed_category[i]]
self.parents_by_category[cat_id] = assignments
self.has_parents[cat_id] = True
else:
vector = self.tfidf.content(term_id)
assignments = [i for i in ints[1:] if self.observed_category[i]]
self.parents_by_concept[term_id] = assignments
for a_id in assignments:
for w, weight in vector:
if self.tfidf.df[w] < self.min_df:
continue
#print term, term_id, self.concept_by_id[a_id], w, self.vector_by_category[a_id][w], '\t+%f' % weight
self.vector_by_category[a_id][w] += weight
self.contributors_by_category[a_id].update([term_id])
if __name__ == '__main__':
import random
from random import shuffle as randomize
tfidf = TfIdfMatrix()
tfidf.load_features('bkp.big.out/vector.term.csv')
tfidf.load_distribution('bkp.big.out/vector.index.csv')
# tfidf.load_features('vector.term.csv')
# tfidf.load_distribution('vector.index.csv')
ctree = CategoryTree(
'bkp.big.out/category.index.csv',
'bkp.big.out/term.csv',
'bkp.big.out/category.csv',
# 'category.index.csv',
# 'term.csv',
# 'category.csv',
tfidf,
max_depth=1
)
ctree.load()
ctree.build()
ctree.dump()
| JordiCarreraVentura/spellchecker | lib/CategoryTree.py | Python | gpl-3.0 | 7,145 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from config.template_middleware import TemplateResponse
from gaecookie.decorator import no_csrf
from gaepermission.decorator import login_not_required
__author__ = 'Rodrigo'
@login_not_required
@no_csrf
def index():
return TemplateResponse(template_path='/meuperfil/caixaesquerda/itens/itensmeusitens.html')
| rmmariano/ProjScriptsTekton | backend/appengine/routes/meuperfil/caixaesquerda/meusitens.py | Python | mit | 397 |
#!/usr/bin/env python
"""Provides the interface to define and manage policy and a repository to store and retrieve policy
and templates for policy definitions, aka attribute authority.
@see https://confluence.oceanobservatories.org/display/syseng/CIAD+COI+OV+Policy+Management+Service
"""
__author__ = 'Stephen P. Henrie'
from interface.services.coi.ipolicy_management_service import BasePolicyManagementService
from pyon.core.exception import NotFound, BadRequest, Inconsistent
from pyon.public import PRED, RT, Container, CFG, OT, IonObject
from pyon.util.containers import is_basic_identifier, create_basic_identifier
from pyon.util.log import log
from pyon.event.event import EventPublisher
from pyon.ion.endpoint import ProcessEventSubscriber
from ion.util.related_resources_crawler import RelatedResourcesCrawler
class PolicyManagementService(BasePolicyManagementService):
def __init__(self, *args, **kwargs):
BasePolicyManagementService.__init__(self,*args,**kwargs)
self.event_pub = None # For unit tests
def on_start(self):
self.event_pub = EventPublisher(process=self)
self.policy_event_subscriber = ProcessEventSubscriber(event_type="ResourceModifiedEvent",
origin_type="Policy",
queue_name="policy_management_policy_update_events",
callback=self._policy_event_callback,
process=self)
self._process.add_endpoint(self.policy_event_subscriber)
def create_resource_access_policy(self, resource_id='', policy_name='', description='', policy_rule=''):
"""Helper operation for creating an access policy for a specific resource. The id string returned
is the internal id by which Policy will be identified in the data store.
@param resource_id str
@param policy_name str
@param description str
@param policy_rule str
@retval policy_id str
        @throws BadRequest If any of the parameters are not set.
"""
if not resource_id:
raise BadRequest("The resource_id parameter is missing")
if not policy_name:
raise BadRequest("The policy_name parameter is missing")
if not description:
raise BadRequest("The description parameter is missing")
if not policy_rule:
raise BadRequest("The policy_rule parameter is missing")
resource_policy_obj = IonObject(OT.ResourceAccessPolicy, policy_rule=policy_rule, resource_id=resource_id)
policy_obj = IonObject(RT.Policy, name=policy_name, description=description, policy_type=resource_policy_obj)
policy_id = self.create_policy(policy_obj)
self._add_resource_policy(resource_id, policy_id, publish_event=False)
return policy_id
def create_service_access_policy(self, service_name='', policy_name='', description='', policy_rule=''):
"""Helper operation for creating an access policy for a specific service. The id string returned
is the internal id by which Policy will be identified in the data store.
@param service_name str
@param policy_name str
@param description str
@param policy_rule str
@retval policy_id str
        @throws BadRequest If any of the parameters are not set.
"""
if not service_name:
raise BadRequest("The service_name parameter is missing")
if not policy_name:
raise BadRequest("The policy_name parameter is missing")
if not description:
raise BadRequest("The description parameter is missing")
if not policy_rule:
raise BadRequest("The policy_rule parameter is missing")
service_policy_obj = IonObject(OT.ServiceAccessPolicy, policy_rule=policy_rule, service_name=service_name)
policy_obj = IonObject(RT.Policy, name=policy_name, description=description, policy_type=service_policy_obj)
return self.create_policy(policy_obj)
def create_common_service_access_policy(self, policy_name='', description='', policy_rule=''):
"""Helper operation for creating a service access policy common to all services. The id string returned
is the internal id by which Policy will be identified in the data store.
@param policy_name str
@param description str
@param policy_rule str
@retval policy_id str
        @throws BadRequest If any of the parameters are not set.
"""
if not policy_name:
raise BadRequest("The policy_name parameter is missing")
if not description:
raise BadRequest("The description parameter is missing")
if not policy_rule:
raise BadRequest("The policy_rule parameter is missing")
service_policy_obj = IonObject(OT.CommonServiceAccessPolicy, policy_rule=policy_rule)
policy_obj = IonObject(RT.Policy, name=policy_name, description=description, policy_type=service_policy_obj)
return self.create_policy(policy_obj)
def add_process_operation_precondition_policy(self, process_name='', op='', policy_content=''):
"""Helper operation for adding a precondition policy for a specific process operation; could be a service or agent.
The id string returned is the internal id by which Policy will be identified in the data store. The precondition
method must return a tuple (boolean, string).
@param process_name str
@param op str
@param policy_content str
@retval policy_id str
@throws BadRequest If any of the parameters are not set.
"""
if not process_name:
raise BadRequest("The process_name parameter is missing")
if not op:
raise BadRequest("The op parameter is missing")
if not policy_content:
raise BadRequest("The policy_content parameter is missing")
policy_name = process_name + "_" + op + "_Precondition_Policies"
policies,_ = self.clients.resource_registry.find_resources(restype=RT.Policy, name=policy_name)
if policies:
#Update existing policy by adding to list
if len(policies) > 1:
raise Inconsistent('There should only be one Policy object per process_name operation')
if policies[0].policy_type.op != op or policies[0].policy_type.type_ != OT.ProcessOperationPreconditionPolicy:
                raise Inconsistent('The Policy object %s does not match the requested process operation %s: %s' % (policies[0].name, process_name, op))
policies[0].policy_type.preconditions.append(policy_content)
self.update_policy(policies[0])
return policies[0]._id
else:
#Create a new policy object
op_policy_obj = IonObject(OT.ProcessOperationPreconditionPolicy, process_name=process_name, op=op)
op_policy_obj.preconditions.append(policy_content)
policy_obj = IonObject(RT.Policy, name=policy_name, policy_type=op_policy_obj, description='List of operation precondition policies for ' + process_name)
return self.create_policy(policy_obj)
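    # Illustrative call (argument values are assumptions, not taken from this
    # file): a precondition guarding a single service operation could be
    # registered as
    #     self.add_process_operation_precondition_policy(
    #         process_name='resource_registry', op='delete',
    #         policy_content='func1_pass')
    # where 'func1_pass' refers to the test helper defined later in this class;
    # get_active_process_operation_preconditions() then returns it while enabled.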
def create_policy(self, policy=None):
"""Persists the provided Policy object The id string returned
is the internal id by which Policy will be identified in the data store.
@param policy Policy
@retval policy_id str
@throws BadRequest if object passed has _id or _rev attribute
"""
if not policy:
raise BadRequest("The policy parameter is missing")
if not is_basic_identifier(policy.name):
raise BadRequest("The policy name '%s' can only contain alphanumeric and underscore characters" % policy.name)
try:
            #If there is a policy_rule field then try to add the policy name and description to the rule text
if hasattr(policy.policy_type, 'policy_rule'):
policy.policy_type.policy_rule = policy.policy_type.policy_rule % (policy.name, policy.description)
except Exception, e:
raise Inconsistent("Missing the elements in the policy rule to set the description: " + e.message)
policy_id, version = self.clients.resource_registry.create(policy)
log.debug('Policy created: ' + policy.name)
return policy_id
def update_policy(self, policy=None):
"""Updates the provided Policy object. Throws NotFound exception if
an existing version of Policy is not found. Throws Conflict if
the provided Policy object is not based on the latest persisted
version of the object.
@param policy Policy
@throws NotFound object with specified id does not exist
@throws BadRequest if object does not have _id or _rev attribute
@throws Conflict object not based on latest persisted object version
"""
if not policy:
raise BadRequest("The policy parameter is missing")
if not is_basic_identifier(policy.name):
raise BadRequest("The policy name '%s' can only contain alphanumeric and underscore characters" % policy.name)
self.clients.resource_registry.update(policy)
def read_policy(self, policy_id=''):
"""Returns the Policy object for the specified policy id.
Throws exception if id does not match any persisted Policy
objects.
@param policy_id str
@retval policy Policy
@throws NotFound object with specified id does not exist
"""
if not policy_id:
raise BadRequest("The policy_id parameter is missing")
policy = self.clients.resource_registry.read(policy_id)
if not policy:
raise NotFound("Policy %s does not exist" % policy_id)
return policy
def delete_policy(self, policy_id=''):
"""For now, permanently deletes Policy object with the specified
id. Throws exception if id does not match any persisted Policy.
@param policy_id str
@throws NotFound object with specified id does not exist
"""
if not policy_id:
raise BadRequest("The policy_id parameter is missing")
policy = self.clients.resource_registry.read(policy_id)
if not policy:
raise NotFound("Policy %s does not exist" % policy_id)
res_list = self._find_resources_for_policy(policy_id)
for res in res_list:
self._remove_resource_policy(res, policy)
self.clients.resource_registry.delete(policy_id)
#Force a publish since the policy object will have been deleted
self._publish_policy_event(policy, delete_policy=True)
def enable_policy(self, policy_id=''):
"""Sets a flag to enable the use of the policy
@param policy_id str
@throws NotFound object with specified id does not exist
"""
policy = self.read_policy(policy_id)
policy.enabled = True
self.update_policy(policy)
def disable_policy(self, policy_id=''):
"""Resets a flag to disable the use of the policy
@param policy_id str
@throws NotFound object with specified id does not exist
"""
policy = self.read_policy(policy_id)
policy.enabled = False
self.update_policy(policy)
def add_resource_policy(self, resource_id='', policy_id=''):
"""Associates a policy to a specific resource
@param resource_id str
@param policy_id str
@retval success bool
@throws NotFound object with specified id does not exist
"""
resource, policy = self._add_resource_policy(resource_id, policy_id)
return True
def remove_resource_policy(self, resource_id='', policy_id=''):
"""Removes an association for a policy to a specific resource
@param resource_id str
@param policy_id str
@retval success bool
@throws NotFound object with specified id does not exist
"""
if not resource_id:
raise BadRequest("The resource_id parameter is missing")
resource = self.clients.resource_registry.read(resource_id)
if not resource:
raise NotFound("Resource %s does not exist" % resource_id)
if not policy_id:
raise BadRequest("The policy_id parameter is missing")
policy = self.clients.resource_registry.read(policy_id)
if not policy:
raise NotFound("Policy %s does not exist" % policy_id)
self._remove_resource_policy(resource, policy)
return True
#Internal helper function for removing a policy resource association and publish event for containers to update
def _add_resource_policy(self, resource_id, policy_id, publish_event=True):
if not resource_id:
raise BadRequest("The resource_id parameter is missing")
resource = self.clients.resource_registry.read(resource_id)
if not resource:
raise NotFound("Resource %s does not exist" % resource_id)
if not policy_id:
raise BadRequest("The policy_id parameter is missing")
policy = self.clients.resource_registry.read(policy_id)
if not policy:
raise NotFound("Policy %s does not exist" % policy_id)
aid = self.clients.resource_registry.create_association(resource, PRED.hasPolicy, policy)
#Publish an event that the resource policy has changed
if publish_event:
self._publish_resource_policy_event(policy, resource)
return resource, policy
def _remove_resource_policy(self, resource, policy):
aid = self.clients.resource_registry.get_association(resource, PRED.hasPolicy, policy)
if not aid:
raise NotFound("The association between the specified Resource %s and Policy %s was not found" % (resource._id, policy._id))
self.clients.resource_registry.delete_association(aid)
#Publish an event that the resource policy has changed
self._publish_resource_policy_event(policy, resource)
def _policy_event_callback(self, *args, **kwargs):
"""
This method is a callback function for receiving Policy Events.
"""
policy_event = args[0]
policy_id = policy_event.origin
log.debug("Policy modified: %s" , str(policy_event.__dict__))
try:
policy = self.clients.resource_registry.read(policy_id)
if policy:
self._publish_policy_event(policy)
except Exception, e:
#If this is a delete operation, then don't bother with not finding the object. Should be handled by
#delete_policy operation
if policy_event.sub_type != 'DELETE':
log.error(e)
def _publish_policy_event(self, policy, delete_policy=False):
if policy.policy_type.type_ == OT.CommonServiceAccessPolicy:
self._publish_service_policy_event(policy, delete_policy)
elif policy.policy_type.type_ == OT.ServiceAccessPolicy or policy.policy_type.type_ == OT.ProcessOperationPreconditionPolicy:
self._publish_service_policy_event(policy, delete_policy)
else:
#Need to publish an event that a policy has changed for any associated resource
res_list = self._find_resources_for_policy(policy._id)
for res in res_list:
self._publish_resource_policy_event(policy, res, delete_policy)
def _publish_resource_policy_event(self, policy, resource, delete_policy=False):
if self.event_pub:
event_data = dict()
event_data['origin_type'] = 'Resource_Policy'
event_data['description'] = 'Updated Resource Policy'
event_data['resource_id'] = resource._id
event_data['resource_type'] = resource.type_
event_data['resource_name'] = resource.name
event_data['sub_type'] = 'DeletePolicy' if delete_policy else ''
self.event_pub.publish_event(event_type='ResourcePolicyEvent', origin=policy._id, **event_data)
def _publish_related_resource_policy_event(self, policy, resource_id, delete_policy=False):
if self.event_pub:
event_data = dict()
event_data['origin_type'] = 'Resource_Policy'
event_data['description'] = 'Updated Related Resource Policy'
event_data['resource_id'] = resource_id
event_data['sub_type'] = 'DeletePolicy' if delete_policy else ''
self.event_pub.publish_event(event_type='RelatedResourcePolicyEvent', origin=policy._id, **event_data)
def _publish_service_policy_event(self, policy, delete_policy=False):
if self.event_pub:
event_data = dict()
event_data['origin_type'] = 'Service_Policy'
event_data['description'] = 'Updated Service Policy'
event_data['sub_type'] = 'DeletePolicy' if delete_policy else ''
if policy.policy_type.type_ == OT.ProcessOperationPreconditionPolicy:
event_data['op'] = policy.policy_type.op
if hasattr(policy.policy_type, 'service_name'):
event_data['service_name'] = policy.policy_type.service_name
elif hasattr(policy.policy_type, 'process_name'):
event_data['service_name'] = policy.policy_type.process_name
else:
event_data['service_name'] = ''
self.event_pub.publish_event(event_type='ServicePolicyEvent', origin=policy._id, **event_data)
def find_resource_policies(self, resource_id=''):
"""Finds all policies associated with a specific resource
@param resource_id str
@retval policy_list list
@throws NotFound object with specified id does not exist
"""
if not resource_id:
raise BadRequest("The resource_id parameter is missing")
resource = self.clients.resource_registry.read(resource_id)
if not resource:
raise NotFound("Resource %s does not exist" % resource_id)
return self._find_resource_policies(resource)
def _find_resource_policies(self, resource, policy=None):
policy_list,_ = self.clients.resource_registry.find_objects(resource, PRED.hasPolicy, policy)
return policy_list
def _find_resources_for_policy(self, policy_id=''):
"""Finds all resources associated with a specific policy
@param policy_id str
@retval resource_list list
@throws NotFound object with specified id does not exist
"""
resource_list,_ = self.clients.resource_registry.find_subjects(None, PRED.hasPolicy, policy_id)
return resource_list
def get_active_resource_access_policy_rules(self, resource_id='', org_name=''):
"""Generates the set of all enabled access policies for the specified resource within the specified Org. If the org_name
is not provided, then the root ION Org will be assumed.
@param resource_id str
@param org_name str
@retval policy_rules str
@throws NotFound object with specified id does not exist
"""
if not resource_id:
raise BadRequest("The resource_id parameter is missing")
#TODO - extend to handle Org specific service policies at some point.
resource = self.clients.resource_registry.read(resource_id)
if not resource:
raise NotFound("Resource %s does not exist" % resource_id)
rules = ""
resource_id_list = self._get_related_resource_ids(resource)
if not len(resource_id_list):
resource_id_list.append(resource_id)
log.debug("Retrieving policies for resources: %s", resource_id_list)
for res_id in resource_id_list:
policy_set = self._find_resource_policies(res_id)
for p in policy_set:
if p.enabled and p.policy_type.type_ == OT.ResourceAccessPolicy :
log.debug("Including policy: %s", p.name)
rules += p.policy_type.policy_rule
return rules
def _get_related_resource_ids(self, resource):
resource_id_list = []
#Include related resource policies for specific resource types
#TODO - this is the first attempt at this... may have to iterate on this
if resource.type_ == RT.InstrumentDevice:
resource_types = [RT.InstrumentModel, RT.InstrumentSite, RT.PlatformDevice, RT.PlatformSite, RT.Subsite, RT.Observatory, RT.Org]
predicate_set = {PRED.hasModel: (True, True), PRED.hasDevice: (False, True), PRED.hasSite: (False, True), PRED.hasResource: (False, True)}
resource_id_list.extend(self._crawl_related_resources(resource_id=resource._id, resource_types=resource_types, predicate_set=predicate_set))
elif resource.type_ == RT.PlatformDevice:
resource_types = [RT.PlatformModel, RT.PlatformDevice, RT.PlatformSite, RT.Subsite, RT.Observatory, RT.Org]
predicate_set = {PRED.hasModel: (True, True), PRED.hasDevice: (False, True) , PRED.hasSite: (False, True), PRED.hasResource: (False, True)}
resource_id_list.extend(self._crawl_related_resources(resource_id=resource._id, resource_types=resource_types, predicate_set=predicate_set))
else:
#For anything else attempt to add Observatory by default
resource_types = [ RT.Observatory, RT.Org]
predicate_set = {PRED.hasSite: (False, True), PRED.hasResource: (False, True)}
resource_id_list.extend(self._crawl_related_resources(resource_id=resource._id, resource_types=resource_types, predicate_set=predicate_set))
return resource_id_list
def _crawl_related_resources(self, resource_id, resource_types=None, predicate_set=None):
"""
An internal helper function to generate a unique list of related resources
@return:
"""
resource_types = resource_types if resource_types is not None else []
predicate_set = predicate_set if predicate_set is not None else {}
r = RelatedResourcesCrawler()
test_real_fn = r.generate_get_related_resources_fn(self.clients.resource_registry, resource_whitelist=resource_types, predicate_dictionary=predicate_set)
related_objs = test_real_fn(resource_id)
unique_ids = []
for i in related_objs:
if i.o not in unique_ids: unique_ids.append(i.o)
if i.s not in unique_ids: unique_ids.append(i.s)
return unique_ids
def get_active_service_access_policy_rules(self, service_name='', org_name=''):
"""Generates the set of all enabled access policies for the specified service within the specified Org. If the org_name
is not provided, then the root ION Org will be assumed.
@param service_name str
@param org_name str
@retval policy_rules str
@throws NotFound object with specified id does not exist
"""
#TODO - extend to handle Org specific service policies at some point.
rules = ""
if not service_name:
policy_set,_ = self.clients.resource_registry.find_resources_ext(restype=RT.Policy, nested_type=OT.CommonServiceAccessPolicy)
for p in sorted(policy_set, key=lambda o: o.ts_created):
if p.enabled:
rules += p.policy_type.policy_rule
else:
policy_set,_ = self.clients.resource_registry.find_resources_ext(restype=RT.Policy, nested_type=OT.ServiceAccessPolicy)
for p in sorted(policy_set, key=lambda o: o.ts_created):
if p.enabled and p.policy_type.service_name == service_name:
rules += p.policy_type.policy_rule
return rules
def get_active_process_operation_preconditions(self, process_name='', op='', org_name=''):
"""Generates the set of all enabled precondition policies for the specified process operation within the specified
Org; could be a service or resource agent. If the org_name is not provided, then the root ION Org will be assumed.
@param process_name str
@param op str
@param org_name str
@retval preconditions list
@throws NotFound object with specified id does not exist
"""
if not process_name:
raise BadRequest("The process_name parameter is missing")
#TODO - extend to handle Org specific service policies at some point.
preconditions = list()
policy_set,_ = self.clients.resource_registry.find_resources_ext(restype=RT.Policy, nested_type=OT.ProcessOperationPreconditionPolicy)
for p in sorted(policy_set, key=lambda o: o.ts_created):
if op:
if p.enabled and p.policy_type.process_name == process_name and p.policy_type.op == op:
preconditions.append(p.policy_type)
else:
if p.enabled and p.policy_type.process_name == process_name:
preconditions.append(p.policy_type)
return preconditions
#Local helper functions for testing policies - do not remove
def func1_pass(self, msg, header):
return True, ''
def func2_deny(self, msg, header):
return False, 'Denied for no reason'
#
# ROLE CRUD Operations
#
def create_role(self, user_role=None):
"""Persists the provided UserRole object. The name of a role can only contain
alphanumeric and underscore characters while the description can me human
readable. The id string returned is the internal id by which a UserRole will
be indentified in the data store.
@param user_role UserRole
@retval user_role_id str
@throws BadRequest if object passed has _id or _rev attribute
"""
if not user_role:
raise BadRequest("The user_role parameter is missing")
#If this governance identifier is not set, then set to a safe version of the policy name.
if not user_role.governance_name:
user_role.governance_name = create_basic_identifier(user_role.name)
if not is_basic_identifier(user_role.governance_name):
raise BadRequest("The governance_name field '%s' can only contain alphanumeric and underscore characters" % user_role.governance_name)
user_role_id, version = self.clients.resource_registry.create(user_role)
return user_role_id
def update_role(self, user_role=None):
"""Updates the provided UserRole object. The name of a role can only contain
alphanumeric and underscore characters while the description can me human
readable.Throws NotFound exception if an existing version of UserRole is
not found. Throws Conflict if the provided UserRole object is not based on
the latest persisted version of the object.
@param user_role UserRole
@retval success bool
@throws BadRequest if object does not have _id or _rev attribute
@throws NotFound object with specified id does not exist
@throws Conflict object not based on latest persisted object version
"""
if not user_role:
raise BadRequest("The user_role parameter is missing")
#If this governance identifier is not set, then set to a safe version of the policy name.
if not user_role.governance_name:
user_role.governance_name = create_basic_identifier(user_role.name)
if not is_basic_identifier(user_role.governance_name):
raise BadRequest("The governance_name field '%s' can only contain alphanumeric and underscore characters" % user_role.governance_name)
self.clients.resource_registry.update(user_role)
def read_role(self, user_role_id=''):
"""Returns the UserRole object for the specified role id.
Throws exception if id does not match any persisted UserRole
objects.
@param user_role_id str
@retval user_role UserRole
@throws NotFound object with specified id does not exist
"""
if not user_role_id:
raise BadRequest("The user_role_id parameter is missing")
user_role = self.clients.resource_registry.read(user_role_id)
if not user_role:
raise NotFound("Role %s does not exist" % user_role_id)
return user_role
def delete_role(self, user_role_id=''):
"""For now, permanently deletes UserRole object with the specified
id. Throws exception if id does not match any persisted UserRole.
@throws NotFound object with specified id does not exist
"""
if not user_role_id:
raise BadRequest("The user_role_id parameter is missing")
user_role = self.clients.resource_registry.read(user_role_id)
if not user_role:
raise NotFound("Role %s does not exist" % user_role_id)
alist,_ = self.clients.resource_registry.find_subjects(RT.ActorIdentity, PRED.hasRole, user_role)
if len(alist) > 0:
raise BadRequest('The User Role %s cannot be removed as there are %s users associated to it' % (user_role.name, str(len(alist))))
self.clients.resource_registry.delete(user_role_id)
| ooici/coi-services | ion/services/coi/policy_management_service.py | Python | bsd-2-clause | 29,836 |
# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from lxml import etree
import webob.exc
from nova.api.openstack.compute.contrib import hosts as os_hosts
from nova.compute import power_state
from nova.compute import vm_states
from nova import context as context_maker
from nova import db
from nova.openstack.common import log as logging
from nova import test
from nova.tests import fake_hosts
LOG = logging.getLogger(__name__)
def stub_service_get_all(context, disabled=None):
return fake_hosts.SERVICES_LIST
def stub_service_get_by_host_and_topic(context, host_name, topic):
for service in stub_service_get_all(context):
if service['host'] == host_name and service['topic'] == topic:
return service
def stub_set_host_enabled(context, host_name, enabled):
"""
Simulates three possible behaviours for VM drivers or compute drivers when
enabling or disabling a host.
'enabled' means new instances can go to this host
'disabled' means they can't
"""
results = {True: "enabled", False: "disabled"}
if host_name == "notimplemented":
# The vm driver for this host doesn't support this feature
raise NotImplementedError()
elif host_name == "host_c2":
# Simulate a failure
return results[not enabled]
else:
# Do the right thing
return results[enabled]
def stub_set_host_maintenance(context, host_name, mode):
# We'll simulate success and failure by assuming
# that 'host_c1' always succeeds, and 'host_c2'
# always fails
results = {True: "on_maintenance", False: "off_maintenance"}
if host_name == "notimplemented":
# The vm driver for this host doesn't support this feature
raise NotImplementedError()
elif host_name == "host_c2":
# Simulate a failure
return results[not mode]
else:
# Do the right thing
return results[mode]
def stub_host_power_action(context, host_name, action):
if host_name == "notimplemented":
raise NotImplementedError()
return action
def _create_instance(**kwargs):
"""Create a test instance."""
ctxt = context_maker.get_admin_context()
return db.instance_create(ctxt, _create_instance_dict(**kwargs))
def _create_instance_dict(**kwargs):
"""Create a dictionary for a test instance."""
inst = {}
inst['image_ref'] = 'cedef40a-ed67-4d10-800e-17455edce175'
inst['reservation_id'] = 'r-fakeres'
inst['user_id'] = kwargs.get('user_id', 'admin')
inst['project_id'] = kwargs.get('project_id', 'fake')
inst['instance_type_id'] = '1'
if 'host' in kwargs:
inst['host'] = kwargs.get('host')
inst['vcpus'] = kwargs.get('vcpus', 1)
inst['memory_mb'] = kwargs.get('memory_mb', 20)
inst['root_gb'] = kwargs.get('root_gb', 30)
inst['ephemeral_gb'] = kwargs.get('ephemeral_gb', 30)
inst['vm_state'] = kwargs.get('vm_state', vm_states.ACTIVE)
inst['power_state'] = kwargs.get('power_state', power_state.RUNNING)
inst['task_state'] = kwargs.get('task_state', None)
inst['availability_zone'] = kwargs.get('availability_zone', None)
inst['ami_launch_index'] = 0
inst['launched_on'] = kwargs.get('launched_on', 'dummy')
return inst
class FakeRequest(object):
environ = {"nova.context": context_maker.get_admin_context()}
GET = {}
class FakeRequestWithNovaZone(object):
environ = {"nova.context": context_maker.get_admin_context()}
GET = {"zone": "nova"}
class HostTestCase(test.TestCase):
"""Test Case for hosts."""
def setUp(self):
super(HostTestCase, self).setUp()
self.controller = os_hosts.HostController()
self.hosts_api = self.controller.api
self.req = FakeRequest()
# Pretend we have fake_hosts.HOST_LIST in the DB
self.stubs.Set(db, 'service_get_all',
stub_service_get_all)
# Only hosts in our fake DB exist
self.stubs.Set(db, 'service_get_by_host_and_topic',
stub_service_get_by_host_and_topic)
        # 'host_c1' always succeeds, and 'host_c2' always fails
self.stubs.Set(self.hosts_api, 'set_host_enabled',
stub_set_host_enabled)
        # 'host_c1' always succeeds, and 'host_c2' always fails
self.stubs.Set(self.hosts_api, 'set_host_maintenance',
stub_set_host_maintenance)
self.stubs.Set(self.hosts_api, 'host_power_action',
stub_host_power_action)
def _test_host_update(self, host, key, val, expected_value):
body = {key: val}
result = self.controller.update(self.req, host, body)
self.assertEqual(result[key], expected_value)
def test_list_hosts(self):
"""Verify that the compute hosts are returned."""
result = self.controller.index(self.req)
self.assert_('hosts' in result)
hosts = result['hosts']
self.assertEqual(fake_hosts.HOST_LIST, hosts)
def test_list_hosts_with_zone(self):
result = self.controller.index(FakeRequestWithNovaZone())
self.assert_('hosts' in result)
hosts = result['hosts']
self.assertEqual(fake_hosts.HOST_LIST_NOVA_ZONE, hosts)
def test_disable_host(self):
self._test_host_update('host_c1', 'status', 'disable', 'disabled')
self._test_host_update('host_c2', 'status', 'disable', 'enabled')
def test_enable_host(self):
self._test_host_update('host_c1', 'status', 'enable', 'enabled')
self._test_host_update('host_c2', 'status', 'enable', 'disabled')
def test_enable_maintenance(self):
self._test_host_update('host_c1', 'maintenance_mode',
'enable', 'on_maintenance')
def test_disable_maintenance(self):
self._test_host_update('host_c1', 'maintenance_mode',
'disable', 'off_maintenance')
def _test_host_update_notimpl(self, key, val):
def stub_service_get_all_notimpl(self, req):
return [{'host': 'notimplemented', 'topic': None,
'availability_zone': None}]
self.stubs.Set(db, 'service_get_all',
stub_service_get_all_notimpl)
body = {key: val}
self.assertRaises(webob.exc.HTTPNotImplemented,
self.controller.update,
self.req, 'notimplemented', body=body)
def test_disable_host_notimpl(self):
self._test_host_update_notimpl('status', 'disable')
def test_enable_maintenance_notimpl(self):
self._test_host_update_notimpl('maintenance_mode', 'enable')
def test_host_startup(self):
result = self.controller.startup(self.req, "host_c1")
self.assertEqual(result["power_action"], "startup")
def test_host_shutdown(self):
result = self.controller.shutdown(self.req, "host_c1")
self.assertEqual(result["power_action"], "shutdown")
def test_host_reboot(self):
result = self.controller.reboot(self.req, "host_c1")
self.assertEqual(result["power_action"], "reboot")
def _test_host_power_action_notimpl(self, method):
self.assertRaises(webob.exc.HTTPNotImplemented,
method, self.req, "notimplemented")
def test_host_startup_notimpl(self):
self._test_host_power_action_notimpl(self.controller.startup)
def test_host_shutdown_notimpl(self):
self._test_host_power_action_notimpl(self.controller.shutdown)
def test_host_reboot_notimpl(self):
self._test_host_power_action_notimpl(self.controller.reboot)
def test_bad_status_value(self):
bad_body = {"status": "bad"}
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
self.req, "host_c1", bad_body)
bad_body2 = {"status": "disablabc"}
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
self.req, "host_c1", bad_body2)
def test_bad_update_key(self):
bad_body = {"crazy": "bad"}
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
self.req, "host_c1", bad_body)
    def test_bad_update_key_and_correct_update_key(self):
bad_body = {"status": "disable", "crazy": "bad"}
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
self.req, "host_c1", bad_body)
    def test_good_update_keys(self):
body = {"status": "disable", "maintenance_mode": "enable"}
result = self.controller.update(self.req, 'host_c1', body)
self.assertEqual(result["host"], "host_c1")
self.assertEqual(result["status"], "disabled")
self.assertEqual(result["maintenance_mode"], "on_maintenance")
def test_show_forbidden(self):
self.req.environ["nova.context"].is_admin = False
dest = 'dummydest'
self.assertRaises(webob.exc.HTTPForbidden,
self.controller.show,
self.req, dest)
self.req.environ["nova.context"].is_admin = True
def test_show_host_not_exist(self):
# A host given as an argument does not exists.
self.req.environ["nova.context"].is_admin = True
dest = 'dummydest'
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show,
self.req, dest)
def _create_compute_service(self):
"""Create compute-manager(ComputeNode and Service record)."""
ctxt = self.req.environ["nova.context"]
dic = {'host': 'dummy', 'binary': 'nova-compute', 'topic': 'compute',
'report_count': 0}
s_ref = db.service_create(ctxt, dic)
dic = {'service_id': s_ref['id'],
'vcpus': 16, 'memory_mb': 32, 'local_gb': 100,
'vcpus_used': 16, 'memory_mb_used': 32, 'local_gb_used': 10,
'hypervisor_type': 'qemu', 'hypervisor_version': 12003,
'cpu_info': '', 'stats': {}}
db.compute_node_create(ctxt, dic)
return db.service_get(ctxt, s_ref['id'])
def test_show_no_project(self):
"""No instances are running on the given host."""
ctxt = context_maker.get_admin_context()
s_ref = self._create_compute_service()
result = self.controller.show(self.req, s_ref['host'])
proj = ['(total)', '(used_now)', '(used_max)']
column = ['host', 'project', 'cpu', 'memory_mb', 'disk_gb']
self.assertEqual(len(result['host']), 3)
for resource in result['host']:
self.assertTrue(resource['resource']['project'] in proj)
self.assertEqual(len(resource['resource']), 5)
self.assertTrue(set(resource['resource'].keys()) == set(column))
db.service_destroy(ctxt, s_ref['id'])
def test_show_works_correctly(self):
"""show() works correctly as expected."""
ctxt = context_maker.get_admin_context()
s_ref = self._create_compute_service()
i_ref1 = _create_instance(project_id='p-01', host=s_ref['host'])
i_ref2 = _create_instance(project_id='p-02', vcpus=3,
host=s_ref['host'])
result = self.controller.show(self.req, s_ref['host'])
proj = ['(total)', '(used_now)', '(used_max)', 'p-01', 'p-02']
column = ['host', 'project', 'cpu', 'memory_mb', 'disk_gb']
self.assertEqual(len(result['host']), 5)
for resource in result['host']:
self.assertTrue(resource['resource']['project'] in proj)
self.assertEqual(len(resource['resource']), 5)
self.assertTrue(set(resource['resource'].keys()) == set(column))
db.service_destroy(ctxt, s_ref['id'])
db.instance_destroy(ctxt, i_ref1['uuid'])
db.instance_destroy(ctxt, i_ref2['uuid'])
class HostSerializerTest(test.TestCase):
def setUp(self):
super(HostSerializerTest, self).setUp()
self.deserializer = os_hosts.HostUpdateDeserializer()
def test_index_serializer(self):
serializer = os_hosts.HostIndexTemplate()
text = serializer.serialize(fake_hosts.OS_API_HOST_LIST)
tree = etree.fromstring(text)
self.assertEqual('hosts', tree.tag)
self.assertEqual(len(fake_hosts.HOST_LIST), len(tree))
for i in range(len(fake_hosts.HOST_LIST)):
self.assertEqual('host', tree[i].tag)
self.assertEqual(fake_hosts.HOST_LIST[i]['host_name'],
tree[i].get('host_name'))
self.assertEqual(fake_hosts.HOST_LIST[i]['service'],
tree[i].get('service'))
def test_update_serializer_with_status(self):
exemplar = dict(host='host_c1', status='enabled')
serializer = os_hosts.HostUpdateTemplate()
text = serializer.serialize(exemplar)
tree = etree.fromstring(text)
self.assertEqual('host', tree.tag)
for key, value in exemplar.items():
self.assertEqual(value, tree.get(key))
    def test_update_serializer_with_maintenance_mode(self):
exemplar = dict(host='host_c1', maintenance_mode='enabled')
serializer = os_hosts.HostUpdateTemplate()
text = serializer.serialize(exemplar)
tree = etree.fromstring(text)
self.assertEqual('host', tree.tag)
for key, value in exemplar.items():
self.assertEqual(value, tree.get(key))
    def test_update_serializer_with_maintenance_mode_and_status(self):
exemplar = dict(host='host_c1',
maintenance_mode='enabled',
status='enabled')
serializer = os_hosts.HostUpdateTemplate()
text = serializer.serialize(exemplar)
tree = etree.fromstring(text)
self.assertEqual('host', tree.tag)
for key, value in exemplar.items():
self.assertEqual(value, tree.get(key))
def test_action_serializer(self):
exemplar = dict(host='host_c1', power_action='reboot')
serializer = os_hosts.HostActionTemplate()
text = serializer.serialize(exemplar)
tree = etree.fromstring(text)
self.assertEqual('host', tree.tag)
for key, value in exemplar.items():
self.assertEqual(value, tree.get(key))
def test_update_deserializer(self):
exemplar = dict(status='enabled', maintenance_mode='disable')
intext = """<?xml version='1.0' encoding='UTF-8'?>
<updates>
<status>enabled</status>
<maintenance_mode>disable</maintenance_mode>
</updates>"""
result = self.deserializer.deserialize(intext)
self.assertEqual(dict(body=exemplar), result)
| dstroppa/openstack-smartos-nova-grizzly | nova/tests/api/openstack/compute/contrib/test_hosts.py | Python | apache-2.0 | 15,292 |
import uuid
from django.core.management.base import BaseCommand
from ...models import VirtualMachine
from ...helpers import call_api
class Command(BaseCommand):
help = "Migrates VMs over from the old Conductor system"
def handle(self, *args, **options):
containers = call_api("container.list")
for container in containers:
if not VirtualMachine.objects.filter(uuid=container).exists():
print("[*] Importing {}...".format(container))
VirtualMachine.objects.create(name=container, uuid=uuid.UUID(container), description="Unknown Virtual Machine")
| tjcsl/director | web3/apps/vms/management/commands/migrate_vms.py | Python | mit | 618 |
import time
_config_ = {
'timeout': 3,
'noparallel': True
}
def test():
for i in range(0, 6):
time.sleep(1)
| zstackorg/zstack-woodpecker | zstackwoodpecker/test/cases/test2.py | Python | apache-2.0 | 146 |
import itertools
import numpy as np
import matplotlib.pyplot as plt
import ad3
grid_size = 20
num_states = 5
factor_graph = ad3.PFactorGraph()
multi_variables = []
random_grid = np.random.uniform(size=(grid_size, grid_size, num_states))
for i in xrange(grid_size):
multi_variables.append([])
for j in xrange(grid_size):
new_variable = factor_graph.create_multi_variable(num_states)
for state in xrange(num_states):
new_variable.set_log_potential(state, random_grid[i, j, state])
multi_variables[i].append(new_variable)
alpha = .5
potts_matrix = alpha * np.eye(num_states)
potts_potentials = potts_matrix.ravel().tolist()
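# Sanity check added for illustration: a Potts pairwise term rewards equal
# neighbouring labels by alpha and is indifferent to disagreement.
assert potts_matrix[0, 0] == alpha
assert potts_matrix[0, 1] == 0.0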
for i, j in itertools.product(xrange(grid_size), repeat=2):
if (j > 0):
#horizontal edge
edge_variables = [multi_variables[i][j - 1], multi_variables[i][j]]
factor_graph.create_factor_dense(edge_variables, potts_potentials)
if (i > 0):
        #vertical edge
edge_variables = [multi_variables[i - 1][j], multi_variables[i][j]]
factor_graph.create_factor_dense(edge_variables, potts_potentials)
factor_graph.set_eta_ad3(.1)
factor_graph.adapt_eta_ad3(True)
factor_graph.set_max_iterations_ad3(1000)
value, marginals, edge_marginals, solver_status =\
factor_graph.solve_lp_map_ad3()
res = np.array(marginals).reshape(20, 20, 5)
plt.matshow(np.argmax(res, axis=-1), vmin=0, vmax=4)
plt.matshow(np.argmax(random_grid, axis=-1), vmin=0, vmax=4)
plt.show()
| pystruct/AD3 | python/example_grid.py | Python | lgpl-3.0 | 1,480 |
# -*- coding: utf-8 -*-
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pytest
from peakdet import operations
from peakdet.physio import Physio
from peakdet.tests import utils as testutils
data = np.loadtxt(testutils.get_test_data_path('ECG.csv'))
WITHFS = Physio(data, fs=1000.)
NOFS = Physio(data)
def test_filter_physio():
# check lowpass and highpass filters
for meth in ['lowpass', 'highpass']:
params = dict(cutoffs=2, method=meth)
assert len(WITHFS) == len(operations.filter_physio(WITHFS, **params))
params['order'] = 5
assert len(WITHFS) == len(operations.filter_physio(WITHFS, **params))
params['cutoffs'] = [2, 10]
with pytest.raises(ValueError):
operations.filter_physio(WITHFS, **params)
with pytest.raises(ValueError):
operations.filter_physio(NOFS, **params)
# check bandpass and bandstop filters
for meth in ['bandpass', 'bandstop']:
params = dict(cutoffs=[2, 10], method=meth)
assert len(WITHFS) == len(operations.filter_physio(WITHFS, **params))
params['order'] = 5
assert len(WITHFS) == len(operations.filter_physio(WITHFS, **params))
params['cutoffs'] = 2
with pytest.raises(ValueError):
operations.filter_physio(WITHFS, **params)
with pytest.raises(ValueError):
operations.filter_physio(NOFS, **params)
# check appropriate filter methods
with pytest.raises(ValueError):
operations.filter_physio(WITHFS, 2, 'notafilter')
# check nyquist
with pytest.raises(ValueError):
operations.filter_physio(WITHFS, [2, 1000], 'bandpass')
def test_interpolate_physio():
with pytest.raises(ValueError):
operations.interpolate_physio(NOFS, 100.)
for fn in [50, 100, 200, 500, 2000, 5000]:
new = operations.interpolate_physio(WITHFS, fn)
assert new.fs == fn
if fn < WITHFS.fs:
assert len(new) < len(WITHFS)
else:
assert len(new) > len(WITHFS)
def test_peakfind_physio():
with pytest.raises(ValueError):
operations.peakfind_physio(NOFS)
operations.peakfind_physio(NOFS, dist=20)
operations.peakfind_physio(NOFS, thresh=0.4, dist=20)
operations.peakfind_physio(WITHFS)
operations.peakfind_physio(WITHFS, dist=20)
operations.peakfind_physio(WITHFS, thresh=0.4)
def test_delete_peaks():
to_delete = [24685, 44169]
peaks = operations.peakfind_physio(WITHFS)
deleted = operations.delete_peaks(peaks, to_delete)
assert len(deleted.peaks) == len(peaks.peaks) - len(to_delete)
def test_reject_peaks():
to_reject = [24685, 44169]
peaks = operations.peakfind_physio(WITHFS)
rejected = operations.reject_peaks(peaks, to_reject)
assert len(rejected.peaks) == len(peaks.peaks) - len(to_reject)
def test_edit_physio():
# value error when no sampling rate provided for interactive editing
with pytest.raises(ValueError):
operations.edit_physio(NOFS)
# if sampling rate provided but no peaks/troughs just return
operations.edit_physio(WITHFS)
def test_plot_physio():
for data in [NOFS, WITHFS]:
assert isinstance(operations.plot_physio(data), matplotlib.axes.Axes)
peaks = operations.peakfind_physio(WITHFS)
assert isinstance(operations.plot_physio(peaks), matplotlib.axes.Axes)
fig, ax = plt.subplots(1, 1)
assert ax == operations.plot_physio(peaks, ax=ax)
| rmarkello/peakdet | peakdet/tests/test_operations.py | Python | gpl-3.0 | 3,484 |
#
# This file is part of Chadwick
# Copyright (c) 2002-2022, Dr T L Turocy ([email protected])
# Chadwick Baseball Bureau (http://www.chadwick-bureau.com)
#
# FILE: src/python/chadwick/__init__.py
# Top-level module file for Chadwick Python library
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
from libchadwick import create_game
from libchadwick import read_game
from libchadwick import create_scorebook
from libchadwick import read_scorebook
from libchadwick import create_league
from libchadwick import read_league
| chadwickbureau/chadwick | src/python/chadwick/__init__.py | Python | gpl-2.0 | 1,210 |
from anki import Collection
from .json_serializable import JsonSerializableAnkiDict
from ..utils.uuid import UuidFetcher
class DeckConfig(JsonSerializableAnkiDict):
def __init__(self, anki_deck_config=None):
super(DeckConfig, self).__init__(anki_deck_config)
@classmethod
def from_collection(cls, collection, deck_config_id):
decks = collection.decks
# TODO Remove compatibility shims for Anki 2.1.46 and lower.
get_conf = decks.get_config if hasattr(decks, 'get_config') else decks.getConf
anki_dict = get_conf(deck_config_id)
deck_config = DeckConfig(anki_dict)
deck_config._update_fields()
return deck_config
def save_to_collection(self, collection: Collection):
        # TODO: implement full UUID matching when saving configs.
        # For now, only the creation scenario is handled.
config_dict = self.fetch_or_create_config(collection)
config_dict.update(self.anki_dict)
collection.decks.update_config(config_dict)
self.anki_dict = config_dict
def fetch_or_create_config(self, collection):
return UuidFetcher(collection).get_deck_config(self.get_uuid()) or \
collection.decks.add_config(self.anki_dict["name"])
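# A minimal usage sketch (hypothetical config id; a real Anki Collection
# object and an existing deck config id are assumed):
#
#     config = DeckConfig.from_collection(collection, deck_config_id=1)
#     config.save_to_collection(collection)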
| Stvad/CrowdAnki | crowd_anki/representation/deck_config.py | Python | mit | 1,224 |
from __future__ import division
import warnings
from sympy import Abs, Rational, Float, S, Symbol, cos, pi, sqrt, oo
from sympy.functions.elementary.trigonometric import tan
from sympy.geometry import (Circle, Ellipse, GeometryError, Point, Polygon, Ray, RegularPolygon, Segment, Triangle, are_similar,
convex_hull, intersection, Line)
from sympy.utilities.pytest import raises
from sympy.utilities.randtest import verify_numerically
from sympy.geometry.polygon import rad, deg
def feq(a, b):
"""Test if two floating point values are 'equal'."""
t_float = Float("1.0E-10")
return -t_float < a - b < t_float
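# e.g. feq(Float("0.1") + Float("0.2"), Float("0.3")) is True even though the
# two values differ in their trailing bits (tolerance is fixed at 1.0E-10).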
def test_polygon():
x = Symbol('x', real=True)
y = Symbol('y', real=True)
x1 = Symbol('x1', real=True)
half = Rational(1, 2)
a, b, c = Point(0, 0), Point(2, 0), Point(3, 3)
t = Triangle(a, b, c)
assert Polygon(a, Point(1, 0), b, c) == t
assert Polygon(Point(1, 0), b, c, a) == t
assert Polygon(b, c, a, Point(1, 0)) == t
# 2 "remove folded" tests
assert Polygon(a, Point(3, 0), b, c) == t
assert Polygon(a, b, Point(3, -1), b, c) == t
raises(GeometryError, lambda: Polygon((0, 0), (1, 0), (0, 1), (1, 1)))
# remove multiple collinear points
assert Polygon(Point(-4, 15), Point(-11, 15), Point(-15, 15),
Point(-15, 33/5), Point(-15, -87/10), Point(-15, -15),
Point(-42/5, -15), Point(-2, -15), Point(7, -15), Point(15, -15),
Point(15, -3), Point(15, 10), Point(15, 15)) == \
Polygon(Point(-15,-15), Point(15,-15), Point(15,15), Point(-15,15))
p1 = Polygon(
Point(0, 0), Point(3, -1),
Point(6, 0), Point(4, 5),
Point(2, 3), Point(0, 3))
p2 = Polygon(
Point(6, 0), Point(3, -1),
Point(0, 0), Point(0, 3),
Point(2, 3), Point(4, 5))
p3 = Polygon(
Point(0, 0), Point(3, 0),
Point(5, 2), Point(4, 4))
p4 = Polygon(
Point(0, 0), Point(4, 4),
Point(5, 2), Point(3, 0))
p5 = Polygon(
Point(0, 0), Point(4, 4),
Point(0, 4))
p6 = Polygon(
Point(-11, 1), Point(-9, 6.6),
Point(-4, -3), Point(-8.4, -8.7))
r = Ray(Point(-9,6.6), Point(-9,5.5))
#
# General polygon
#
assert p1 == p2
assert len(p1.args) == 6
assert len(p1.sides) == 6
assert p1.perimeter == 5 + 2*sqrt(10) + sqrt(29) + sqrt(8)
assert p1.area == 22
assert not p1.is_convex()
# ensure convex for both CW and CCW point specification
assert p3.is_convex()
assert p4.is_convex()
dict5 = p5.angles
assert dict5[Point(0, 0)] == pi / 4
assert dict5[Point(0, 4)] == pi / 2
assert p5.encloses_point(Point(x, y)) is None
assert p5.encloses_point(Point(1, 3))
assert p5.encloses_point(Point(0, 0)) is False
assert p5.encloses_point(Point(4, 0)) is False
assert p1.encloses(Circle(Point(2.5,2.5),5)) is False
assert p1.encloses(Ellipse(Point(2.5,2),5,6)) is False
    assert p5.plot_interval('x') == [x, 0, 1]
assert p5.distance(
Polygon(Point(10, 10), Point(14, 14), Point(10, 14))) == 6 * sqrt(2)
assert p5.distance(
Polygon(Point(1, 8), Point(5, 8), Point(8, 12), Point(1, 12))) == 4
warnings.filterwarnings(
"error", message="Polygons may intersect producing erroneous output")
raises(UserWarning,
lambda: Polygon(Point(0, 0), Point(1, 0),
Point(1, 1)).distance(
Polygon(Point(0, 0), Point(0, 1), Point(1, 1))))
warnings.filterwarnings(
"ignore", message="Polygons may intersect producing erroneous output")
assert hash(p5) == hash(Polygon(Point(0, 0), Point(4, 4), Point(0, 4)))
assert p5 == Polygon(Point(4, 4), Point(0, 4), Point(0, 0))
assert Polygon(Point(4, 4), Point(0, 4), Point(0, 0)) in p5
assert p5 != Point(0, 4)
assert Point(0, 1) in p5
assert p5.arbitrary_point('t').subs(Symbol('t', real=True), 0) == \
Point(0, 0)
raises(ValueError, lambda: Polygon(
Point(x, 0), Point(0, y), Point(x, y)).arbitrary_point('x'))
assert p6.intersection(r) == [Point(-9, 33/5), Point(-9, -84/13)]
#
# Regular polygon
#
p1 = RegularPolygon(Point(0, 0), 10, 5)
p2 = RegularPolygon(Point(0, 0), 5, 5)
raises(GeometryError, lambda: RegularPolygon(Point(0, 0), Point(0,
1), Point(1, 1)))
raises(GeometryError, lambda: RegularPolygon(Point(0, 0), 1, 2))
raises(ValueError, lambda: RegularPolygon(Point(0, 0), 1, 2.5))
assert p1 != p2
assert p1.interior_angle == 3*pi/5
assert p1.exterior_angle == 2*pi/5
assert p2.apothem == 5*cos(pi/5)
assert p2.circumcenter == p1.circumcenter == Point(0, 0)
assert p1.circumradius == p1.radius == 10
assert p2.circumcircle == Circle(Point(0, 0), 5)
assert p2.incircle == Circle(Point(0, 0), p2.apothem)
assert p2.inradius == p2.apothem == (5 * (1 + sqrt(5)) / 4)
p2.spin(pi / 10)
dict1 = p2.angles
assert dict1[Point(0, 5)] == 3 * pi / 5
assert p1.is_convex()
assert p1.rotation == 0
assert p1.encloses_point(Point(0, 0))
assert p1.encloses_point(Point(11, 0)) is False
assert p2.encloses_point(Point(0, 4.9))
p1.spin(pi/3)
assert p1.rotation == pi/3
assert p1.vertices[0] == Point(5, 5*sqrt(3))
for var in p1.args:
if isinstance(var, Point):
assert var == Point(0, 0)
else:
assert var == 5 or var == 10 or var == pi / 3
assert p1 != Point(0, 0)
assert p1 != p5
# while spin works in place (notice that rotation is 2pi/3 below)
# rotate returns a new object
p1_old = p1
assert p1.rotate(pi/3) == RegularPolygon(Point(0, 0), 10, 5, 2*pi/3)
assert p1 == p1_old
assert p1.area == (-250*sqrt(5) + 1250)/(4*tan(pi/5))
assert p1.length == 20*sqrt(-sqrt(5)/8 + 5/8)
assert p1.scale(2, 2) == \
RegularPolygon(p1.center, p1.radius*2, p1._n, p1.rotation)
assert RegularPolygon((0, 0), 1, 4).scale(2, 3) == \
Polygon(Point(2, 0), Point(0, 3), Point(-2, 0), Point(0, -3))
assert repr(p1) == str(p1)
#
# Angles
#
angles = p4.angles
assert feq(angles[Point(0, 0)].evalf(), Float("0.7853981633974483"))
assert feq(angles[Point(4, 4)].evalf(), Float("1.2490457723982544"))
assert feq(angles[Point(5, 2)].evalf(), Float("1.8925468811915388"))
assert feq(angles[Point(3, 0)].evalf(), Float("2.3561944901923449"))
angles = p3.angles
assert feq(angles[Point(0, 0)].evalf(), Float("0.7853981633974483"))
assert feq(angles[Point(4, 4)].evalf(), Float("1.2490457723982544"))
assert feq(angles[Point(5, 2)].evalf(), Float("1.8925468811915388"))
assert feq(angles[Point(3, 0)].evalf(), Float("2.3561944901923449"))
#
# Triangle
#
p1 = Point(0, 0)
p2 = Point(5, 0)
p3 = Point(0, 5)
t1 = Triangle(p1, p2, p3)
t2 = Triangle(p1, p2, Point(Rational(5, 2), sqrt(Rational(75, 4))))
t3 = Triangle(p1, Point(x1, 0), Point(0, x1))
s1 = t1.sides
assert Triangle(p1, p2, p1) == Polygon(p1, p2, p1) == Segment(p1, p2)
raises(GeometryError, lambda: Triangle(Point(0, 0)))
# Basic stuff
assert Triangle(p1, p1, p1) == p1
assert Triangle(p2, p2*2, p2*3) == Segment(p2, p2*3)
assert t1.area == Rational(25, 2)
assert t1.is_right()
assert t2.is_right() is False
assert t3.is_right()
assert p1 in t1
assert t1.sides[0] in t1
assert Segment((0, 0), (1, 0)) in t1
assert Point(5, 5) not in t2
assert t1.is_convex()
assert feq(t1.angles[p1].evalf(), pi.evalf()/2)
assert t1.is_equilateral() is False
assert t2.is_equilateral()
assert t3.is_equilateral() is False
assert are_similar(t1, t2) is False
assert are_similar(t1, t3)
assert are_similar(t2, t3) is False
assert t1.is_similar(Point(0, 0)) is False
# Bisectors
bisectors = t1.bisectors()
assert bisectors[p1] == Segment(p1, Point(Rational(5, 2), Rational(5, 2)))
ic = (250 - 125*sqrt(2)) / 50
assert t1.incenter == Point(ic, ic)
# Inradius
assert t1.inradius == t1.incircle.radius == 5 - 5*sqrt(2)/2
assert t2.inradius == t2.incircle.radius == 5*sqrt(3)/6
assert t3.inradius == t3.incircle.radius == x1**2/((2 + sqrt(2))*Abs(x1))
# Circumcircle
assert t1.circumcircle.center == Point(2.5, 2.5)
# Medians + Centroid
m = t1.medians
assert t1.centroid == Point(Rational(5, 3), Rational(5, 3))
assert m[p1] == Segment(p1, Point(Rational(5, 2), Rational(5, 2)))
assert t3.medians[p1] == Segment(p1, Point(x1/2, x1/2))
assert intersection(m[p1], m[p2], m[p3]) == [t1.centroid]
assert t1.medial == Triangle(Point(2.5, 0), Point(0, 2.5), Point(2.5, 2.5))
# Perpendicular
altitudes = t1.altitudes
assert altitudes[p1] == Segment(p1, Point(Rational(5, 2), Rational(5, 2)))
assert altitudes[p2] == s1[0]
assert altitudes[p3] == s1[2]
assert t1.orthocenter == p1
t = S('''Triangle(
Point(100080156402737/5000000000000, 79782624633431/500000000000),
Point(39223884078253/2000000000000, 156345163124289/1000000000000),
Point(31241359188437/1250000000000, 338338270939941/1000000000000000))''')
assert t.orthocenter == S('''Point(-780660869050599840216997'''
'''79471538701955848721853/80368430960602242240789074233100000000000000,'''
'''20151573611150265741278060334545897615974257/16073686192120448448157'''
'''8148466200000000000)''')
    # Ensure each family of lines is concurrent (single intersection point)
assert len(intersection(*bisectors.values())) == 1
assert len(intersection(*altitudes.values())) == 1
assert len(intersection(*m.values())) == 1
# Distance
p1 = Polygon(
Point(0, 0), Point(1, 0),
Point(1, 1), Point(0, 1))
p2 = Polygon(
Point(0, Rational(5)/4), Point(1, Rational(5)/4),
Point(1, Rational(9)/4), Point(0, Rational(9)/4))
p3 = Polygon(
Point(1, 2), Point(2, 2),
Point(2, 1))
p4 = Polygon(
Point(1, 1), Point(Rational(6)/5, 1),
Point(1, Rational(6)/5))
pt1 = Point(half, half)
pt2 = Point(1, 1)
    # Polygon to Point
assert p1.distance(pt1) == half
assert p1.distance(pt2) == 0
assert p2.distance(pt1) == Rational(3)/4
assert p3.distance(pt2) == sqrt(2)/2
    # Polygon to Polygon
# p1.distance(p2) emits a warning
# First, test the warning
warnings.filterwarnings("error",
message="Polygons may intersect producing erroneous output")
raises(UserWarning, lambda: p1.distance(p2))
# now test the actual output
warnings.filterwarnings("ignore",
message="Polygons may intersect producing erroneous output")
assert p1.distance(p2) == half/2
assert p1.distance(p3) == sqrt(2)/2
assert p3.distance(p4) == (sqrt(2)/2 - sqrt(Rational(2)/25)/2)
def test_convex_hull():
p = [Point(-5, -1), Point(-2, 1), Point(-2, -1), Point(-1, -3),
Point(0, 0), Point(1, 1), Point(2, 2), Point(2, -1), Point(3, 1),
Point(4, -1), Point(6, 2)]
ch = Polygon(p[0], p[3], p[9], p[10], p[6], p[1])
#test handling of duplicate points
p.append(p[3])
#more than 3 collinear points
another_p = [Point(-45, -85), Point(-45, 85), Point(-45, 26),
Point(-45, -24)]
ch2 = Segment(another_p[0], another_p[1])
assert convex_hull(*another_p) == ch2
assert convex_hull(*p) == ch
assert convex_hull(p[0]) == p[0]
assert convex_hull(p[0], p[1]) == Segment(p[0], p[1])
# no unique points
assert convex_hull(*[p[-1]]*3) == p[-1]
# collection of items
assert convex_hull(*[Point(0, 0),
Segment(Point(1, 0), Point(1, 1)),
RegularPolygon(Point(2, 0), 2, 4)]) == \
Polygon(Point(0, 0), Point(2, -2), Point(4, 0), Point(2, 2))
def test_encloses():
# square with a dimpled left side
s = Polygon(Point(0, 0), Point(1, 0), Point(1, 1), Point(0, 1),
Point(S.Half, S.Half))
# the following is True if the polygon isn't treated as closing on itself
assert s.encloses(Point(0, S.Half)) is False
assert s.encloses(Point(S.Half, S.Half)) is False # it's a vertex
assert s.encloses(Point(Rational(3, 4), S.Half)) is True
def test_triangle_kwargs():
assert Triangle(sss=(3, 4, 5)) == \
Triangle(Point(0, 0), Point(3, 0), Point(3, 4))
assert Triangle(asa=(30, 2, 30)) == \
Triangle(Point(0, 0), Point(2, 0), Point(1, sqrt(3)/3))
assert Triangle(sas=(1, 45, 2)) == \
Triangle(Point(0, 0), Point(2, 0), Point(sqrt(2)/2, sqrt(2)/2))
assert Triangle(sss=(1, 2, 5)) is None
assert deg(rad(180)) == 180
def test_transform():
pts = [Point(0, 0), Point(1/2, 1/4), Point(1, 1)]
pts_out = [Point(-4, -10), Point(-3, -37/4), Point(-2, -7)]
assert Triangle(*pts).scale(2, 3, (4, 5)) == Triangle(*pts_out)
assert RegularPolygon((0, 0), 1, 4).scale(2, 3, (4, 5)) == \
Polygon(Point(-2, -10), Point(-4, -7), Point(-6, -10), Point(-4, -13))
def test_reflect():
x = Symbol('x', real=True)
y = Symbol('y', real=True)
b = Symbol('b')
m = Symbol('m')
l = Line((0, b), slope=m)
p = Point(x, y)
r = p.reflect(l)
dp = l.perpendicular_segment(p).length
dr = l.perpendicular_segment(r).length
assert verify_numerically(dp, dr)
t = Triangle((0, 0), (1, 0), (2, 3))
assert Polygon((1, 0), (2, 0), (2, 2)).reflect(Line((3, 0), slope=oo)) \
== Triangle(Point(5, 0), Point(4, 0), Point(4, 2))
assert Polygon((1, 0), (2, 0), (2, 2)).reflect(Line((0, 3), slope=oo)) \
== Triangle(Point(-1, 0), Point(-2, 0), Point(-2, 2))
assert Polygon((1, 0), (2, 0), (2, 2)).reflect(Line((0, 3), slope=0)) \
== Triangle(Point(1, 6), Point(2, 6), Point(2, 4))
assert Polygon((1, 0), (2, 0), (2, 2)).reflect(Line((3, 0), slope=0)) \
== Triangle(Point(1, 0), Point(2, 0), Point(2, -2))
| kaichogami/sympy | sympy/geometry/tests/test_polygon.py | Python | bsd-3-clause | 13,890 |
#!/usr/bin/python3
"""Install a 'blob' file from an extract shell archive.
This script installs the 3rd party blob contained in a previously
downloaded extract-*.sh file. This avoids the need to have to page
through and accept the user agreement (which is what you have to do if
you execute the archive directly).
"""
import locale
import os
import re
import subprocess
import sys
import script_utils as u
#......................................................................
me = sys.argv[0]
mebase = os.path.basename(me)
if len(sys.argv) != 2:
u.error("%s: supply exactly one argument" % mebase)
arg = sys.argv[1]
if not re.compile(r"extract\-.+\.sh$").match(arg):
u.warning("arg '%s' does not match template extract*.sh" % arg)
if not os.path.exists(arg):
u.error("unable to access file arg '%s'" % arg)
u.verbose(0, "... examining '%s'" % arg)
matcher = re.compile(r"tail \-n \+\d+ .+ tar zxv")
cmd = ""
encoding = locale.getdefaultlocale()[1]
with open(arg, "rb") as fin:
for line in fin:
decoded = line.decode(encoding)
if matcher.match(decoded):
# found
cmd = re.sub(r"\$0", arg, decoded.rstrip())
break
if not cmd:
u.error("could not locate tail/tar line with proper form in '%s'" % arg)
u.verbose(0, "... extracting files from '%s'" % arg)
rc = subprocess.call(cmd, shell=True)
if rc != 0:
u.error("error: cmd failed: %s" % cmd)
| thanm/devel-scripts | blobinstall.py | Python | apache-2.0 | 1,391 |
import json
from django import forms
from django.contrib.admin import helpers
from django.contrib.admin.views.decorators import staff_member_required
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse
from django.forms.utils import ErrorDict, ErrorList, flatatt
from django.utils.decorators import method_decorator
from django.views.generic.edit import CreateView
from .utils import get_media_file_name
def model_to_modelform(model):
    meta = type('Meta', (), {"model": model, "fields": '__all__'})
modelform_class = type('modelform', (forms.ModelForm,), {"Meta": meta})
return modelform_class
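# A minimal sketch of what model_to_modelform builds (hypothetical Book
# model); it is equivalent to declaring the ModelForm subclass by hand:
#
#     class BookForm(forms.ModelForm):
#         class Meta:
#             model = Book
#             fields = '__all__'
#
#     form_class = model_to_modelform(Book)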
class BaseBatchUploadView(CreateView):
template_name = 'batch/base.html'
def get_form(self, form_class=None):
if self.form_class:
form_class = self.get_form_class()
else:
form_class = model_to_modelform(self.model)
return form_class(**self.get_form_kwargs())
def get_context_data(self, **kwargs):
context = super(BaseBatchUploadView, self).get_context_data(**kwargs)
media_file_name = get_media_file_name(self, self.model)
if not hasattr(self, 'title'):
self.title = "Batch Upload %s"%(self.model._meta.verbose_name_plural.title())
if not hasattr(self, 'detail_fields'):
raise ImproperlyConfigured("Please specify detail_fields this view")
if not hasattr(self, 'default_fields'):
raise ImproperlyConfigured("Please specify default_fields this view")
if not hasattr(self, 'default_values'):
self.default_values = {}
if hasattr(self, 'instructions'):
context['instructions'] = self.instructions
context['app_name'] = self.model._meta.app_label.title()
context['model_name'] = self.model._meta.verbose_name.title()
context['model_name_plural'] = self.model._meta.verbose_name_plural.title()
context['title'] = self.title
context['media_file_name'] = media_file_name
context['default_fields'] = self.default_fields
context['detail_fields'] = self.detail_fields
context['default_values'] = json.dumps(self.default_values)
context['model_list_url'] = reverse('admin:app_list', kwargs={'app_label': self.model._meta.app_label})
context['model_app_url'] = reverse('admin:%s_%s_changelist'%(self.model._meta.app_label, self.model._meta.model_name))
context['model_add_url'] = reverse('admin:%s_%s_add'%(self.model._meta.app_label, self.model._meta.model_name))
        context['adminform'] = helpers.AdminForm(
self.get_form(),
list(self.get_fieldsets(self.request, None)),
self.get_prepopulated_fields(self.request, None),
self.get_readonly_fields(self.request, None),
model_admin=self
)
return context
def get_fieldsets(self, request, obj=None):
if not self.fields:
self.fields = list(set(self.default_fields + self.detail_fields))
return [(None, {'fields': self.fields})]
def get_prepopulated_fields(self, request, obj=None):
#Not implemented
return {}
def get_readonly_fields(self, request, obj=None):
#Not implemented
return []
class AdminBatchUploadView(BaseBatchUploadView):
@method_decorator(staff_member_required)
def dispatch(self, *args, **kwargs):
        return super(AdminBatchUploadView, self).dispatch(*args, **kwargs)
| ninapavlich/django-batch-uploader | django_batch_uploader/views.py | Python | mit | 3,562 |
from bedrock.redirects.util import redirect
redirectpatterns = (
# Bug 608370, 957664
redirect(r'^press/kit(?:.*\.html|s/?)$', 'https://blog.mozilla.org/press/kits/'),
# bug 877198
redirect(r'^press/news\.html$', 'http://blog.mozilla.org/press/'),
redirect(r'^press/mozilla-2003-10-15\.html$',
'http://blog.mozilla.org/press/2003/10/'
'mozilla-foundation-launches-new-web-browser-and-end-user-services/'),
redirect(r'^press/mozilla-2004-02-09\.html$',
'https://blog.mozilla.org/press/2004/02/'
'new-round-of-releases-extends-mozilla-project%C2%92s-standards-based-open-source-offerings/'),
redirect(r'^press/mozilla-2004-02-17\.html$',
'http://blog.mozilla.org/press/2004/02/mozilla-gains-stronghold-in-europe/'),
redirect(r'^press/mozilla-2004-02-26\.html$',
'https://blog.mozilla.org/press/2004/02/'
'mozilla-foundation-rallies-supporters-to-take-back-the-web/'),
redirect(r'^press/mozilla-2004-05-03\.html$',
'http://blog.mozilla.org/press/2004/05/mozilla-foundation-releases-thunderbird-0-6/'),
redirect(r'^press/mozilla-2004-06-15\.html$',
'http://blog.mozilla.org/press/2004/06/mozilla-reloads-firefox/'),
redirect(r'^press/mozilla-2004-06-16\.html$',
'http://blog.mozilla.org/press/2004/06/mozilla-foundation-releases-thunderbird-0-7/'),
redirect(r'^press/mozilla-2004-06-30\.html$',
'http://blog.mozilla.org/press/2013/11/mozilla-foundation-announces-more-open-scriptable-plugins/'),
redirect(r'^press/mozilla-2004-08-02\.html$',
'http://blog.mozilla.org/press/2004/08/mozilla-foundation-announces-security-bug-bounty-program/'),
redirect(r'^press/mozilla-2004-08-10\.html$',
'http://blog.mozilla.org/press/2004/08/mozilla-foundation-announces-xforms-development-project/'),
redirect(r'^press/mozilla-2004-08-18\.html$',
'http://blog.mozilla.org/press/2004/08/mozilla-affiliate-in-japan-kicks-off/'),
redirect(r'^press/mozilla-2004-09-14-01\.html$',
'http://blog.mozilla.org/press/2004/09/'
'mozilla-foundation-announces-first-payments-of-security-bug-bounty-program-further-strengthens-browser-security/'),
redirect(r'^press/mozilla-2004-09-14-02\.html$',
'http://blog.mozilla.org/press/2013/11/'
'firefox-preview-release-and-thunderbird-0-8-released/'),
redirect(r'^press/mozilla-2004-09-20\.html$',
'http://blog.mozilla.org/press/2004/09/'
'mozilla-firefox-preview-release-hits-one-million-downloads-in-first-four-days-of-availability/'),
redirect(r'^press/mozilla-2004-10-01-02\.html$',
'http://blog.mozilla.org/press/2004/10/important-security-update-for-firefox-available/'),
redirect(r'^press/mozilla-2004-11-09\.html$',
'http://blog.mozilla.org/press/2004/11/'
'mozilla-foundation-releases-the-highly-anticipated-mozilla-firefox-1-0-web-browser/'),
redirect(r'^press/mozilla-2004-11-22\.html$',
'http://blog.mozilla.org/press/2004/11/important-update-to-german-language-version-of-firefox-1-0/'),
redirect(r'^press/mozilla-2004-12-15\.html$',
'http://blog.mozilla.org/press/2004/12/mozilla-foundation-places-two-page-advocacy-ad-in-the-new-york-times/'),
redirect(r'^press/mozilla-2004-12-7\.html$',
'http://blog.mozilla.org/press/2004/12/mozilla-thunderbird-1-0-email-client-has-landed/'),
redirect(r'^press/mozilla-2005-01-07\.html$',
'http://blog.mozilla.org/press/2005/01/'
'mozilla-firefox-and-thunderbird-to-support-new-open-standard-platform-for-usb-drives/'),
redirect(r'^press/mozilla-2005-02-02\.html$',
'http://blog.mozilla.org/press/2005/02/mozilla-foundation-announces-beta-release-of-xforms-1-0-recommendation/'),
redirect(r'^press/mozilla-2005-02-16\.html$',
'http://blog.mozilla.org/press/2005/01/'
'mozilla-firefox-and-thunderbird-to-support-new-open-standard-platform-for-usb-drives/'),
redirect(r'^press/mozilla-2005-02-24\.html$',
'http://blog.mozilla.org/press/2005/02/mozilla-foundation-announces-update-to-firefox/'),
redirect(r'^press/mozilla-2005-03-04\.html$',
'http://blog.mozilla.org/press/2005/03/mozilla-foundation-expands-with-launch-of-mozilla-china/'),
redirect(r'^press/mozilla-2005-03-23\.html$',
'http://blog.mozilla.org/press/2005/03/mozilla-foundation-releases-security-update-to-firefox/'),
redirect(r'^press/mozilla-2005-03-28\.html$',
'http://blog.mozilla.org/press/2005/03/mozilla-foundation-awards-bug-bounties/'),
redirect(r'^press/mozilla-2005-05-13\.html$',
'http://blog.mozilla.org/press/2005/05/'
'mozilla-foundation-co-hosts-europes-leading-xml-and-web-developer-conference/'),
redirect(r'^press/mozilla-2005-07-28\.html$',
'http://blog.mozilla.org/press/2005/07/mozilla-headlines-two-key-open-source-development-conferences-in-august/'),
redirect(r'^press/mozilla-2005-08-03\.html$',
'http://blog.mozilla.org/press/2005/08/mozilla-foundation-forms-new-organization-to-further-the-creation-'
'of-free-open-source-internet-software-including-the-award-winning-mozilla-firefox-browser/'),
redirect(r'^press/mozilla-2005-10-03\.html$',
'http://blog.mozilla.org/press/2005/10/mozilla-launches-beta-of-comprehensive-online-developer-center/'),
redirect(r'^press/mozilla-2005-10-19\.html$',
'http://blog.mozilla.org/press/2005/10/firefox-surpasses-100-million-downloads/'),
redirect(r'^press/mozilla-2005-11-29\.html$',
'http://blog.mozilla.org/press/2005/11/mozilla-introduces-firefox-1-5-and-ups-the-ante-in-web-browsing/'),
redirect(r'^press/mozilla-2005-11-3\.html$',
'http://blog.mozilla.org/press/2005/11/mozilla-kicks-off-extend-firefox-competition/'),
redirect(r'^press/mozilla-2005-11-30\.html$',
'http://blog.mozilla.org/press/2005/11/firefox-1-5-adds-answers-com-for-quick-reference/'),
redirect(r'^press/mozilla-2005-12-2\.html$',
'http://blog.mozilla.org/press/2005/12/mozilla-launches-firefox-flicks-campaign/'),
redirect(r'^press/mozilla-2005-12-22\.html$',
'http://blog.mozilla.org/press/2005/12/mozilla-launches-firefox-flicks-ad-contest/'),
redirect(r'^press/mozilla-2006-01-12\.html$',
'http://blog.mozilla.org/press/2006/01/mozilla-releases-thunderbird-1-5-email-client/'),
redirect(r'^press/mozilla-2006-01-24\.html$',
'http://blog.mozilla.org/press/2006/01/firefox-1-5-adoption-rising-as-browser-garners-acclaim/'),
redirect(r'^press/mozilla-2006-01-25\.html$',
'http://blog.mozilla.org/press/2006/01/indie-film-all-stars-foin-firefox-flicks-crew/'),
redirect(r'^press/mozilla-2006-02-03\.html$',
'http://blog.mozilla.org/press/2006/02/mozilla-releases-preview-of-application-framework-for-'
'development-of-cross-platform-internet-client-applications/'),
redirect(r'^press/mozilla-2006-03-02\.html$',
'http://blog.mozilla.org/press/2006/03/mozilla-announces-winners-of-extend-firefox-competition/'),
redirect(r'^press/mozilla-2006-04-12\.html$',
'http://blog.mozilla.org/press/2006/04/mozilla-showcases-first-round-of-community-produced-firefox-flicks-videos/'),
redirect(r'^press/mozilla-2006-04-18\.html$',
'http://blog.mozilla.org/press/2006/04/mozilla-receives-over-280-community-produced-videos-for-firefox-flicks/'),
redirect(r'^press/mozilla-2006-04-27\.html$',
'http://blog.mozilla.org/press/2006/04/firefox-flicks-video-contest-winners-announced/'),
redirect(r'^press/mozilla-2006-06-14\.html$',
'http://blog.mozilla.org/press/2006/06/mozilla-feeds-soccer-fans-passion-with-new-firefox-add-on/'),
redirect(r'^press/mozilla-2006-10-11\.html$',
'http://blog.mozilla.org/press/2006/10/qualcomm-launches-project-in-collaboration-with-'
'mozilla-foundation-to-develop-open-source-version-of-eudora-email-program/'),
redirect(r'^press/mozilla-2006-10-24-02\.html$',
'http://blog.mozilla.org/press/2006/10/firefox-moving-the-internet-forward/'),
redirect(r'^press/mozilla-2006-10-24\.html$',
'http://blog.mozilla.org/press/2006/10/'
'mozilla-releases-major-update-to-firefox-and-raises-the-bar-for-online-experience/'),
redirect(r'^press/mozilla-2006-11-07\.html$',
'http://blog.mozilla.org/press/2006/11/adobe-and-mozilla-foundation-to-open-source-flash-player-scripting-engine/'),
redirect(r'^press/mozilla-2006-12-04\.html$',
'http://blog.mozilla.org/press/2006/12/the-world-economic-forum-announces-technology-pioneers-2007-mozilla-selected/'),
redirect(r'^press/mozilla-2006-12-11\.html$',
'http://blog.mozilla.org/press/2006/12/mozilla-firefox-headed-for-primetime/'),
redirect(r'^press/mozilla-2007-02-07\.html$',
'http://blog.mozilla.org/press/2007/02/kodak-and-mozilla-join-forces-to-make-sharing-photos-even-easier/'),
redirect(r'^press/mozilla-2007-03-27\.html$',
'http://blog.mozilla.org/press/2007/03/mozilla-launches-new-firefox-add-ons-web-site/'),
redirect(r'^press/mozilla-2007-03-28\.html$',
'http://blog.mozilla.org/press/2007/03/mozilla-and-ebay-working-together-to-make-the-auction-'
'experience-easier-for-firefox-users-in-france-germany-and-the-uk/'),
redirect(r'^press/mozilla-2007-04-19\.html$',
'http://blog.mozilla.org/press/2007/04/mozilla-thunderbird-2-soars-to-new-heights/'),
redirect(r'^press/mozilla-2007-05-16\.html$',
'http://blog.mozilla.org/press/2007/05/united-nations-agency-awards-mozilla-world-information-society-award/'),
redirect(r'^press/mozilla-2007-07-04\.html$',
'http://blog.mozilla.org/press/2007/07/mozilla-and-ebay-launch-firefox-companion-for-ebay-users/'),
redirect(r'^press/mozilla-2007-08-10\.html$',
'http://blog.mozilla.org/press/2007/08/mozilla-to-host-24-hour-worldwide-community-event/'),
redirect(r'^press/mozilla-2007-08-28\.html$',
'http://blog.mozilla.org/press/2007/08/mozilla-welcomes-students-back-to-school-with-firefox-campus-edition/'),
redirect(r'^press/mozilla-2007-09-17-faq\.html$',
'http://blog.mozilla.org/press/2007/09/mozilla-launches-internet-mail-and-communications-initiative/'),
redirect(r'^press/mozilla-2007-09-17\.html$',
'http://blog.mozilla.org/press/2007/09/mozilla-launches-internet-mail-and-communications-initiative/'),
redirect(r'^press/mozilla-2008-01-07-faq\.html$',
'http://blog.mozilla.org/press/2008/01/mozilla-appoints-john-lilly-as-chief-executive-officer/'),
redirect(r'^press/mozilla-2008-01-07\.html$',
'http://blog.mozilla.org/press/2008/01/mozilla-appoints-john-lilly-as-chief-executive-officer/'),
redirect(r'^press/mozilla-2008-02-19-faq\.html$',
'http://blog.mozilla.org/press/2008/02/mozilla-messaging-starts-up-operations/'),
redirect(r'^press/mozilla-2008-02-19\.html$',
'http://blog.mozilla.org/press/2008/02/mozilla-messaging-starts-up-operations/'),
redirect(r'^press/mozilla-2008-05-28\.html$',
'http://blog.mozilla.org/press/2008/05/mozilla-aims-to-set-guinness-world-record-on-firefox-3-download-day/'),
redirect(r'^press/mozilla-2008-06-17-faq\.html$',
'http://blog.mozilla.org/press/2008/06/mozilla-releases-firefox-3-and-redefines-the-web-experience/'),
redirect(r'^press/mozilla-2008-06-17\.html$',
'http://blog.mozilla.org/press/2008/06/mozilla-releases-firefox-3-and-redefines-the-web-experience/'),
redirect(r'^press/mozilla-2008-07-02\.html$',
'http://blog.mozilla.org/press/2008/07/mozilla-sets-new-guinness-world-record-with-firefox-3-downloads/'),
redirect(r'^press/mozilla-2008-11-18\.html$',
'http://blog.mozilla.org/press/2008/11/mozilla-launches-fashion-your-'
'firefox-and-makes-it-easy-to-customize-the-browsing-experience/'),
redirect(r'^press/mozilla-2008-12-03\.html$',
'http://blog.mozilla.org/press/2008/12/mozilla-and-zazzle-announce-strategic-relationship-for-apparel-on-demand/'),
redirect(r'^press/mozilla-2009-03-31\.html$',
'https://blog.mozilla.org/press/2009/03/%C2%AD%C2%ADmozilla-adds-style-and-star-power-to-firefox-with-new-personas/'),
redirect(r'^press/mozilla-2009-06-30-faq\.html$',
'http://blog.mozilla.org/press/2009/04/mozilla-advances-the-web-with-firefox-3-5/'),
redirect(r'^press/mozilla-2009-06-30\.html$',
'http://blog.mozilla.org/press/2009/04/mozilla-advances-the-web-with-firefox-3-5/'),
redirect(r'^press/mozilla-foundation\.html$',
'http://blog.mozilla.org/press/2003/07/mozilla-org-announces-launch-of-the-'
'mozilla-foundation-to-lead-open-source-browser-efforts/'),
redirect(r'^press/mozilla1.0\.html$',
'http://blog.mozilla.org/press/2002/06/mozilla-org-launches-mozilla-1-0/'),
redirect(r'^press/open-source-security\.html$',
'http://blog.mozilla.org/press/2000/01/open-source-development-of-security-products-'
'possible-worldwide-enhancing-security-and-privacy-for-e-commerce-and-communication/'),
)
| l-hedgehog/bedrock | bedrock/press/redirects.py | Python | mpl-2.0 | 13,621 |
#!/usr/bin/env python
"""
Your task is to process the supplied file and use the csv module to extract data from it.
The data comes from the NREL (National Renewable Energy Laboratory) website. Each file
contains information from one meteorological station, in particular about the amount of
solar and wind energy for each hour of the day.
Note that the first line of the datafile is neither a data entry nor a header. It is a line
describing the data source. You should extract the name of the station from it.
The data should be returned as a list of lists (not dictionaries).
You can use the csv module's "reader" method to get data in such a format.
Another useful method is next() - to get the next line from the iterator.
You should only change the parse_file function.
"""
import csv
import os
DATADIR = ""
DATAFILE = "745090.csv"
def parse_file(datafile):
name = ""
data = []
with open(datafile,'rb') as f:
reader = csv.reader(f, delimiter=',')
# get name in first line
name += reader.next()[1]
reader.next() # skip header
for row in reader:
data.append(row)
# Do not change the line below
return (name, data)
def test():
datafile = os.path.join(DATADIR, DATAFILE)
name, data = parse_file(datafile)
assert name == "MOUNTAIN VIEW MOFFETT FLD NAS"
assert data[0][1] == "01:00"
assert data[2][0] == "01/01/2005"
assert data[2][5] == "2"
if __name__ == "__main__":
    test()
| tuanvu216/udacity-course | data_wrangling_with_mongodb/Lesson_1_Problem_Set/01-Using_CSV_Module/parsecsv.py | Python | mit | 1,477 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import re
import string
import glob
import sys
if sys.version_info[0] == 2:
def CmdArgsGetParser(usage):
reload(sys)
sys.setdefaultencoding('utf-8')
from optparse import OptionParser
return OptionParser('usage: %prog ' + usage)
def CmdArgsAddOption(parser, *args, **kwargs):
parser.add_option(*args, **kwargs)
def CmdArgsParse(parser):
return parser.parse_args()
else:
def CmdArgsGetParser(usage):
import argparse
ret = argparse.ArgumentParser(usage="%(prog)s " + usage)
ret.add_argument('REMAINDER', nargs=argparse.REMAINDER, help='task names')
return ret
def CmdArgsAddOption(parser, *args, **kwargs):
parser.add_argument(*args, **kwargs)
def CmdArgsParse(parser):
ret = parser.parse_args()
return (ret, ret.REMAINDER)
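# A minimal invocation sketch (hypothetical file names; the first argument is
# a FileDescriptorSet dump, the second the xresloader binary output):
#
#     python print_pb_bin.py my_tables.pb my_table.bin --as_one_line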
def main():
from google.protobuf import descriptor_pb2 as pb2
from google.protobuf import message_factory as _message_factory
from google.protobuf.text_format import MessageToString
script_dir = os.path.dirname(os.path.realpath(__file__))
default_header_pb_file = os.path.realpath(os.path.join(script_dir, '..', 'header', 'pb_header_v3.pb'))
usage = '[options...] <pb file> <binary file>'
parser = CmdArgsGetParser(usage)
CmdArgsAddOption(parser,
"-v",
"--version",
action="store_true",
help="show version and exit",
dest="version",
default=False)
CmdArgsAddOption(parser,
"--as_one_line",
action="store_true",
help="set one line per data_block for output",
dest="as_one_line",
default=False)
CmdArgsAddOption(parser,
"--header",
action="store",
help="set xresloader header pb file(default: {0})".format(os.path.relpath(default_header_pb_file, os.getcwd())),
dest="header_pb_file",
default=default_header_pb_file)
(options, left_args) = CmdArgsParse(parser)
if options.version:
print('1.0.0')
return 0
def print_help_msg(err_code):
parser.print_help()
exit(err_code)
if len(left_args) < 2:
print_help_msg(1)
header_pb_fds = pb2.FileDescriptorSet.FromString(open(options.header_pb_file, 'rb').read())
real_pb_fds = pb2.FileDescriptorSet.FromString(open(left_args[0], 'rb').read())
protobuf_fds = pb2.FileDescriptorSet.FromString(open(os.path.join(script_dir, 'extensions.pb'), 'rb').read())
header_message_desc = _message_factory.GetMessages([x for x in header_pb_fds.file])
pb_fds_header_clazz = header_message_desc["org.xresloader.pb.xresloader_datablocks"]
header_inst = pb_fds_header_clazz.FromString(open(left_args[1], 'rb').read())
print('==================================================================')
print(MessageToString(header_inst.header, as_utf8=True, as_one_line=options.as_one_line, use_short_repeated_primitives=True))
real_file_descs = [x for x in real_pb_fds.file]
real_file_descs.extend([x for x in header_pb_fds.file])
real_file_descs.extend([x for x in protobuf_fds.file])
real_message_desc = _message_factory.GetMessages(real_file_descs)
if header_inst.data_message_type not in real_message_desc:
print('------------------------------------------------------------------')
        print('data_message_type {0} not found in {1}'.format(header_inst.data_message_type, left_args[0]))
exit(0)
real_inst = real_message_desc[header_inst.data_message_type]
line_index = 0
if options.as_one_line:
print('------------------------------------------------------------------')
for data_block in header_inst.data_block:
message_inst = real_inst.FromString(data_block)
line_index = line_index + 1
if not options.as_one_line:
print('# {0:<5} ----------------------------------------------------------'.format(line_index))
decode_as_utf8 = False
try:
# see https://googleapis.dev/python/protobuf/latest/google/protobuf/text_format.html for detail
if options.as_one_line:
print('$ {0:<5}: {1}'.format(line_index, MessageToString(message_inst, as_utf8=True, as_one_line=options.as_one_line, use_short_repeated_primitives=True, print_unknown_fields=True)))
else:
print(MessageToString(message_inst, as_utf8=True, as_one_line=options.as_one_line, use_short_repeated_primitives=True, print_unknown_fields=True))
decode_as_utf8 = True
except:
pass
if decode_as_utf8:
continue
        try:
            # Fall back to escaped (non-UTF-8) output if UTF-8 decoding failed.
            if options.as_one_line:
                print('$ {0:<5}: {1}'.format(line_index, MessageToString(message_inst, as_utf8=False, as_one_line=options.as_one_line, use_short_repeated_primitives=True, print_unknown_fields=True)))
            else:
                print(MessageToString(message_inst, as_utf8=False, as_one_line=options.as_one_line, use_short_repeated_primitives=True, print_unknown_fields=True))
        except:
            pass
if __name__ == '__main__':
exit(main())
| xresloader/xresloader | tools/print_pb_bin.py | Python | mit | 5,217 |
from base import *
## TEST CONFIGURATION
TEST_RUNNER = 'discover_runner.DiscoverRunner'
TEST_DISCOVER_TOP_LEVEL = SITE_ROOT
TEST_DISCOVER_ROOT = SITE_ROOT
TEST_DISCOVER_PATTERN = "test_*.py"
## END TEST CONFIGURATION
## DATABASE CONFIGURATION
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": ":memory:",
"USER": "",
"PASSWORD": "",
"HOST": "",
"PORT": "",
},
}
## END DATABASE CONFIGURATION
| megaprojectske/megaprojects.co.ke | megaprojects/megaprojects/settings/test.py | Python | apache-2.0 | 477 |
__author__ = 'ch'
| scrumthing/pytm1 | pytm1/__init__.py | Python | apache-2.0 | 18 |
#!/usr/bin/env python3
import asyncio
import os
import re
import subprocess
import sys
import time
import tornado.httpserver
import tornado.ioloop
import tornado.template
import tornado.web
def log(fmt, *args):
if not args:
sys.stderr.write(str(fmt) + '\n')
else:
sys.stderr.write((str(fmt) % args) + '\n')
async def run_cgi(self, args, env=None):
if env:
genv = dict(os.environ)
genv.update(env)
else:
genv = None
p = await asyncio.create_subprocess_exec(
*args,
env=genv,
stdout=asyncio.subprocess.PIPE,
)
# Skip headers section
    while True:
line = await p.stdout.readline()
if not line.strip():
break
# Forward actual content bytes
    while True:
b = await p.stdout.read(1024)
if not b:
break
self.write(b)
self.flush()
await p.wait()
class IndexHandler(tornado.web.RequestHandler):
async def get(self):
await run_cgi(self, ['./index.cgi'])
class RssHandler(tornado.web.RequestHandler):
async def get(self):
self.set_header('Content-type', 'text/xml')
await run_cgi(self, ['./rss.cgi'])
class LogsHandler(tornado.web.RequestHandler):
async def get(self, logid):
await run_cgi(self, ['./log.cgi'], env={
'REQUEST_METHOD': 'GET',
'QUERY_STRING': 'log=%s' % logid,
})
def unlink(name):
try:
os.unlink(name)
except FileNotFoundError:
pass
class RebuildHandler(tornado.web.RequestHandler):
def get(self, logid):
self.render('rebuild.tmpl.html', logid=logid)
def post(self, logid):
assert re.match(r'^[0-9a-f]+$', logid)
unlink(os.path.join('../out/pass', logid))
unlink(os.path.join('../out/fail', logid))
unlink(os.path.join('../out/errcache', logid))
self.redirect('/')
def main():
debug = True if os.getenv('DEBUG') else False
settings = {
'autoreload': True,
'compress_response': True,
'debug': debug,
'template_whitespace': 'all'
}
STATICDIR = os.path.join(os.path.dirname(__file__), 'static')
app = tornado.web.Application([
(r'/', IndexHandler),
(r'/rss', RssHandler),
(r'/log/([0-9a-f]+)$', LogsHandler),
(r'/rebuild/([0-9a-f]+)$', RebuildHandler),
(r'/(.*)', tornado.web.StaticFileHandler, dict(path=STATICDIR)),
], **settings)
http_server = tornado.httpserver.HTTPServer(app, xheaders=True)
addr = '0.0.0.0' if debug else '127.0.0.1'
PORT = 8014
print('Listening on %s:%d' % (addr, PORT))
http_server.listen(PORT, address=addr)
tornado.ioloop.IOLoop.current().start()
if __name__ == '__main__':
main()
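# Development sketch: setting DEBUG in the environment turns on autoreload and
# binds to all interfaces instead of localhost only, e.g.:
#
#     DEBUG=1 ./run.py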
| apenwarr/gitbuilder | viewer/run.py | Python | gpl-2.0 | 2,875 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Post.user'
db.add_column(u'posts_post', 'user',
self.gf('django.db.models.fields.related.ForeignKey')(default=1, to=orm['auth.User']),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Post.user'
db.delete_column(u'posts_post', 'user_id')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'posts.post': {
'Meta': {'object_name': 'Post'},
'body': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
}
}
    complete_apps = ['posts']
| erkarl/browl-api | apps/posts/migrations/0002_auto__add_field_post_user.py | Python | mit | 4,225 |
# -*- coding: utf-8 -*-
"""
celery.registry
~~~~~~~~~~~~~~~
Registry of available tasks.
:copyright: (c) 2009 - 2011 by Ask Solem.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import inspect
from .exceptions import NotRegistered
class TaskRegistry(dict):
NotRegistered = NotRegistered
def regular(self):
"""Get all regular task types."""
return self.filter_types("regular")
def periodic(self):
"""Get all periodic task types."""
return self.filter_types("periodic")
def register(self, task):
"""Register a task in the task registry.
The task will be automatically instantiated if not already an
instance.
"""
self[task.name] = inspect.isclass(task) and task() or task
def unregister(self, name):
"""Unregister task by name.
:param name: name of the task to unregister, or a
:class:`celery.task.base.Task` with a valid `name` attribute.
:raises celery.exceptions.NotRegistered: if the task has not
been registered.
"""
try:
# Might be a task class
name = name.name
except AttributeError:
pass
self.pop(name)
def filter_types(self, type):
"""Return all tasks of a specific type."""
return dict((name, task) for name, task in self.iteritems()
if task.type == type)
def __getitem__(self, key):
try:
return dict.__getitem__(self, key)
except KeyError:
raise self.NotRegistered(key)
def pop(self, key, *args):
try:
return dict.pop(self, key, *args)
except KeyError:
raise self.NotRegistered(key)
#: Global task registry.
tasks = TaskRegistry()
def _unpickle_task(name):
return tasks[name]
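# A minimal usage sketch (hypothetical task class; any object exposing `name`
# and `type` attributes registers the same way):
#
#     class AddTask(object):
#         name = "tasks.add"
#         type = "regular"
#
#     tasks.register(AddTask)        # classes are instantiated automatically
#     assert "tasks.add" in tasks.regular()
#     tasks.unregister("tasks.add")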
| mzdaniel/oh-mainline | vendor/packages/celery/celery/registry.py | Python | agpl-3.0 | 1,926 |
"""Config flow to configure the MJPEG IP Camera integration."""
from __future__ import annotations
from http import HTTPStatus
from types import MappingProxyType
from typing import Any
import requests
from requests.auth import HTTPBasicAuth, HTTPDigestAuth
from requests.exceptions import HTTPError, Timeout
import voluptuous as vol
from homeassistant.config_entries import ConfigEntry, ConfigFlow, OptionsFlow
from homeassistant.const import (
CONF_AUTHENTICATION,
CONF_NAME,
CONF_PASSWORD,
CONF_USERNAME,
CONF_VERIFY_SSL,
HTTP_BASIC_AUTHENTICATION,
HTTP_DIGEST_AUTHENTICATION,
)
from homeassistant.core import HomeAssistant, callback
from homeassistant.data_entry_flow import FlowResult
from homeassistant.exceptions import HomeAssistantError
from .const import CONF_MJPEG_URL, CONF_STILL_IMAGE_URL, DOMAIN, LOGGER
@callback
def async_get_schema(
defaults: dict[str, Any] | MappingProxyType[str, Any], show_name: bool = False
) -> vol.Schema:
"""Return MJPEG IP Camera schema."""
schema = {
vol.Required(CONF_MJPEG_URL, default=defaults.get(CONF_MJPEG_URL)): str,
vol.Optional(
CONF_STILL_IMAGE_URL,
description={"suggested_value": defaults.get(CONF_STILL_IMAGE_URL)},
): str,
vol.Optional(
CONF_USERNAME,
description={"suggested_value": defaults.get(CONF_USERNAME)},
): str,
vol.Optional(
CONF_PASSWORD,
default=defaults.get(CONF_PASSWORD, ""),
): str,
vol.Optional(
CONF_VERIFY_SSL,
default=defaults.get(CONF_VERIFY_SSL, True),
): bool,
}
if show_name:
schema = {
vol.Optional(CONF_NAME, default=defaults.get(CONF_NAME)): str,
**schema,
}
return vol.Schema(schema)
def validate_url(
url: str,
username: str | None,
password: str,
verify_ssl: bool,
authentication: str = HTTP_BASIC_AUTHENTICATION,
) -> str:
"""Test if the given setting works as expected."""
auth: HTTPDigestAuth | HTTPBasicAuth | None = None
if username and password:
if authentication == HTTP_DIGEST_AUTHENTICATION:
auth = HTTPDigestAuth(username, password)
else:
auth = HTTPBasicAuth(username, password)
response = requests.get(
url,
auth=auth,
stream=True,
timeout=10,
verify=verify_ssl,
)
if response.status_code == HTTPStatus.UNAUTHORIZED:
# If unauthorized, try again using digest auth
if authentication == HTTP_BASIC_AUTHENTICATION:
return validate_url(
url, username, password, verify_ssl, HTTP_DIGEST_AUTHENTICATION
)
raise InvalidAuth
response.raise_for_status()
response.close()
return authentication
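# A minimal sketch of the fallback above (hypothetical camera URL): a 401 on
# basic auth triggers exactly one retry with digest auth, and the scheme that
# finally worked is returned.
#
#     scheme = validate_url("http://camera.local/mjpeg", "user", "pw", True)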
async def async_validate_input(
hass: HomeAssistant, user_input: dict[str, Any]
) -> tuple[dict[str, str], str]:
"""Manage MJPEG IP Camera options."""
errors = {}
field = "base"
authentication = HTTP_BASIC_AUTHENTICATION
try:
for field in (CONF_MJPEG_URL, CONF_STILL_IMAGE_URL):
if not (url := user_input.get(field)):
continue
authentication = await hass.async_add_executor_job(
validate_url,
url,
user_input.get(CONF_USERNAME),
user_input[CONF_PASSWORD],
user_input[CONF_VERIFY_SSL],
)
except InvalidAuth:
errors["username"] = "invalid_auth"
except (OSError, HTTPError, Timeout):
LOGGER.exception("Cannot connect to %s", user_input[CONF_MJPEG_URL])
errors[field] = "cannot_connect"
return (errors, authentication)
class MJPEGFlowHandler(ConfigFlow, domain=DOMAIN):
"""Config flow for MJPEG IP Camera."""
VERSION = 1
@staticmethod
@callback
def async_get_options_flow(
config_entry: ConfigEntry,
) -> MJPEGOptionsFlowHandler:
"""Get the options flow for this handler."""
return MJPEGOptionsFlowHandler(config_entry)
async def async_step_user(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Handle a flow initialized by the user."""
errors: dict[str, str] = {}
if user_input is not None:
errors, authentication = await async_validate_input(self.hass, user_input)
if not errors:
self._async_abort_entries_match(
{CONF_MJPEG_URL: user_input[CONF_MJPEG_URL]}
)
                # Storing data in options, to allow for changing them later
# using an options flow.
return self.async_create_entry(
title=user_input.get(CONF_NAME, user_input[CONF_MJPEG_URL]),
data={},
options={
CONF_AUTHENTICATION: authentication,
CONF_MJPEG_URL: user_input[CONF_MJPEG_URL],
CONF_PASSWORD: user_input[CONF_PASSWORD],
CONF_STILL_IMAGE_URL: user_input.get(CONF_STILL_IMAGE_URL),
CONF_USERNAME: user_input.get(CONF_USERNAME),
CONF_VERIFY_SSL: user_input[CONF_VERIFY_SSL],
},
)
else:
user_input = {}
return self.async_show_form(
step_id="user",
data_schema=async_get_schema(user_input, show_name=True),
errors=errors,
)
async def async_step_import(self, config: dict[str, Any]) -> FlowResult:
"""Handle a flow initialized by importing a config."""
self._async_abort_entries_match({CONF_MJPEG_URL: config[CONF_MJPEG_URL]})
return self.async_create_entry(
title=config[CONF_NAME],
data={},
options={
CONF_AUTHENTICATION: config[CONF_AUTHENTICATION],
CONF_MJPEG_URL: config[CONF_MJPEG_URL],
CONF_PASSWORD: config[CONF_PASSWORD],
CONF_STILL_IMAGE_URL: config.get(CONF_STILL_IMAGE_URL),
CONF_USERNAME: config.get(CONF_USERNAME),
CONF_VERIFY_SSL: config[CONF_VERIFY_SSL],
},
)
class MJPEGOptionsFlowHandler(OptionsFlow):
"""Handle MJPEG IP Camera options."""
def __init__(self, config_entry: ConfigEntry) -> None:
"""Initialize MJPEG IP Camera options flow."""
self.config_entry = config_entry
async def async_step_init(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Manage MJPEG IP Camera options."""
errors: dict[str, str] = {}
if user_input is not None:
errors, authentication = await async_validate_input(self.hass, user_input)
if not errors:
for entry in self.hass.config_entries.async_entries(DOMAIN):
if (
entry.entry_id != self.config_entry.entry_id
and entry.options[CONF_MJPEG_URL] == user_input[CONF_MJPEG_URL]
):
errors = {CONF_MJPEG_URL: "already_configured"}
if not errors:
return self.async_create_entry(
title=user_input.get(CONF_NAME, user_input[CONF_MJPEG_URL]),
data={
CONF_AUTHENTICATION: authentication,
CONF_MJPEG_URL: user_input[CONF_MJPEG_URL],
CONF_PASSWORD: user_input[CONF_PASSWORD],
CONF_STILL_IMAGE_URL: user_input.get(CONF_STILL_IMAGE_URL),
CONF_USERNAME: user_input.get(CONF_USERNAME),
CONF_VERIFY_SSL: user_input[CONF_VERIFY_SSL],
},
)
else:
user_input = {}
return self.async_show_form(
step_id="init",
data_schema=async_get_schema(user_input or self.config_entry.options),
errors=errors,
)
class InvalidAuth(HomeAssistantError):
"""Error to indicate there is invalid auth."""
| rohitranjan1991/home-assistant | homeassistant/components/mjpeg/config_flow.py | Python | mit | 8,269 |
from django.db import connection
from calaccess_campaign_browser import models
from calaccess_campaign_browser.management.commands import CalAccessCommand
class Command(CalAccessCommand):
help = "Drops all CAL-ACCESS campaign browser database tables"
def handle(self, *args, **options):
self.header("Dropping CAL-ACCESS campaign browser database tables")
self.cursor = connection.cursor()
# Ignore MySQL "note" warnings so this can be run with DEBUG=True
self.cursor.execute("""SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0;""")
# Loop through the models and drop all the tables
model_list = [
models.Contribution,
models.Expenditure,
models.Summary,
models.Filing,
models.Committee,
models.Filer,
models.Cycle,
models.Election,
models.Office,
models.Candidate,
models.Proposition,
models.PropositionFiler,
]
sql = """DROP TABLE IF EXISTS `%s`;"""
for m in model_list:
self.log(" %s" % m.__name__)
self.cursor.execute(sql % m._meta.db_table)
# Revert database to default "note" warning behavior
self.cursor.execute("""SET SQL_NOTES=@OLD_SQL_NOTES;""")
| myersjustinc/django-calaccess-campaign-browser | calaccess_campaign_browser/management/commands/dropcalaccesscampaignbrowser.py | Python | mit | 1,320 |
import os
from setuptools import setup
import re
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
def extract_version():
with open('version.h') as inf:
lines = inf.readlines()
ver_map = {'MAJOR': 0,
'MINOR': 0,
'BUGFIX': 0}
for l in lines:
match = re.match(r'#define VER_(MAJOR|MINOR|BUGFIX) (\d+)', l)
if match:
field, ver = match.groups()
ver_map[field] = ver
return '.'.join([ver_map['MAJOR'],
ver_map['MINOR'],
ver_map['BUGFIX']])
setup(
name = "asl_f4_loader",
version = extract_version(),
author = "Jeff Ciesielski",
author_email = "[email protected]",
description = ("A library and shell script to perform firmware updates on Cortex M4 Microcontrollers. (Currently: STM32F4)"),
license = "GPL2",
keywords = "firmware bootloader stm32f4",
url = "https://github.com/autosportlabs/ASL_F4_bootloader/tree/master/host_tools",
packages=['asl_f4_loader'],
package_dir={'asl_f4_loader': 'host_tools',},
classifiers=[
"Development Status :: 4 - Beta",
"Topic :: Utilities",
"License :: OSI Approved :: GPLv2 License",
],
entry_points = {
'console_scripts': [
'asl_f4_loader = asl_f4_loader.fw_update:main',
'asl_f4_fw_postprocess = asl_f4_loader.fw_postprocess:main'
]
},
install_requires=[
'ihextools >= 1.1.0',
'pyserial >= 2.7',
'crcmod >= 1.7',
'XBVC >= 0.0.1'
]
)
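# A minimal packaging sketch (assuming a standard setuptools workflow and that
# version.h sits next to this file, as extract_version() expects):
#
#     python setup.py sdist
#     pip install dist/asl_f4_loader-<version>.tar.gz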
| autosportlabs/ASL_F4_bootloader | setup.py | Python | gpl-2.0 | 1,836 |
#!/usr/bin/env python
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import json
import os
import sys
import urllib
from common_includes import *
import chromium_roll
class CheckActiveRoll(Step):
MESSAGE = "Check active roll."
@staticmethod
def ContainsChromiumRoll(changes):
for change in changes:
if change["subject"].startswith("Update V8 to"):
return True
return False
def RunStep(self):
params = {
"closed": 3,
"owner": self._options.author,
"limit": 30,
"format": "json",
}
params = urllib.urlencode(params)
search_url = "https://codereview.chromium.org/search"
result = self.ReadURL(search_url, params, wait_plan=[5, 20])
if self.ContainsChromiumRoll(json.loads(result)["results"]):
print "Stop due to existing Chromium roll."
return True
class DetectLastPush(Step):
MESSAGE = "Detect commit ID of the last push to trunk."
def RunStep(self):
self.vc.Fetch()
push_hash = self.FindLastTrunkPush(
branch="origin/candidates", include_patches=True)
self["last_push"] = self.GetCommitPositionNumber(push_hash)
class DetectLastRoll(Step):
MESSAGE = "Detect commit ID of the last Chromium roll."
def RunStep(self):
# Interpret the DEPS file to retrieve the v8 revision.
    # TODO(machenbach): This should be part of the roll-deps api of
    # depot_tools.
Var = lambda var: '%s'
exec(FileToText(os.path.join(self._options.chromium, "DEPS")))
last_roll = self.GetCommitPositionNumber(vars['v8_revision'])
    # FIXME(machenbach): When rolling from bleeding edge and from trunk there
    # may be different commit numbers here. Better to use the version?
if int(last_roll) >= int(self["last_push"]):
print("There is no newer v8 revision than the one in Chromium (%s)."
% last_roll)
return True
class CheckClusterFuzz(Step):
MESSAGE = "Check ClusterFuzz api for new problems."
def RunStep(self):
if not os.path.exists(self.Config("CLUSTERFUZZ_API_KEY_FILE")):
print "Skipping ClusterFuzz check. No api key file found."
return False
api_key = FileToText(self.Config("CLUSTERFUZZ_API_KEY_FILE"))
# Check for open, reproducible issues that have no associated bug.
result = self._side_effect_handler.ReadClusterFuzzAPI(
api_key, job_type="linux_asan_d8_dbg", reproducible="True",
open="True", bug_information="",
revision_greater_or_equal=str(self["last_push"]))
if result:
print "Stop due to pending ClusterFuzz issues."
return True
class RollChromium(Step):
MESSAGE = "Roll V8 into Chromium."
def RunStep(self):
if self._options.roll:
args = [
"--author", self._options.author,
"--reviewer", self._options.reviewer,
"--chromium", self._options.chromium,
"--use-commit-queue",
]
if self._options.sheriff:
args.extend([
"--sheriff", "--googlers-mapping", self._options.googlers_mapping])
if self._options.dry_run:
args.extend(["--dry-run"])
if self._options.work_dir:
args.extend(["--work-dir", self._options.work_dir])
self._side_effect_handler.Call(chromium_roll.ChromiumRoll().Run, args)
class AutoRoll(ScriptsBase):
def _PrepareOptions(self, parser):
parser.add_argument("-c", "--chromium", required=True,
help=("The path to your Chromium src/ "
"directory to automate the V8 roll."))
parser.add_argument("--roll", help="Call Chromium roll script.",
default=False, action="store_true")
def _ProcessOptions(self, options): # pragma: no cover
if not options.reviewer:
print "A reviewer (-r) is required."
return False
if not options.author:
print "An author (-a) is required."
return False
return True
def _Config(self):
return {
"PERSISTFILE_BASENAME": "/tmp/v8-auto-roll-tempfile",
"CLUSTERFUZZ_API_KEY_FILE": ".cf_api_key",
}
def _Steps(self):
return [
CheckActiveRoll,
DetectLastPush,
DetectLastRoll,
CheckClusterFuzz,
RollChromium,
]
if __name__ == "__main__": # pragma: no cover
sys.exit(AutoRoll().Run())
| mxOBS/deb-pkg_trusty_chromium-browser | v8/tools/push-to-trunk/auto_roll.py | Python | bsd-3-clause | 4,392 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This script fixes links that contain common spelling mistakes.
This is only possible on wikis that have a template for these misspellings.
Command line options:
-always:XY instead of asking the user what to do, always perform the same
action. For example, XY can be "r0", "u" or "2". Be careful with
this option, and check the changes made by the bot. Note that
some choices for XY don't make sense and will result in a loop,
e.g. "l" or "m".
-start:XY goes through all misspellings in the category on your wiki
that is defined (to the bot) as the category containing
misspelling pages, starting at XY. If the -start argument is not
given, it starts at the beginning.
-main only check pages in the main namespace, not in the talk,
wikipedia, user, etc. namespaces.
"""
# (C) Daniel Herding, 2007
# (C) Pywikibot team, 2007-2015
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
__version__ = '$Id$'
#
import pywikibot
from pywikibot import i18n, pagegenerators
from pywikibot.tools import PY2
from scripts.solve_disambiguation import DisambiguationRobot
if not PY2:
basestring = (str, )
HELP_MSG = """\n
misspelling.py does not support site {site}.
Help Pywikibot team to provide support for your wiki by submitting
a bug to:
https://phabricator.wikimedia.org/maniphest/task/create/?projects=pywikibot-core
with category containing misspelling pages or a template for
these misspellings.\n"""
class MisspellingRobot(DisambiguationRobot):
"""Spelling bot."""
misspellingTemplate = {
'de': ('Falschschreibung', 'Obsolete Schreibung'),
}
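    # Example (illustrative): a de.wikipedia misspelling page carries a
    # template such as {{Falschschreibung|Korrekte Schreibung}}; the last
    # template parameter holds the correct spelling (see findAlternatives).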
# Optional: if there is a category, one can use the -start
# parameter.
misspellingCategory = {
'da': u'Omdirigeringer af fejlstavninger', # only contains date redirects at the moment
'de': ('Kategorie:Wikipedia:Falschschreibung',
'Kategorie:Wikipedia:Obsolete Schreibung'),
'en': u'Redirects from misspellings',
'hu': u'Átirányítások hibás névről',
'nl': u'Categorie:Wikipedia:Redirect voor spelfout',
}
def __init__(self, always, firstPageTitle, main_only):
"""Constructor."""
super(MisspellingRobot, self).__init__(
always, [], True, False, None, False, main_only)
self.generator = self.createPageGenerator(firstPageTitle)
def createPageGenerator(self, firstPageTitle):
"""
Generator to retrieve misspelling pages or misspelling redirects.
@rtype: generator
"""
mylang = self.site.code
if mylang in self.misspellingCategory:
categories = self.misspellingCategory[mylang]
if isinstance(categories, basestring):
categories = (categories, )
generators = (
pagegenerators.CategorizedPageGenerator(
pywikibot.Category(self.site, misspellingCategoryTitle),
recurse=True, start=firstPageTitle)
for misspellingCategoryTitle in categories)
elif mylang in self.misspellingTemplate:
templates = self.misspellingTemplate[mylang]
if isinstance(templates, basestring):
templates = (templates, )
generators = (
pagegenerators.ReferringPageGenerator(
pywikibot.Page(self.site, misspellingTemplateName, ns=10),
onlyTemplateInclusion=True)
for misspellingTemplateName in templates)
if firstPageTitle:
pywikibot.output(
u'-start parameter unsupported on this wiki because there '
u'is no category for misspellings.')
else:
pywikibot.output(HELP_MSG.format(site=self.site))
empty_gen = (i for i in [])
return empty_gen
generator = pagegenerators.CombinedPageGenerator(generators)
preloadingGen = pagegenerators.PreloadingGenerator(generator)
return preloadingGen
def findAlternatives(self, disambPage):
"""
Append link target to a list of alternative links.
Overrides the DisambiguationRobot method.
@return: True if alternate link was appended
@rtype: bool or None
"""
if disambPage.isRedirectPage():
self.alternatives.append(disambPage.getRedirectTarget().title())
return True
        if self.misspellingTemplate.get(disambPage.site.code) is not None:
            # Normalize to a tuple, as a single template may be given as a
            # string; comparing a title to a tuple with == would never match.
            templates = self.misspellingTemplate[disambPage.site.code]
            if isinstance(templates, basestring):
                templates = (templates, )
            for template, params in disambPage.templatesWithParams():
                if template.title(withNamespace=False) in templates:
                    # The correct spelling is in the last parameter.
correctSpelling = params[-1]
# On de.wikipedia, there are some cases where the
                    # misspelling is ambiguous, see for example:
# https://de.wikipedia.org/wiki/Buthan
for match in self.linkR.finditer(correctSpelling):
self.alternatives.append(match.group('title'))
if not self.alternatives:
# There were no links in the parameter, so there is
# only one correct spelling.
self.alternatives.append(correctSpelling)
return True
def setSummaryMessage(self, disambPage, *args, **kwargs):
"""
Setup the summary message.
Overrides the DisambiguationRobot method.
"""
# TODO: setSummaryMessage() in solve_disambiguation now has parameters
# new_targets and unlink. Make use of these here.
self.comment = i18n.twtranslate(self.site, 'misspelling-fixing',
{'page': disambPage.title()})
def main(*args):
"""
Process command line arguments and invoke bot.
If args is an empty list, sys.argv is used.
@param args: command line arguments
@type args: list of unicode
"""
# the option that's always selected when the bot wonders what to do with
# a link. If it's None, the user is prompted (default behaviour).
always = None
main_only = False
firstPageTitle = None
for arg in pywikibot.handle_args(args):
if arg.startswith('-always:'):
always = arg[8:]
elif arg.startswith('-start'):
if len(arg) == 6:
firstPageTitle = pywikibot.input(
u'At which page do you want to start?')
else:
firstPageTitle = arg[7:]
elif arg == '-main':
main_only = True
bot = MisspellingRobot(always, firstPageTitle, main_only)
bot.run()
if __name__ == "__main__":
main()
| icyflame/batman | scripts/misspelling.py | Python | mit | 7,040 |
""" Tests topology manipulation tools
"""
import collections
import pytest
import moldesign as mdt
from moldesign import units as u
registered_types = {}
def typedfixture(*types, **kwargs):
"""This is a decorator that lets us associate fixtures with one or more arbitrary types.
We'll later use this type to determine what tests to run on the result"""
def fixture_wrapper(func):
for t in types:
registered_types.setdefault(t, []).append(func.__name__)
return pytest.fixture(**kwargs)(func)
return fixture_wrapper
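# Example (illustrative): registering a fixture under a hypothetical type tag:
#   @typedfixture('molecule')
#   def benzene():
#       return mdt.from_smiles('c1ccccc1')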
@pytest.fixture
def ammonium_nocharge():
return mdt.from_smiles('[NH4]')
@pytest.fixture
def ammonium_charged():
return mdt.from_smiles('[NH4+]')
@pytest.mark.parametrize('objkey',
['ammonium_nocharge', 'ammonium_charged'])
def test_ammonium_formal_charge(objkey, request):
mol = request.getfuncargvalue(objkey)
mdt.assign_formal_charges(mol)
assert mol.charge == 1 * u.q_e
for atom in mol.atoms:
if atom.atnum == 7:
assert atom.formal_charge == 1 * u.q_e
else:
assert atom.atnum == 1
assert atom.formal_charge == 0 * u.q_e
@pytest.fixture
def c2_no_hydrogen_from_smiles():
mymol = mdt.from_smiles('[CH0][CH0]')
return mymol
def test_c2_no_hydrogen_from_smiles(c2_no_hydrogen_from_smiles):
mymol = c2_no_hydrogen_from_smiles
atomcounts = collections.Counter(atom.element for atom in mymol.atoms)
assert atomcounts['C'] == 2
assert len(atomcounts) == 1
assert mymol.num_bonds == 1
assert mymol.num_atoms == 2
bonds = list(mymol.bonds)
assert len(bonds) == 1
b = bonds[0]
assert b.order == 1
assert b.a1.index == 0
assert b.a2.index == 1
def test_add_hydrogen_to_c2(c2_no_hydrogen_from_smiles):
newmol = mdt.add_hydrogen(c2_no_hydrogen_from_smiles)
atomcounts = collections.Counter(atom.element for atom in newmol.atoms)
assert newmol.num_atoms == 8
assert atomcounts['C'] == 2
assert atomcounts['H'] == 6
assert len(atomcounts) == 2
assert newmol.num_bonds == 7
for atom, bondgraph in newmol.bond_graph.iteritems():
if atom.atnum == 1:
assert len(bondgraph) == 1
assert bondgraph.keys()[0].elem == 'C'
assert bondgraph.values()[0] == 1
else:
assert atom.atnum == 6
assert len(bondgraph) == 4
for nbr in bondgraph:
assert bondgraph[nbr] == 1
| tkzeng/molecular-design-toolkit | moldesign/_tests/test_tools.py | Python | apache-2.0 | 2,517 |
"""
Represent either BED12 or genePred transcripts as objects. Allows for conversion of coordinates between
chromosome, mRNA and CDS coordinate spaces. Can slice objects into subsets.
"""
import collections
from itertools import izip
from bx.intervals.cluster import ClusterTree
from mathOps import find_closest, find_intervals
from bio import reverse_complement, translate_sequence
from fileOps import iter_lines
from intervals import ChromosomeInterval
__author__ = "Ian Fiddes"
class Transcript(object):
"""
Represent a transcript record from a bed file.
"""
__slots__ = ('name', 'strand', 'score', 'thick_start', 'rgb', 'thick_stop', 'start', 'stop', 'intron_intervals',
'exon_intervals', 'exons', 'block_sizes', 'block_starts', 'block_count', 'chromosome',
'interval', 'coding_interval')
def __init__(self, bed_tokens):
self.chromosome = bed_tokens[0]
self.start = int(bed_tokens[1])
self.stop = int(bed_tokens[2])
self.name = bed_tokens[3]
self.score = int(bed_tokens[4])
self.strand = bed_tokens[5]
self.thick_start = int(bed_tokens[6])
self.thick_stop = int(bed_tokens[7])
self.rgb = bed_tokens[8]
self.block_count = int(bed_tokens[9])
self.block_sizes = [int(x) for x in bed_tokens[10].split(",") if x != ""]
self.block_starts = [int(x) for x in bed_tokens[11].split(",") if x != ""]
self.exon_intervals = self._get_exon_intervals()
self.intron_intervals = self._get_intron_intervals()
self.interval = self._get_interval()
self.coding_interval = self._get_coding_interval()
def __len__(self):
return sum(len(x) for x in self.exon_intervals)
def __hash__(self):
return (hash(self.chromosome) ^ hash(self.start) ^ hash(self.stop) ^ hash(self.strand) ^
hash((self.chromosome, self.start, self.stop, self.strand)))
def __repr__(self):
return 'Transcript({})'.format(self.get_bed())
@property
def cds_size(self):
"""calculates the number of coding bases"""
l = 0
for e in self.exon_intervals:
if self.thick_start < e.start and e.stop < self.thick_stop:
# squarely in the CDS
l += e.stop - e.start
elif e.start <= self.thick_start < e.stop < self.thick_stop:
# thickStart marks the start of the CDS
l += e.stop - self.thick_start
elif e.start <= self.thick_start and self.thick_stop <= e.stop:
# thickStart and thickStop mark the whole CDS
l += self.thick_stop - self.thick_start
elif self.thick_start < e.start < self.thick_stop <= e.stop:
# thickStop marks the end of the CDS
l += self.thick_stop - e.start
return l
@property
def num_coding_introns(self):
"""how many coding introns does this transcript have?"""
return len([i for i in self.intron_intervals if i.subset(self.coding_interval)])
@property
def num_coding_exons(self):
"""how many coding exons does this transcript have?"""
return len([i for i in self.exon_intervals if i.overlap(self.coding_interval)])
def _get_interval(self):
"""
Returns a ChromosomeInterval object representing the full span of this transcript.
"""
return ChromosomeInterval(self.chromosome, self.start, self.stop, self.strand)
def _get_coding_interval(self):
"""
Returns a ChromosomeInterval object representing the coding span of this transcript.
"""
return ChromosomeInterval(self.chromosome, self.thick_start, self.thick_stop, self.strand)
def _get_exon_intervals(self):
"""
Builds a list of ChromosomeInterval objects representing the exons of this transcript.
:return: List of ChromosomeIntervals
"""
exon_intervals = []
for block_size, block_start in izip(*(self.block_sizes, self.block_starts)):
start = self.start + block_start
stop = self.start + block_start + block_size
exon_intervals.append(ChromosomeInterval(self.chromosome, start, stop, self.strand))
return exon_intervals
def _get_intron_intervals(self):
"""
Builds a list of ChromosomeInterval objects representing the introns of this transcript.
:return: List of ChromosomeIntervals
"""
intron_intervals = []
for i in xrange(1, len(self.block_starts)):
stop = self.start + self.block_starts[i]
start = self.start + self.block_starts[i - 1] + self.block_sizes[i - 1]
intron_intervals.append(ChromosomeInterval(self.chromosome, start, stop, self.strand))
return intron_intervals
def get_bed(self, rgb=None, name=None, new_start=None, new_stop=None):
"""
Returns BED tokens for this object. Can be sliced into sub regions.
:param rgb: Set this to modify the RGB field.
:param name: Set this to modify the name field.
:param new_start: Set this (in chromosome coordinates) to move the start.
:param new_stop: Set this (in chromosome coordinates) to move the stop.
:return: List of values representing a BED entry.
"""
if new_start is not None and new_stop is not None:
assert new_start <= new_stop
if new_start is not None:
assert new_start >= self.start
else:
new_start = self.start
if new_stop is not None:
assert new_stop <= self.stop
else:
new_stop = self.stop
rgb = self.rgb if rgb is None else rgb
name = self.name if name is None else name
# special case -- start == stop
if new_start == new_stop:
if self.cds_size == 0:
thick_start = thick_stop = 0
else:
thick_start = new_start
thick_stop = new_stop
return map(str, [self.chromosome, new_start, new_stop, name, self.score, self.strand, thick_start,
thick_stop, rgb, 1, 0, 0])
if self.chromosome_coordinate_to_mrna(new_start) is None:
new_start = find_closest([x.start for x in self.exon_intervals], new_start)
if self.chromosome_coordinate_to_mrna(new_stop) is None:
new_stop = find_closest([x.stop for x in self.exon_intervals], new_stop)
# start slicing out intervals
new_interval = ChromosomeInterval(self.chromosome, new_start, new_stop, self.strand)
exon_intervals = []
for exon in self.exon_intervals:
new_exon = exon.intersection(new_interval)
if new_exon is None:
continue
exon_intervals.append(new_exon)
# if new_start or new_stop were not within the exonic intervals, adjust them
if new_start != exon_intervals[0].start:
new_start = exon_intervals[0].start
if new_stop != exon_intervals[-1].stop:
new_stop = exon_intervals[-1].stop
thick_start = max(self.thick_start, new_start)
thick_stop = min(self.thick_stop, new_stop)
if thick_start >= self.thick_stop or thick_stop < self.thick_start:
thick_start = 0
thick_stop = 0
block_count = len(exon_intervals)
block_sizes = ','.join(map(str, [len(x) for x in exon_intervals]))
block_starts = ','.join(map(str, [x.start - new_start for x in exon_intervals]))
return map(str, [self.chromosome, new_start, new_stop, name, self.score, self.strand, thick_start, thick_stop,
rgb, block_count, block_sizes, block_starts])
def chromosome_coordinate_to_mrna(self, coord):
if not (self.start <= coord < self.stop):
return None
p = 0
i = ChromosomeInterval(self.chromosome, coord, coord + 1, self.strand)
if not any(i.overlap(x) for x in self.exon_intervals):
return None
exon_intervals = self.exon_intervals if self.strand == '+' else reversed(self.exon_intervals)
for e in exon_intervals:
if i.overlap(e):
if self.strand == '+':
p += coord - e.start
else:
p += e.stop - coord - 1
break
p += len(e)
return p
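    # Example (illustrative): for a hypothetical '+' strand transcript with
    # exons chr1:100-130 and chr1:170-200,
    #   chromosome_coordinate_to_mrna(105) -> 5
    #   chromosome_coordinate_to_mrna(175) -> 35   (30 exon-1 bases + 5)
    #   chromosome_coordinate_to_mrna(150) -> None (intronic)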
def chromosome_coordinate_to_cds(self, coord):
if not (self.thick_start <= coord < self.thick_stop):
return None
p = self.chromosome_coordinate_to_mrna(coord)
if p is None:
return p
return self.mrna_coordinate_to_cds(p)
def mrna_coordinate_to_chromosome(self, coord):
if not (0 <= coord < len(self)):
return None
p = 0
exon_intervals = self.exon_intervals if self.strand == '+' else reversed(self.exon_intervals)
for e in exon_intervals:
if p + len(e) > coord:
if self.strand == '+':
return e.start + (coord - p)
else:
return e.stop - (coord - p) - 1
p += len(e)
def mrna_coordinate_to_cds(self, coord):
if self.strand == '+':
cds_start = self.chromosome_coordinate_to_mrna(self.thick_start)
else:
cds_start = self.chromosome_coordinate_to_mrna(self.thick_stop - 1)
r = coord - cds_start
if not (0 <= r < self.cds_size):
return None
return r
def cds_coordinate_to_mrna(self, coord):
if not (0 <= coord < self.cds_size):
return None
if self.strand == '+':
cds_start = self.chromosome_coordinate_to_mrna(self.thick_start)
else:
cds_start = self.chromosome_coordinate_to_mrna(self.thick_stop - 1)
return cds_start + coord
def cds_coordinate_to_chromosome(self, coord):
if not (0 <= coord < self.cds_size):
return None
if self.strand == '+':
cds_start = self.chromosome_coordinate_to_mrna(self.thick_start)
else:
cds_start = self.chromosome_coordinate_to_mrna(self.thick_stop - 1)
c = self.mrna_coordinate_to_chromosome(cds_start + coord)
return c
def get_mrna(self, seq_dict):
"""
Returns the mRNA sequence for this transcript based on a Fasta object.
and the start/end positions and the exons. Sequence returned in
5'-3' transcript orientation.
"""
sequence = seq_dict[self.chromosome]
assert self.stop <= len(sequence) + 1
s = []
for e in self.exon_intervals:
s.append(sequence[e.start:e.stop])
if self.strand == '+':
mrna = ''.join(s)
else:
mrna = reverse_complement(''.join(s))
return str(mrna)
def get_sequence(self, seq_dict):
"""
        Returns the genomic sequence spanning this transcript (start to stop), in (+) strand orientation.
"""
sequence = seq_dict[self.chromosome]
return sequence[self.start:self.stop]
def get_cds(self, seq_dict):
"""
Return the CDS sequence (as a string) for the transcript
(based on the exons) using a sequenceDict as the sequence source.
The returned sequence is in the correct 5'-3' orientation (i.e. it has
been reverse complemented if necessary).
"""
sequence = seq_dict[self.chromosome]
assert self.stop <= len(sequence) + 1
# make sure this isn't a non-coding gene
if self.thick_start == self.thick_stop == 0:
return ''
s = []
for e in self.exon_intervals:
if self.thick_start < e.start and e.stop < self.thick_stop:
# squarely in the CDS
s.append(sequence[e.start:e.stop])
elif e.start <= self.thick_start < e.stop < self.thick_stop:
# thickStart marks the start of the CDS
s.append(sequence[self.thick_start:e.stop])
elif e.start <= self.thick_start and self.thick_stop <= e.stop:
# thickStart and thickStop mark the whole CDS
s.append(sequence[self.thick_start: self.thick_stop])
elif self.thick_start < e.start < self.thick_stop <= e.stop:
# thickStop marks the end of the CDS
s.append(sequence[e.start:self.thick_stop])
if self.strand == '-':
cds = reverse_complement(''.join(s))
else:
cds = ''.join(s)
return str(cds)
def get_protein_sequence(self, seq_dict):
"""
Returns the translated protein sequence for this transcript in single
character space.
"""
cds = self.get_cds(seq_dict)
if len(cds) < 3:
return ''
return translate_sequence(self.get_cds(seq_dict).upper())
def get_start_intervals(self):
"""
Returns one or more ChromosomeInterval objects that represents the starting CDS interval for this transcript.
More than one may exist if the codon is split over a splice junction.
"""
assert self.cds_size >= 3
positions = sorted([self.cds_coordinate_to_chromosome(x) for x in range(3)])
merged_intervals = list(find_intervals(positions))
intervals = [ChromosomeInterval(self.chromosome, i[0], i[-1] + 1, self.strand) for i in merged_intervals]
assert sum(len(x) for x in intervals) == 3
c = 0
for i in intervals:
i.data = convert_frame(c)
c += len(i)
return intervals
def get_stop_intervals(self):
"""
Returns one or more ChromosomeInterval objects that represents the ending CDS interval for this transcript.
More than one may exist if the codon is split over a splice junction.
"""
assert self.cds_size >= 3
positions = sorted([self.cds_coordinate_to_chromosome(x) for x in range(self.cds_size - 3, self.cds_size)])
merged_intervals = list(find_intervals(positions))
intervals = [ChromosomeInterval(self.chromosome, i[0], i[-1] + 1, self.strand) for i in merged_intervals]
assert sum(len(x) for x in intervals) == 3
c = 0
for i in intervals:
i.data = convert_frame(c)
c += len(i)
return intervals
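# Example (illustrative): building a Transcript from a 12-column BED line for
# a hypothetical two-exon gene (exons chr1:100-130 and chr1:170-200) and
# slicing out the first exon:
#   tokens = ['chr1', '100', '200', 'tx1', '0', '+', '100', '200', '0', '2',
#             '30,30', '0,70']
#   tx = Transcript(tokens)
#   tx.get_bed(new_start=100, new_stop=130)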
class GenePredTranscript(Transcript):
"""
Subclasses Transcript to represent genePred entries. genePred entries have the same information, except that they
also tell you whether the CDS is complete on both ends, and the frame information of each exon.
"""
# adding slots for new fields
__slots__ = ('cds_start_stat', 'cds_end_stat', 'exon_frames', 'name2', 'score')
def __init__(self, gene_pred_tokens):
name = gene_pred_tokens[0]
chrom = gene_pred_tokens[1]
strand = gene_pred_tokens[2]
start = gene_pred_tokens[3]
stop = gene_pred_tokens[4]
thick_start = gene_pred_tokens[5]
thick_stop = gene_pred_tokens[6]
block_count = gene_pred_tokens[7]
exon_starts = gene_pred_tokens[8]
exon_ends = gene_pred_tokens[9]
self.score = gene_pred_tokens[10]
self.name2 = gene_pred_tokens[11]
self.cds_start_stat = gene_pred_tokens[12]
self.cds_end_stat = gene_pred_tokens[13]
self.exon_frames = [int(x) for x in gene_pred_tokens[14].split(',') if x != '']
# convert genePred format coordinates to BED-like coordinates to make intervals
block_starts = [int(x) for x in exon_starts.split(',') if x != '']
block_ends = [int(x) for x in exon_ends.split(',') if x != '']
block_sizes = ",".join(map(str, [e - s for e, s in izip(block_ends, block_starts)]))
block_starts = ",".join(map(str, [x - int(start) for x in block_starts]))
bed_tokens = [chrom, start, stop, name, self.score, strand, thick_start, thick_stop, '0', block_count,
block_sizes, block_starts]
super(GenePredTranscript, self).__init__(bed_tokens)
def __repr__(self):
return 'GenePredTranscript({})'.format(self.get_gene_pred())
@property
def offset(self):
frames = [x for x in self.exon_frames if x != -1]
if len(frames) == 0:
return 0
if self.strand == '+':
offset = 3 - frames[0]
else:
offset = 3 - frames[-1]
if offset == 3:
offset = 0
return offset
def _get_exon_intervals(self):
"""
Overrides _get_exon_intervals to attach frame information to the intervals
:return: List of ChromosomeIntervals
"""
exon_intervals = []
for block_size, block_start, frame in izip(*(self.block_sizes, self.block_starts, self.exon_frames)):
start = self.start + block_start
stop = self.start + block_start + block_size
exon_intervals.append(ChromosomeInterval(self.chromosome, start, stop, self.strand, data={'frame': frame}))
return exon_intervals
def _make_exon_idx_iter(self):
"""make iterator exon indexes in order of transcriptions"""
if self.strand == '+':
return xrange(0, len(self.exon_intervals))
else:
return xrange(len(self.exon_intervals) - 1, -1, -1)
def _cds_region(self, cds_interval, frame, expected_frame):
"""Compute the next cds region"""
intervals = []
if frame != expected_frame:
cds_interval, gap_interval = self._adjust_cds_start(cds_interval, expected_frame, frame)
intervals.append(gap_interval)
if len(cds_interval) != 0:
intervals.append(cds_interval)
return self._frame_incr(expected_frame, len(cds_interval)), intervals
def _frame_incr(self, frame, amt=1):
"""increment frame by positive or negative amount"""
if frame >= 0:
return (frame + amt) % 3
else:
amt3 = (-amt) % 3
return (frame - (amt - amt3)) % 3
def _adjust_cds_start(self, cds_interval, expected_frame, frame):
"""adjust cds_interval to match the expected frame. It is possible
for the cds_interval to become zero"""
amt = 0
        # this could be calculated rather than incremented in a loop, but this is
        # safer for the feeble minded
while frame != expected_frame:
frame = self._frame_incr(frame)
amt += 1
# min/max here avoids going negative, making a zero-length block
if cds_interval.strand == '+':
start = min(cds_interval.start + amt, cds_interval.stop)
stop = cds_interval.stop
gap_start = cds_interval.start
gap_stop = cds_interval.start + amt
else:
start = cds_interval.start
stop = max(cds_interval.stop - amt, cds_interval.start)
gap_start = cds_interval.stop - amt
gap_stop = cds_interval.stop
cds_interval = ChromosomeInterval(cds_interval.chromosome, start, stop, cds_interval.strand)
gap_interval = ChromosomeInterval(cds_interval.chromosome, gap_start, gap_stop, cds_interval.strand, 'gap')
return cds_interval, gap_interval
def _get_codon_intervals(self):
"""
Returns a list of intervals, extracting gap intervals and tagging them with data='gap'
"""
expected_frame = 0
codon_regions = []
for iexon in self._make_exon_idx_iter():
cds_interval = self.exon_intervals[iexon].intersection(self.coding_interval)
if cds_interval is not None:
if cds_interval.data is not None: # edge case for single-exon
cds_interval.data = None
expected_frame, intervals = self._cds_region(cds_interval, self.exon_frames[iexon], expected_frame)
codon_regions.extend(intervals)
return codon_regions
def get_cds(self, seq_dict, ignore_frameshift=False):
"""
Using the frame information, we can ignore indels in the CDS that cause frameshifts, producing proper codons.
"""
codon_regions = self._get_codon_intervals()
if ignore_frameshift is True:
# sort, remove gap regions
codon_regions = sorted((i for i in codon_regions if i.data is None), key=lambda x: x.start)
else:
codon_regions = sorted(codon_regions, key=lambda x: x.start)
if self.strand == '+':
cds = ''.join([str(x.get_sequence(seq_dict)) for x in codon_regions])
else:
cds = ''.join([str(x.get_sequence(seq_dict)) for x in codon_regions[::-1]])
return cds
def codon_iterator(self, seq_dict):
"""
Using the frame information, we can ignore indels and iterate codon pairs *along with true genomic coordinates*
"""
codon_regions = sorted(self._get_codon_intervals(), key=lambda x: x.start)
cds = self.get_cds(seq_dict, ignore_frameshift=True)
# construct a dumb list mapping positions to cds positions
positions = []
cds_pos = 0
for i in codon_regions:
if i.data is None:
for p in xrange(i.start, i.stop):
positions.append(p)
cds_pos += 1
if self.strand == '-':
positions = positions[::-1]
for i in xrange(0, cds_pos - cds_pos % 3, 3):
codon = cds[i:i + 3]
if self.strand == '+':
if positions[i + 2] + 1 != self.thick_stop:
yield positions[i], positions[i + 2] + 1, codon
else:
if positions[i + 2] != self.thick_start:
yield positions[i + 2], positions[i] + 1, codon
def get_protein_sequence(self, seq_dict):
"""
Returns the translated protein sequence for this transcript in single character space.
"""
cds = self.get_cds(seq_dict, ignore_frameshift=True)
if len(cds) < 3:
return ""
try:
return translate_sequence(cds.upper())
except AssertionError:
raise RuntimeError('Failed to translate transcript {} with sequence {}'.format(self.name, cds))
def get_gene_pred(self, name=None, new_start=None, new_stop=None, name2=None, score=None):
"""
Returns this transcript as a genePred transcript.
If new_start or new_stop are set (chromosome coordinates), then this record will be changed to only
        show results within that region. The frames field will be properly
        adjusted, and the cds_start_stat/cds_end_stat fields will change to 'unk' if they are moved.
TODO: If this is a transMap transcript, and there were coding indels, the frame information will change to
reflect the new arrangement and the implicit indel information will be lost.
"""
name = self.name if name is None else name
name2 = self.name2 if name2 is None else name2
score = self.score if score is None else score
# if no resizing, just return what we have
if new_start is None and new_stop is None:
exon_starts = ','.join(map(str, [exon.start for exon in self.exon_intervals]))
exon_ends = ','.join(map(str, [exon.stop for exon in self.exon_intervals]))
exon_frames = ','.join(map(str, self.exon_frames))
return map(str, [name, self.chromosome, self.strand, self.start, self.stop, self.thick_start,
self.thick_stop, len(self.exon_intervals), exon_starts, exon_ends, score, name2,
self.cds_start_stat, self.cds_end_stat, exon_frames])
if new_start is not None and new_stop is not None:
assert new_start <= new_stop
if new_start is not None:
assert new_start >= self.start
else:
new_start = self.start
if new_stop is not None:
assert new_stop <= self.stop
else:
new_stop = self.stop
# start slicing out intervals, adjusting the frames
new_interval = ChromosomeInterval(self.chromosome, new_start, new_stop, self.strand)
exon_intervals = []
exon_frames = []
exon_iter = self.exon_intervals if self.strand == '+' else self.exon_intervals[::-1]
frame_iter = self.exon_frames if self.strand == '+' else reversed(self.exon_frames)
# attempt to find the first frame. If there is none, then we have a non-coding transcript and this is easy
try:
starting_frame = [f for f in frame_iter if f != -1][0]
except IndexError: # non-coding transcript
exon_intervals = [exon.intersection(new_interval) for exon in exon_iter]
exon_frames = [-1] * len(exon_intervals)
else: # start following frame to adjust for resized transcript
cds_counter = 0 # keep track of total CDS bases encountered
cds_flag = False
for exon in exon_iter:
new_exon = exon.intersection(new_interval)
if new_exon is None:
continue
exon_intervals.append(new_exon)
coding_exon = exon.intersection(self.coding_interval)
if coding_exon is None:
exon_frames.append(-1)
elif cds_flag is False:
cds_flag = True
exon_frames.append(starting_frame)
cds_counter += len(coding_exon) + starting_frame
else:
exon_frames.append(cds_counter % 3)
cds_counter += len(coding_exon)
# flip back around negative strand transcripts
if self.strand == '-':
exon_intervals = exon_intervals[::-1]
exon_frames = exon_frames[::-1]
# if new_start or new_stop were intronic coordinates, fix this
if new_start != exon_intervals[0].start:
new_start = exon_intervals[0].start
if new_stop != exon_intervals[-1].stop:
new_stop = exon_intervals[-1].stop
thick_start = max(self.thick_start, new_start)
thick_stop = min(self.thick_stop, new_stop)
cds_start_stat = 'unk' if thick_start != self.thick_start else self.cds_start_stat
cds_end_stat = 'unk' if thick_stop != self.thick_stop else self.cds_end_stat
exon_count = len(exon_intervals)
exon_starts = ','.join(map(str, [exon.start for exon in exon_intervals]))
exon_ends = ','.join(map(str, [exon.stop for exon in exon_intervals]))
exon_frames = ','.join(map(str, exon_frames))
return map(str, [name, self.chromosome, self.strand, new_start, new_stop, thick_start, thick_stop, exon_count,
exon_starts, exon_ends, score, name2, cds_start_stat, cds_end_stat, exon_frames])
def get_gene_pred_dict(gp_file):
"""
Produces a dictionary of GenePredTranscripts from a genePred file
:param gp_file: A genePred file path or handle.
:return: A dictionary of name:transcript pairs
"""
return {t.name: t for t in gene_pred_iterator(gp_file)}
def gene_pred_iterator(gp_file):
"""
Iterator for GenePred file or handle, producing tuples of (name, GenePredTranscript)
:param gp_file: A genePred file path or handle.
:return: tuples of (name, GenePredTranscript)
"""
for i, x in enumerate(open(gp_file)):
tokens = x.rstrip().split('\t')
if len(tokens) != 15:
raise RuntimeError('GenePred line {} had {} tokens, not 15. Record: {}'.format(i + 1, len(tokens), tokens))
t = GenePredTranscript(tokens)
yield t
def get_transcript_dict(bed_file):
"""
Produces a dictionary of Transcripts from a BED file
:param bed_file: A BED file path or handle.
:return: A dictionary of name:transcript pairs
"""
return {t.name: t for t in transcript_iterator(bed_file)}
def transcript_iterator(bed_file):
"""
Iterator for BED file or handle, producing tuples of (name, Transcript)
:param bed_file: A BED file path or handle.
:return: tuples of (name, Transcript)
"""
with open(bed_file) as inf:
for tokens in iter_lines(inf):
if len(tokens) != 12:
raise RuntimeError('BED line had {} tokens, not 12. Record: {}'.format(len(tokens), tokens))
t = Transcript(tokens)
yield t
def load_gps(gp_list):
"""helper function that loads a list of genePreds into one mega-dict"""
r = {}
for gp in gp_list:
for t in gene_pred_iterator(gp):
if t.name in r:
raise RuntimeError('Attempted to add duplicate GenePredTranscript object with name {}'.format(t.name))
r[t.name] = t
return r
def convert_frame(exon_frame):
"""converts genePred-style exonFrame to GFF-style phase"""
mapping = {0: 0, 1: 2, 2: 1, -1: '.'}
return mapping[exon_frame]
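# Example (illustrative): convert_frame(0) == 0, convert_frame(1) == 2 and
# convert_frame(-1) == '.', reflecting that genePred exonFrame values 1 and 2
# are swapped relative to GFF phase.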
def create_bed_info_gp(gp):
"""Creates the block_starts, block_sizes and exon_frames fields from a GenePredTranscript object"""
block_starts = ','.join(map(str, gp.block_starts))
block_sizes = ','.join(map(str, gp.block_sizes))
exon_frames = ','.join(map(str, gp.exon_frames))
return block_starts, block_sizes, exon_frames
def group_transcripts_by_name2(tx_iter):
"""Takes a iterable of GenePredTranscript objects and groups them by name2"""
r = collections.defaultdict(list)
for tx in tx_iter:
r[tx.name2].append(tx)
return r
def intervals_to_bed(intervals, name=None, score=0, rgb=0, thick_start=0, thick_stop=0):
"""Converts an iterable of intervals into a Transcript object. If any intervals overlap this will fail"""
assert len(set(i.strand for i in intervals)) == 1
assert len(set(i.chromosome for i in intervals)) == 1
intervals = sorted(intervals)
start = intervals[0].start
stop = intervals[-1].stop
block_sizes = ','.join(map(str, [len(i) for i in intervals]))
block_starts = ','.join(map(str, [i.start - start for i in intervals]))
i = intervals[0]
return Transcript([i.chromosome, start, stop, name, score, i.strand, thick_start, thick_stop, rgb,
len(intervals), block_sizes, block_starts])
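# Example (illustrative): merging two non-overlapping intervals on the same
# strand into a single BED record:
#   ivals = [ChromosomeInterval('chr1', 100, 130, '+'),
#            ChromosomeInterval('chr1', 170, 200, '+')]
#   tx = intervals_to_bed(ivals, name='merged')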
def cluster_txs(txs):
"""Uses a ClusterTree to cluster to cluster transcript objects. TODO: use clusterGenes instead"""
cluster_trees = collections.defaultdict(lambda: ClusterTree(0, 1))
for i, tx in enumerate(txs):
cluster_trees[tx.chromosome].insert(tx.start, tx.stop, i)
# convert the clusters to a nested structure of chrom -> cluster_id -> tx objects
clustered_reads = collections.defaultdict(dict)
cluster_id = 0
for chrom, cluster_tree in cluster_trees.iteritems():
for start, end, interval_indices in cluster_tree.getregions():
clustered_reads[chrom][cluster_id] = [txs[ix] for ix in interval_indices]
cluster_id += 1
return clustered_reads
def divide_clusters(clustered_reads, ref_names):
"""
Takes the output of cluster_txs and splits them into two groups based on having their name be in ref_names or not.
Returns a dict mapping cluster IDs to tuples of [ref_txs, non_ref_txs].
Discards any cluster that does not contain members of both ref and non-ref.
"""
divided_clusters = {}
for chrom in clustered_reads:
for cluster_id, tx_list in clustered_reads[chrom].iteritems():
ref = [tx for tx in tx_list if tx.name in ref_names and len(tx.intron_intervals) > 0]
iso = [tx for tx in tx_list if tx.name not in ref_names and len(tx.intron_intervals) > 0]
if len(ref) > 0 and len(iso) > 0:
divided_clusters[cluster_id] = [ref, iso]
return divided_clusters
def construct_start_stop_intervals(intron_intervals, d):
"""Splits a iterable of intervals into two parallel tuples of 2d bp intervals representing their start and stop"""
left_intervals = []
right_intervals = []
for i in intron_intervals:
left_intervals.append(ChromosomeInterval(i.chromosome, i.start - d, i.start + d, i.strand))
right_intervals.append(ChromosomeInterval(i.chromosome, i.stop - d, i.stop + d, i.strand))
return tuple(left_intervals), tuple(right_intervals)
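# Example (illustrative): with d=8 an intron chr1:150-170 produces the left
# window chr1:142-158 and the right window chr1:162-178; overlapping windows
# are what find_subset_match() below treats as a fuzzy junction match.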
def find_subset_match(iso_intervals, enst_intervals):
"""
Compares intervals produced by construct_start_stop_intervals to each other to find subset matches.
Used for fuzzy matching of IsoSeq transcripts (iso_intervals) to existing annotations (enst_intervals)
"""
iso_l, iso_r = iso_intervals
enst_l, enst_r = enst_intervals
# if we have fewer reference junctions than isoseq, we can't have a subset match by definition
if len(iso_l) > len(enst_l):
return False
lm = all([any([il.overlap(el) for el in enst_l]) for il in iso_l])
lr = all([any([ir.overlap(er) for er in enst_r]) for ir in iso_r])
return lm and lr
def calculate_subset_matches(divided_clusters, fuzz_distance=8, filter_short_intron=30):
"""
A wrapper for find_subset_match that looks at every cluster of transcripts produced by divide_clusters and finds
a fuzzy match between any non-reference sequence and a reference sequence.
"""
r = collections.defaultdict(list)
for cluster_id, (ensts, isos) in divided_clusters.iteritems():
enst_intervals = collections.defaultdict(list)
for tx in ensts:
tx_intervals = [x for x in tx.intron_intervals if len(x) >= filter_short_intron]
enst_interval = construct_start_stop_intervals(tx_intervals, fuzz_distance)
enst_intervals[tuple(enst_interval)].append(tx)
for iso in isos:
iso_intervals = [x for x in iso.intron_intervals if len(x) >= filter_short_intron]
iso_intervals = construct_start_stop_intervals(iso_intervals, fuzz_distance)
for enst_interval, enst_txs in enst_intervals.iteritems():
m = find_subset_match(iso_intervals, enst_interval)
if m:
r[iso.name].extend(enst_txs)
return r
def has_start_codon(fasta, tx):
"""
Does this start with a start codon?
:param fasta: Sequence Dictionary
:param tx: GenePredTranscript object
:return: boolean
"""
if tx.cds_size == 0:
return None
s = tx.get_protein_sequence(fasta)
if len(s) == 0:
return False
return s[0] == 'M'
def has_stop_codon(fasta, tx):
"""
Does this transcript have a valid stop codon?
:param fasta: Sequence Dictionary
:param tx: GenePredTranscript object
:return: boolean
"""
if tx.cds_size == 0:
return None
s = tx.get_protein_sequence(fasta)
if len(s) == 0:
return False
return s[-1] == '*'
| ucsc-mus-strain-cactus/Comparative-Annotation-Toolkit | tools/transcripts.py | Python | apache-2.0 | 35,144 |
import scipy
import sqlite3
import os
import sys
sys.path.append('../')
import CIAO_DatabaseTools
connection = sqlite3.connect(os.environ.get('GRAVITY_SQL')+'GravityObs.db')
cursor = connection.cursor()
cursor.execute("""DROP TABLE GRAVITY_OBS;""")
#"""
#sqlCommand = """
#CREATE TABLE GRAVITY_OBS (
#TIMESTAMP FLOAT PRIMARY KEY,
#DIRECTORY VARCHAR(100),
#ACQCAM_1_STREHL FLOAT,
#ACQCAM_2_STREHL FLOAT,
#ACQCAM_3_STREHL FLOAT,
#ACQCAM_4_STREHL FLOAT);"""
#
#cursor.execute(sqlCommand)
#"""
connection.commit()
connection.close()
GDB = CIAO_DatabaseTools.GRAVITY_Database()
GDB.addTable()
GDB.close()
| soylentdeen/Graffity | src/SQLTools/buildGRAVITYDatabase.py | Python | mit | 613 |
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
from flask_oauthlib.client import OAuth
from config import config
db = SQLAlchemy()
oa = OAuth()
lm = LoginManager()
lm.login_view = 'main.login'
from app.models import User
@lm.user_loader
def load_user(id):
return User.query.get(int(id))
def create_app(config_name):
app = Flask(__name__)
app.config.from_object(config[config_name])
db.init_app(app)
lm.init_app(app)
oa.init_app(app)
from app.converters import WordClassConverter
app.url_map.converters['word_class'] = WordClassConverter
from app.views.main import main
from app.views.oauth import oauth
app.register_blueprint(main)
app.register_blueprint(oauth)
return app
| Encrylize/MyDictionary | app/__init__.py | Python | mit | 790 |
import pypsa
#NB: this test doesn't work for other cases because transformer tap
#ratio and phase angle are not supported by lpf
from pypower.api import ppoption, runpf, case30 as case
from pypower.ppver import ppver
from distutils.version import StrictVersion
pypower_version = StrictVersion(ppver()['Version'])
import pandas as pd
import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal as equal
@pytest.mark.skipif(pypower_version <= '5.0.0',
reason="PyPOWER 5.0.0 is broken with recent numpy and unmaintained since Aug 2017.")
def test_pypower_case():
#ppopt is a dictionary with the details of the optimization routine to run
ppopt = ppoption(PF_ALG=2)
#choose DC or AC
ppopt["PF_DC"] = True
#ppc is a dictionary with details about the network, including baseMVA, branches and generators
ppc = case()
results,success = runpf(ppc, ppopt)
#store results in a DataFrame for easy access
results_df = {}
#branches
columns = 'bus0, bus1, r, x, b, rateA, rateB, rateC, ratio, angle, status, angmin, angmax, p0, q0, p1, q1'.split(", ")
results_df['branch'] = pd.DataFrame(data=results["branch"],columns=columns)
#buses
columns = ["bus","type","Pd","Qd","Gs","Bs","area","v_mag_pu_set","v_ang_set","v_nom","zone","Vmax","Vmin"]
results_df['bus'] = pd.DataFrame(data=results["bus"],columns=columns,index=results["bus"][:,0])
#generators
columns = "bus, p, q, q_max, q_min, Vg, mBase, status, p_max, p_min, Pc1, Pc2, Qc1min, Qc1max, Qc2min, Qc2max, ramp_agc, ramp_10, ramp_30, ramp_q, apf".split(", ")
results_df['gen'] = pd.DataFrame(data=results["gen"],columns=columns)
#now compute in PyPSA
network = pypsa.Network()
network.import_from_pypower_ppc(ppc)
network.lpf()
#compare generator dispatch
p_pypsa = network.generators_t.p.loc["now"].values
p_pypower = results_df['gen']["p"].values
equal(p_pypsa, p_pypower)
#compare branch flows
for item in ["lines", "transformers"]:
df = getattr(network, item)
pnl = getattr(network, item + "_t")
for si in ["p0","p1"]:
si_pypsa = getattr(pnl,si).loc["now"].values
si_pypower = results_df['branch'][si][df.original_index].values
equal(si_pypsa, si_pypower)
| PyPSA/PyPSA | test/test_lpf_against_pypower.py | Python | mit | 2,335 |
import sys
test_cases = open(sys.argv[1], 'r')
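# Each line holds a 16-digit number (possibly with spaces); the loop below
# doubles the digits at even indices, adds the digits at odd indices, and
# calls the number "Real" when the total is divisible by 10 -- a simplified
# Luhn-style check as specified by the challenge.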
for test in test_cases:
striptest = test.replace(" ",'')
# print striptest
acc = 0
for x in range(0, 16, 2):
acc += 2 * int(striptest[x])
for x in range(1, 16, 2):
acc += int(striptest[x])
# print acc
if acc % 10 == 0:
print "Real"
else:
print "Fake"
test_cases.close()
| lavahot/codeeval_python | fakecc/fakecc.py | Python | gpl-3.0 | 346 |
from django.contrib import admin
from simulation.models import SimulationStage, SimulationStageMatch, SimulationStageMatchResult
class SimulationStageAdmin(admin.ModelAdmin):
list_display = ["number", "created_at"]
list_filter = ["created_at"]
class SimulationStageMatchAdmin(admin.ModelAdmin):
list_display = ["stage", "order", "raund",
"cat", "rat", "won", "created_at"]
list_filter = ["stage", "created_at"]
search_fields = ["cat", "rat"]
readonly_fields = ["won", "cat_password", "rat_password", "system_password"]
class SimulationStageMatchResultAdmin(admin.ModelAdmin):
list_display = ["match", "is_caught", "distance", "is_cancelled", "created_at"]
list_filter = ["created_at"]
search_fields = ["match"]
admin.site.register(SimulationStage, SimulationStageAdmin)
admin.site.register(SimulationStageMatch, SimulationStageMatchAdmin)
admin.site.register(SimulationStageMatchResult, SimulationStageMatchResultAdmin) | bilbeyt/ituro | ituro/simulation/admin.py | Python | mit | 972 |
import urllib2, HTMLParser
import re
text_file = open("shakespeare.txt", 'w')
class HTMLShakespeareParser(HTMLParser.HTMLParser):
def handle_starttag(self, tag, attrs):
if tag == "a":
link = dict(attrs)["href"]
if "/index.html" in link:
works.append(link[:-11])
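# Example (illustrative): an index link such as 'hamlet/index.html' yields the
# work id 'hamlet' once the trailing '/index.html' (11 characters) is sliced
# off.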
works = []
html_index_file = urllib2.urlopen("http://shakespeare.mit.edu")
HTMLShakespeareParser().feed(html_index_file.read())
html_index_file.close()
for title in works:
html_data_file = urllib2.urlopen("http://shakespeare.mit.edu/" + title + "/full.html")
html_data = html_data_file.read()
html_data_file.close()
text_file.write(re.sub("\\s+", " ", " " + re.sub("<[^>]*>", " ", html_data.split("<H3>", 1)[1]) + " "))
text_file.close() | amiller27/First-Experiences-in-Research | writer/shakespeare_get.py | Python | gpl-2.0 | 722 |
# -*- coding: utf-8 -*-
# Handles console encoding differences; don't mind it.
import sys
import imp
imp.reload(sys)
try:
sys.setdefaultencoding('UTF8')
except Exception as E:
pass
import testValue
from popbill import MessageService, MessageReceiver, PopbillException
messageService = MessageService(testValue.LinkID, testValue.SecretKey)
messageService.IsTest = testValue.IsTest
messageService.IPRestrictOnOff = testValue.IPRestrictOnOff
messageService.UseStaticIP = testValue.UseStaticIP
messageService.UseLocalTimeYN = testValue.UseLocalTimeYN
'''
[Bulk send] Sends MMS (photo) messages.
- If the message content exceeds 2,000 bytes, the excess is removed automatically.
- Image files may be at most 300KB (JPEG); width/height of 1000px or less is recommended.
- https://docs.popbill.com/message/python/api#SendMMS_Multi
'''
try:
print("=" * 15 + " 멀티메시지(MMS) 다량(최대1000건) 전송 " + "=" * 15)
    # Popbill member business registration number
    CorpNum = testValue.testCorpNum
    # Popbill member ID
    UserID = testValue.testUserID
    # Sender number (for broadcast sending)
    Sender = "07043042992"
    # Long message subject (for broadcast sending)
    Subject = "Broadcast message subject"
    # Long message content (for broadcast sending); content beyond 2,000 bytes is removed automatically.
    Contents = "Broadcast message content"
    # Scheduled send time; leave blank to send immediately (format: yyyyMMddHHmmss)
    reserveDT = ""
    # Path of the file to send (image up to 300KB (JPEG); width/height of 1500px or less recommended)
    filePath = "test.jpeg"
    # Whether this is an advertisement message
    adsYN = False
    # Per-recipient message info list (up to 1,000 entries)
    messages = []
for x in range(0, 1000):
messages.append(
MessageReceiver(
                snd='07043042992',  # sender number
                sndnm='Sender name',  # sender name
                rcv='010111222',  # recipient number
                rcvnm='Recipient name' + str(x),  # recipient name
                msg='Multi message API TEST',  # if msg is empty, the broadcast message content is sent instead.
                sjt='Multi message subject'  # long message subject
)
)
    # Request number for this send.
    # Used when the partner assigns its own management number to each send request.
    # 1-36 characters; letters, digits, hyphen (-) and underscore (_), unique per Popbill member.
RequestNum = ""
receiptNum = messageService.sendMMS_Multi(CorpNum, Sender, Subject, Contents,
messages, filePath, reserveDT, adsYN, UserID, RequestNum)
print("receiptNum : %s" % receiptNum)
except PopbillException as PE:
print("Exception Occur : [%d] %s" % (PE.code, PE.message))
| linkhub-sdk/popbill.message.example.py | sendMMS_multi.py | Python | mit | 2,932 |
from datetime import datetime, timedelta, timezone
import dateutil.parser
import dateutil.tz as tz
class TestReport:
def test_report(self, helper):
user = helper.admin_user()
# Create devices
device0 = helper.given_new_device(self)
device0.location = [20.00025, 20.00025]
device1 = helper.given_new_device(self)
# Upload audio files
now = datetime.now(dateutil.tz.tzlocal()).replace(microsecond=0)
exp_audio_bait_name = "sound2"
sound1 = user.upload_audio_bait({"name": exp_audio_bait_name})
sound2 = user.upload_audio_bait({"name": "sound2"})
sound3 = user.upload_audio_bait({"name": "sound3"})
# Add older audio events for device0
device0.record_event("audioBait", {"fileId": sound1}, [now - timedelta(minutes=5)])
# This is sound we expect to see
exp_audio_bait_time = now - timedelta(minutes=2)
device0.record_event("audioBait", {"fileId": sound2, "volume": 8}, [exp_audio_bait_time])
# these are past the recording time (shouldn't get used)
device0.record_event("audioBait", {"fileId": sound3}, [now + timedelta(seconds=5)])
device0.record_event("audioBait", {"fileId": sound3}, [now + timedelta(minutes=1)])
# Add 2 recordings for device0
rec0 = device0.upload_recording()
rec1 = device0.upload_recording()
# Add a recording for device1
rec2 = device1.upload_recording()
rec3 = device1.upload_audio_recording()
# Add recording tag to 1st recording.
rec0.is_tagged_as(what="cool").by(user)
# Add track and track tags to rec1
user.update_recording(rec1, comment="foo")
track = user.can_add_track_to_recording(rec1)
user.can_tag_track(track, what="possum", automatic=True)
user.can_tag_track(track, what="rat", automatic=False)
user.can_tag_track(track, what="stoat", automatic=False)
# with audiobait
report = ReportChecker(user.get_report(limit=10, audiobait="true"))
report.check_line(rec0, device0)
report.check_line(rec1, device0)
report.check_line(rec2, device1)
report.check_line(rec3, device1)
report.check_audiobait(rec0, exp_audio_bait_name, exp_audio_bait_time)
report.check_audiobait(rec1, exp_audio_bait_name, exp_audio_bait_time)
# without audiobait
report2 = ReportChecker(user.get_report(limit=10))
report2.check_line(rec0, device0)
report2.check_line(rec1, device0)
report2.check_line(rec2, device1)
report2.check_line(rec3, device1)
report2.check_no_audiobait(rec0)
def test_report_jwt_arg(self, helper):
user = helper.admin_user()
device = helper.given_new_device(self)
rec = device.upload_recording()
token = user.new_token()
report = ReportChecker(user.get_report(limit=5, jwt=token))
report.check_line(rec, device)
class ReportChecker:
def __init__(self, lines):
self._lines = {}
for line in lines:
recording_id = int(line["Id"])
assert recording_id not in self._lines
self._lines[recording_id] = line
def check_no_audiobait(self, rec):
line = self._lines.get(rec.id_)
assert "Audio Bait" not in line
def check_audiobait(self, rec, exp_audio_bait_name=None, exp_audio_bait_time=None):
line = self._lines.get(rec.id_)
if exp_audio_bait_name:
assert line["Audio Bait"] == exp_audio_bait_name
assert line["Audio Bait Volume"] == "8"
else:
assert line["Audio Bait"] == ""
assert line["Mins Since Audio Bait"] == ""
assert line["Audio Bait Volume"] == ""
def check_line(self, rec, device):
line = self._lines.get(rec.id_)
assert line is not None
assert line["Type"] == rec["type"]
assert int(line["Duration"]) == rec["duration"]
assert line["Group"] == device.group
assert line["Device"] == device.devicename
if device.location:
assert line["Latitude"] == "{}".format(device.location[0])
assert line["Longitude"] == "{}".format(device.location[1])
assert line["Comment"] == rec["comment"]
assert line["BatteryPercent"] == "98"
assert int(line["Track Count"]) == len(rec.tracks)
expected_auto_tags = []
expected_human_tags = []
for track in rec.tracks:
for tag in track.tags:
if tag.automatic:
expected_auto_tags.append(tag.what)
else:
expected_human_tags.append(tag.what)
assert line["Automatic Track Tags"] == format_tags(expected_auto_tags)
assert line["Human Track Tags"] == format_tags(expected_human_tags)
assert line["Recording Tags"] == format_tags(t["what"] for t in rec.tags)
assert line["URL"] == "http://test.site/recording/" + str(rec.id_)
index = rec.props.get("additionalMetadata", {}).get("analysis", {}).get("cacophony_index")
if index:
percents = [str(period["index_percent"]) for period in index]
assert line["Cacophony Index"] == ";".join(percents)
else:
assert line["Cacophony Index"] == ""
def format_tags(items):
return "+".join(items)
| TheCacophonyProject/Full_Noise | test/test_report.py | Python | agpl-3.0 | 5,424 |
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from polyaxon.proxies.generators.base import write_to_conf_file
from polyaxon.proxies.schemas.api import get_base_config, get_main_config
def generate_api_conf(path=None, root=None):
write_to_conf_file("polyaxon.main", get_main_config(root), path)
write_to_conf_file("polyaxon.base", get_base_config(), path)
| polyaxon/polyaxon | core/polyaxon/proxies/generators/api.py | Python | apache-2.0 | 924 |
from rest_framework import permissions
class IsUserOwner(permissions.BasePermission):
def has_object_permission(self, request, view, user):
if request.user:
return request.user == user
return False | weichen2046/IntellijPluginDevDemo | enterprise-repo/enterprepo/apiv1/permissions.py | Python | apache-2.0 | 230 |
"""empty message
Revision ID: 75ad68311ea9
Revises: a578b9de5d89
Create Date: 2016-12-13 23:01:43.813979
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '75ad68311ea9'
down_revision = 'a578b9de5d89'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('users', sa.Column('email', sa.String(length=64), nullable=True))
op.create_index(op.f('ix_users_email'), 'users', ['email'], unique=True)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_users_email'), table_name='users')
op.drop_column('users', 'email')
# ### end Alembic commands ###
| Lifeistrange/flaskweb | migrations/versions/75ad68311ea9_.py | Python | mit | 795 |
MODULE_INFO = {
'animation': {'path': 'animation/animation-min.js',
'requires': ['dom', 'event'],
'type': 'js'},
'autocomplete': {'optional': ['connection', 'animation'],
'path': 'autocomplete/autocomplete-min.js',
'requires': ['dom', 'event', 'datasource'],
'skinnable': True,
'type': 'js'},
'base': {'after': ['reset', 'fonts', 'grids'],
'path': 'base/base-min.css',
'type': 'css'},
'button': {'optional': ['menu'],
'path': 'button/button-min.js',
'requires': ['element'],
'skinnable': True,
'type': 'js'},
'calendar': {'path': 'calendar/calendar-min.js',
'requires': ['event', 'dom'],
'skinnable': True,
'type': 'js'},
'carousel': {'optional': ['animation'],
'path': 'carousel/carousel-min.js',
'requires': ['element'],
'skinnable': True,
'type': 'js'},
'charts': {'path': 'charts/charts-min.js',
'requires': ['element', 'json', 'datasource'],
'type': 'js'},
'colorpicker': {'optional': ['animation'],
'path': 'colorpicker/colorpicker-min.js',
'requires': ['slider', 'element'],
'skinnable': True,
'type': 'js'},
'connection': {'path': 'connection/connection-min.js',
'requires': ['event'],
'type': 'js'},
'container': {'optional': ['dragdrop', 'animation', 'connection'],
'path': 'container/container-min.js',
'requires': ['dom', 'event'],
'skinnable': True,
'supersedes': ['containercore'],
'type': 'js'},
'containercore': {'path': 'container/container_core-min.js',
'pkg': 'container',
'requires': ['dom', 'event'],
'type': 'js'},
'cookie': {'path': 'cookie/cookie-min.js',
'requires': ['yahoo'],
'type': 'js'},
'datasource': {'optional': ['connection'],
'path': 'datasource/datasource-min.js',
'requires': ['event'],
'type': 'js'},
'datatable': {'optional': ['calendar', 'dragdrop', 'paginator'],
'path': 'datatable/datatable-min.js',
'requires': ['element', 'datasource'],
'skinnable': True,
'type': 'js'},
'dom': {'path': 'dom/dom-min.js', 'requires': ['yahoo'], 'type': 'js'},
'dragdrop': {'path': 'dragdrop/dragdrop-min.js',
'requires': ['dom', 'event'],
'type': 'js'},
'editor': {'optional': ['animation', 'dragdrop'],
'path': 'editor/editor-min.js',
'requires': ['menu', 'element', 'button'],
'skinnable': True,
'supersedes': ['simpleeditor'],
'type': 'js'},
'element': {'path': 'element/element-min.js',
'requires': ['dom', 'event'],
'type': 'js'},
'event': {'path': 'event/event-min.js', 'requires': ['yahoo'], 'type': 'js'},
'fonts': {'path': 'fonts/fonts-min.css', 'type': 'css'},
'get': {'path': 'get/get-min.js', 'requires': ['yahoo'], 'type': 'js'},
'grids': {'optional': ['reset'],
'path': 'grids/grids-min.css',
'requires': ['fonts'],
'type': 'css'},
'history': {'path': 'history/history-min.js',
'requires': ['event'],
'type': 'js'},
'imagecropper': {'path': 'imagecropper/imagecropper-min.js',
'requires': ['dom', 'event', 'dragdrop', 'element', 'resize'],
'skinnable': True,
'type': 'js'},
'imageloader': {'path': 'imageloader/imageloader-min.js',
'requires': ['event', 'dom'],
'type': 'js'},
'json': {'path': 'json/json-min.js', 'requires': ['yahoo'], 'type': 'js'},
'layout': {'optional': ['animation', 'dragdrop', 'resize', 'selector'],
'path': 'layout/layout-min.js',
'requires': ['dom', 'event', 'element'],
'skinnable': True,
'type': 'js'},
'logger': {'optional': ['dragdrop'],
'path': 'logger/logger-min.js',
'requires': ['event', 'dom'],
'skinnable': True,
'type': 'js'},
'menu': {'path': 'menu/menu-min.js',
'requires': ['containercore'],
'skinnable': True,
'type': 'js'},
'paginator': {'path': 'paginator/paginator-min.js',
'requires': ['element'],
'skinnable': True,
'type': 'js'},
'profiler': {'path': 'profiler/profiler-min.js',
'requires': ['yahoo'],
'type': 'js'},
'profilerviewer': {'path': 'profilerviewer/profilerviewer-min.js',
'requires': ['profiler', 'yuiloader', 'element'],
'skinnable': True,
'type': 'js'},
'reset': {'path': 'reset/reset-min.css', 'type': 'css'},
'reset-fonts': {'path': 'reset-fonts/reset-fonts.css',
'rollup': 2,
'supersedes': ['reset', 'fonts'],
'type': 'css'},
'reset-fonts-grids': {'path': 'reset-fonts-grids/reset-fonts-grids.css',
'rollup': 4,
'supersedes': ['reset', 'fonts', 'grids', 'reset-fonts'],
'type': 'css'},
'resize': {'optional': ['animation'],
'path': 'resize/resize-min.js',
'requires': ['dom', 'event', 'dragdrop', 'element'],
'skinnable': True,
'type': 'js'},
'selector': {'path': 'selector/selector-min.js',
'requires': ['yahoo', 'dom'],
'type': 'js'},
'simpleeditor': {'optional': ['containercore',
'menu',
'button',
'animation',
'dragdrop'],
'path': 'editor/simpleeditor-min.js',
'pkg': 'editor',
'requires': ['element'],
'skinnable': True,
'type': 'js'},
'slider': {'optional': ['animation'],
'path': 'slider/slider-min.js',
'requires': ['dragdrop'],
'skinnable': True,
'type': 'js'},
'stylesheet': {'path': 'stylesheet/stylesheet-min.js',
'requires': ['yahoo'],
'type': 'js'},
'tabview': {'optional': ['connection'],
'path': 'tabview/tabview-min.js',
'requires': ['element'],
'skinnable': True,
'type': 'js'},
'treeview': {'optional': ['json'],
'path': 'treeview/treeview-min.js',
'requires': ['event', 'dom'],
'skinnable': True,
'type': 'js'},
'uploader': {'path': 'uploader/uploader.js',
'requires': ['element'],
'type': 'js'},
'utilities': {'path': 'utilities/utilities.js',
'rollup': 8,
'supersedes': ['yahoo',
'event',
'dragdrop',
'animation',
'dom',
'connection',
'element',
'yahoo-dom-event',
'get',
'yuiloader',
'yuiloader-dom-event'],
'type': 'js'},
'yahoo': {'path': 'yahoo/yahoo-min.js', 'type': 'js'},
'yahoo-dom-event': {'path': 'yahoo-dom-event/yahoo-dom-event.js',
'rollup': 3,
'supersedes': ['yahoo', 'event', 'dom'],
'type': 'js'},
'yuiloader': {'path': 'yuiloader/yuiloader-min.js',
'supersedes': ['yahoo', 'get'],
'type': 'js'},
'yuiloader-dom-event': {'path': 'yuiloader-dom-event/yuiloader-dom-event.js',
'rollup': 5,
'supersedes': ['yahoo',
'dom',
'event',
'get',
'yuiloader',
'yahoo-dom-event'],
'type': 'js'},
'yuitest': {'path': 'yuitest/yuitest-min.js',
'requires': ['logger'],
'skinnable': True,
'type': 'js'}}
SKIN = {
'after': ['reset', 'fonts', 'grids', 'base'],
'base': 'assets/skins/',
'defaultSkin': 'sam',
'path': 'skin.css',
'rollup': 3}
MODULE_INFO[SKIN['defaultSkin']] = {
'type': 'css',
'path': SKIN['base'] + SKIN['defaultSkin'] + '/' + SKIN['path'],
'after': SKIN['after'] }
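# Quick sanity check of the skin wiring above (illustrative, derived from the
# SKIN defaults): the generated entry resolves to
#   MODULE_INFO['sam'] == {'type': 'css',
#                          'path': 'assets/skins/sam/skin.css',
#                          'after': ['reset', 'fonts', 'grids', 'base']}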
| akaihola/django-yui-loader | yui_loader/module_info_2_7_0.py | Python | bsd-3-clause | 9,269 |
#!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi ([email protected])
# Eric Anderson ([email protected])
# module_check: supported
# Avi Version: 16.3.8
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'version': '1.0'}
DOCUMENTATION = '''
---
module: avi_role
author: Gaurav Rastogi ([email protected])
short_description: Module for setup of Role Avi RESTful Object
description:
- This module is used to configure Role object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.3"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
name:
description:
- Name of the object.
required: true
privileges:
description:
            - List of permissions.
tenant_ref:
description:
- It is a reference to an object of type tenant.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create Role object
avi_role:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_role
"""
RETURN = '''
obj:
description: Role (api/role) object
returned: success, changed
type: dict
'''
from pkg_resources import parse_version
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.avi import avi_common_argument_spec
HAS_AVI = True
try:
import avi.sdk
sdk_version = getattr(avi.sdk, '__version__', None)
if ((sdk_version is None) or (sdk_version and
(parse_version(sdk_version) < parse_version('16.3.5.post1')))):
# It allows the __version__ to be '' as that value is used in development builds
raise ImportError
from avi.sdk.utils.ansible_utils import avi_ansible_api
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
name=dict(type='str', required=True),
privileges=dict(type='list',),
tenant_ref=dict(type='str',),
url=dict(type='str',),
uuid=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=16.3.5.post1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'role',
set([]))
if __name__ == '__main__':
main()
| 0x46616c6b/ansible | lib/ansible/modules/network/avi/avi_role.py | Python | gpl-3.0 | 3,591 |
"""
Video player in the courseware.
"""
import time
import requests
from selenium.webdriver.common.action_chains import ActionChains
from bok_choy.page_object import PageObject
from bok_choy.promise import EmptyPromise, Promise
from bok_choy.javascript import wait_for_js, js_defined
import logging
log = logging.getLogger('VideoPage')
VIDEO_BUTTONS = {
'CC': '.hide-subtitles',
'volume': '.volume',
'play': '.video_control.play',
'pause': '.video_control.pause',
'fullscreen': '.add-fullscreen',
'download_transcript': '.video-tracks > a',
'speed': '.speeds',
'quality': '.quality-control',
}
CSS_CLASS_NAMES = {
'closed_captions': '.closed .subtitles',
'captions_rendered': '.video.is-captions-rendered',
'captions': '.subtitles',
'captions_text': '.subtitles > li',
'error_message': '.video .video-player h3',
'video_container': 'div.video',
'video_sources': '.video-player video source',
'video_spinner': '.video-wrapper .spinner',
'video_xmodule': '.xmodule_VideoModule',
'video_init': '.is-initialized',
'video_time': 'div.vidtime',
'video_display_name': '.vert h2',
'captions_lang_list': '.langs-list li',
'video_speed': '.speeds .value'
}
VIDEO_MODES = {
'html5': 'div.video video',
'youtube': 'div.video iframe'
}
VIDEO_MENUS = {
'language': '.lang .menu',
'speed': '.speed .menu',
'download_transcript': '.video-tracks .a11y-menu-list',
'transcript-format': '.video-tracks .a11y-menu-button'
}
@js_defined('window.Video', 'window.RequireJS.require', 'window.jQuery',
'window.MathJax', 'window.MathJax.isReady')
class VideoPage(PageObject):
"""
Video player in the courseware.
"""
url = None
current_video_display_name = None
@wait_for_js
def is_browser_on_page(self):
return self.q(css='div{0}'.format(CSS_CLASS_NAMES['video_xmodule'])).present
@wait_for_js
def wait_for_video_class(self):
"""
Wait until element with class name `video` appeared in DOM.
"""
self.wait_for_ajax()
video_selector = '{0}'.format(CSS_CLASS_NAMES['video_container'])
self.wait_for_element_presence(video_selector, 'Video is initialized')
@wait_for_js
def wait_for_video_player_render(self):
"""
Wait until Video Player Rendered Completely.
"""
self.wait_for_video_class()
self.wait_for_element_presence(CSS_CLASS_NAMES['video_init'], 'Video Player Initialized')
self.wait_for_element_presence(CSS_CLASS_NAMES['video_time'], 'Video Player Initialized')
video_player_buttons = ['volume', 'play', 'fullscreen', 'speed']
for button in video_player_buttons:
self.wait_for_element_visibility(VIDEO_BUTTONS[button], '{} button is visible'.format(button.title()))
def _is_finished_loading():
"""
Check if video loading completed.
Returns:
bool: Tells Video Finished Loading.
"""
return not self.q(css=CSS_CLASS_NAMES['video_spinner']).visible
EmptyPromise(_is_finished_loading, 'Finished loading the video', timeout=200).fulfill()
self.wait_for_ajax()
def get_video_vertical_selector(self, video_display_name=None):
"""
Get selector for a video vertical with display name specified by `video_display_name`.
Arguments:
video_display_name (str or None): Display name of a Video. Default vertical selector if None.
Returns:
str: Vertical Selector for video.
"""
if video_display_name:
video_display_names = self.q(css=CSS_CLASS_NAMES['video_display_name']).text
if video_display_name not in video_display_names:
raise ValueError("Incorrect Video Display Name: '{0}'".format(video_display_name))
return '.vert.vert-{}'.format(video_display_names.index(video_display_name))
else:
return '.vert.vert-0'
def get_element_selector(self, class_name, vertical=True):
"""
Construct unique element selector.
Arguments:
class_name (str): css class name for an element.
vertical (bool): do we need vertical css selector or not. vertical css selector is not present in Studio
Returns:
str: Element Selector.
"""
if vertical:
return '{vertical} {video_element}'.format(
vertical=self.get_video_vertical_selector(self.current_video_display_name),
video_element=class_name)
else:
return class_name
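    # Example of the composed selector (hypothetical page layout): with the
    # default vertical, get_element_selector('.video-controls') yields
    # '.vert.vert-0 .video-controls', while vertical=False returns the class
    # name unchanged.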
def use_video(self, video_display_name):
"""
Set current video display name.
Arguments:
video_display_name (str): Display name of a Video.
"""
self.current_video_display_name = video_display_name
def is_video_rendered(self, mode):
"""
Check that if video is rendered in `mode`.
Arguments:
mode (str): Video mode, `html5` or `youtube`.
Returns:
bool: Tells if video is rendered in `mode`.
"""
selector = self.get_element_selector(VIDEO_MODES[mode])
def _is_element_present():
"""
Check if a web element is present in DOM.
Returns:
                tuple: `(is_satisfied, result)`, where `is_satisfied` is a boolean indicating whether the promise was
satisfied, and `result` is a value to return from the fulfilled `Promise`.
"""
is_present = self.q(css=selector).present
return is_present, is_present
return Promise(_is_element_present, 'Video Rendering Failed in {0} mode.'.format(mode)).fulfill()
@property
def is_autoplay_enabled(self):
"""
Extract `data-autoplay` attribute to check video autoplay is enabled or disabled.
Returns:
bool: Tells if autoplay enabled/disabled.
"""
selector = self.get_element_selector(CSS_CLASS_NAMES['video_container'])
auto_play = self.q(css=selector).attrs('data-autoplay')[0]
if auto_play.lower() == 'false':
return False
return True
@property
def is_error_message_shown(self):
"""
Checks if video player error message shown.
Returns:
bool: Tells about error message visibility.
"""
selector = self.get_element_selector(CSS_CLASS_NAMES['error_message'])
return self.q(css=selector).visible
@property
def is_spinner_shown(self):
"""
Checks if video spinner shown.
Returns:
bool: Tells about spinner visibility.
"""
selector = self.get_element_selector(CSS_CLASS_NAMES['video_spinner'])
return self.q(css=selector).visible
@property
def error_message_text(self):
"""
Extract video player error message text.
Returns:
str: Error message text.
"""
selector = self.get_element_selector(CSS_CLASS_NAMES['error_message'])
return self.q(css=selector).text[0]
def is_button_shown(self, button_id):
"""
Check if a video button specified by `button_id` is visible.
Arguments:
button_id (str): key in VIDEO_BUTTONS dictionary, its value will give us the css selector for button.
Returns:
            bool: Tells about a button's visibility.
"""
selector = self.get_element_selector(VIDEO_BUTTONS[button_id])
return self.q(css=selector).visible
def show_captions(self):
"""
Make Captions Visible.
"""
self._captions_visibility(True)
def hide_captions(self):
"""
Make Captions Invisible.
"""
self._captions_visibility(False)
def is_captions_visible(self):
"""
        Get current visibility state of captions.
Returns:
bool: True means captions are visible, False means captions are not visible
"""
caption_state_selector = self.get_element_selector(CSS_CLASS_NAMES['closed_captions'])
return not self.q(css=caption_state_selector).present
@wait_for_js
def _captions_visibility(self, captions_new_state):
"""
Set the video captions visibility state.
Arguments:
captions_new_state (bool): True means show captions, False means hide captions
"""
states = {True: 'Shown', False: 'Hidden'}
state = states[captions_new_state]
# Make sure that the CC button is there
EmptyPromise(lambda: self.is_button_shown('CC'),
"CC button is shown").fulfill()
# toggle captions visibility state if needed
if self.is_captions_visible() != captions_new_state:
self.click_player_button('CC')
# Verify that captions state is toggled/changed
EmptyPromise(lambda: self.is_captions_visible() == captions_new_state,
"Captions are {state}".format(state=state)).fulfill()
@property
def captions_text(self):
"""
Extract captions text.
Returns:
str: Captions Text.
"""
self.wait_for_captions()
captions_selector = self.get_element_selector(CSS_CLASS_NAMES['captions_text'])
subs = self.q(css=captions_selector).html
return ' '.join(subs)
@property
def speed(self):
"""
Get current video speed value.
Return:
str: speed value
"""
speed_selector = self.get_element_selector(CSS_CLASS_NAMES['video_speed'])
return self.q(css=speed_selector).text[0]
@speed.setter
def speed(self, speed):
"""
Change the video play speed.
Arguments:
speed (str): Video speed value
"""
# mouse over to video speed button
speed_menu_selector = self.get_element_selector(VIDEO_BUTTONS['speed'])
element_to_hover_over = self.q(css=speed_menu_selector).results[0]
hover = ActionChains(self.browser).move_to_element(element_to_hover_over)
hover.perform()
speed_selector = self.get_element_selector('li[data-speed="{speed}"] a'.format(speed=speed))
self.q(css=speed_selector).first.click()
def click_player_button(self, button):
"""
Click on `button`.
Arguments:
button (str): key in VIDEO_BUTTONS dictionary, its value will give us the css selector for `button`
"""
button_selector = self.get_element_selector(VIDEO_BUTTONS[button])
        # If we are going to click the pause button, ensure that the player is not in a buffering state
if button == 'pause':
self.wait_for(lambda: self.state != 'buffering', 'Player is Ready for Pause')
self.q(css=button_selector).first.click()
self.wait_for_ajax()
def _get_element_dimensions(self, selector):
"""
Gets the width and height of element specified by `selector`
Arguments:
selector (str): css selector of a web element
Returns:
dict: Dimensions of a web element.
"""
element = self.q(css=selector).results[0]
return element.size
@property
def _dimensions(self):
"""
Gets the video player dimensions.
Returns:
tuple: Dimensions
"""
iframe_selector = self.get_element_selector('.video-player iframe,')
video_selector = self.get_element_selector(' .video-player video')
video = self._get_element_dimensions(iframe_selector + video_selector)
wrapper = self._get_element_dimensions(self.get_element_selector('.tc-wrapper'))
controls = self._get_element_dimensions(self.get_element_selector('.video-controls'))
progress_slider = self._get_element_dimensions(
self.get_element_selector('.video-controls > .slider'))
expected = dict(wrapper)
expected['height'] -= controls['height'] + 0.5 * progress_slider['height']
return video, expected
def is_aligned(self, is_transcript_visible):
"""
Check if video is aligned properly.
Arguments:
is_transcript_visible (bool): Transcript is visible or not.
Returns:
bool: Alignment result.
"""
        # Width of the video container in css equals 75% of window if transcript enabled
wrapper_width = 75 if is_transcript_visible else 100
initial = self.browser.get_window_size()
self.browser.set_window_size(300, 600)
# Wait for browser to resize completely
# Currently there is no other way to wait instead of explicit wait
time.sleep(0.2)
real, expected = self._dimensions
width = round(100 * real['width'] / expected['width']) == wrapper_width
self.browser.set_window_size(600, 300)
# Wait for browser to resize completely
# Currently there is no other way to wait instead of explicit wait
time.sleep(0.2)
real, expected = self._dimensions
height = abs(expected['height'] - real['height']) <= 5
# Restore initial window size
self.browser.set_window_size(
initial['width'], initial['height']
)
return all([width, height])
def _get_transcript(self, url):
"""
Download Transcript from `url`
"""
kwargs = dict()
session_id = [{i['name']: i['value']} for i in self.browser.get_cookies() if i['name'] == u'sessionid']
if session_id:
kwargs.update({
'cookies': session_id[0]
})
response = requests.get(url, **kwargs)
return response.status_code < 400, response.headers, response.content
def downloaded_transcript_contains_text(self, transcript_format, text_to_search):
"""
Download the transcript in format `transcript_format` and check that it contains the text `text_to_search`
Arguments:
transcript_format (str): Transcript file format `srt` or `txt`
text_to_search (str): Text to search in Transcript.
Returns:
bool: Transcript download result.
"""
transcript_selector = self.get_element_selector(VIDEO_MENUS['transcript-format'])
# check if we have a transcript with correct format
if '.' + transcript_format not in self.q(css=transcript_selector).text[0]:
return False
formats = {
'srt': 'application/x-subrip',
'txt': 'text/plain',
}
transcript_url_selector = self.get_element_selector(VIDEO_BUTTONS['download_transcript'])
url = self.q(css=transcript_url_selector).attrs('href')[0]
result, headers, content = self._get_transcript(url)
if result is False:
return False
if formats[transcript_format] not in headers.get('content-type', ''):
return False
if text_to_search not in content.decode('utf-8'):
return False
return True
def current_language(self):
"""
Get current selected video transcript language.
"""
selector = self.get_element_selector(VIDEO_MENUS["language"] + ' li.is-active')
return self.q(css=selector).first.attrs('data-lang-code')[0]
def select_language(self, code):
"""
Select captions for language `code`.
Arguments:
code (str): two character language code like `en`, `zh`.
"""
self.wait_for_ajax()
# mouse over to CC button
cc_button_selector = self.get_element_selector(VIDEO_BUTTONS["CC"])
element_to_hover_over = self.q(css=cc_button_selector).results[0]
ActionChains(self.browser).move_to_element(element_to_hover_over).perform()
language_selector = VIDEO_MENUS["language"] + ' li[data-lang-code="{code}"]'.format(code=code)
language_selector = self.get_element_selector(language_selector)
self.wait_for_element_visibility(language_selector, 'language menu is visible')
self.q(css=language_selector).first.click()
# Sometimes language is not clicked correctly. So, if the current language code
        # differs from the expected, we try to change it again.
if self.current_language() != code:
self.select_language(code)
if 'is-active' != self.q(css=language_selector).attrs('class')[0]:
return False
active_lang_selector = self.get_element_selector(VIDEO_MENUS["language"] + ' li.is-active')
if len(self.q(css=active_lang_selector).results) != 1:
return False
# Make sure that all ajax requests that affects the display of captions are finished.
# For example, request to get new translation etc.
self.wait_for_ajax()
captions_selector = self.get_element_selector(CSS_CLASS_NAMES['captions'])
EmptyPromise(lambda: self.q(css=captions_selector).visible, 'Subtitles Visible').fulfill()
self.wait_for_captions()
return True
def is_menu_present(self, menu_name):
"""
Check if menu `menu_name` exists.
Arguments:
menu_name (str): Menu key from VIDEO_MENUS.
Returns:
bool: Menu existence result
"""
selector = self.get_element_selector(VIDEO_MENUS[menu_name])
return self.q(css=selector).present
def select_transcript_format(self, transcript_format):
"""
Select transcript with format `transcript_format`.
Arguments:
transcript_format (st): Transcript file format `srt` or `txt`.
Returns:
bool: Selection Result.
"""
button_selector = self.get_element_selector(VIDEO_MENUS['transcript-format'])
button = self.q(css=button_selector).results[0]
coord_y = button.location_once_scrolled_into_view['y']
self.browser.execute_script("window.scrollTo(0, {});".format(coord_y))
hover = ActionChains(self.browser).move_to_element(button)
hover.perform()
if '...' not in self.q(css=button_selector).text[0]:
return False
menu_selector = self.get_element_selector(VIDEO_MENUS['download_transcript'])
menu_items = self.q(css=menu_selector + ' a').results
for item in menu_items:
if item.get_attribute('data-value') == transcript_format:
item.click()
self.wait_for_ajax()
break
self.browser.execute_script("window.scrollTo(0, 0);")
if self.q(css=menu_selector + ' .active a').attrs('data-value')[0] != transcript_format:
return False
if '.' + transcript_format not in self.q(css=button_selector).text[0]:
return False
return True
@property
def sources(self):
"""
Extract all video source urls on current page.
Returns:
list: Video Source URLs.
"""
sources_selector = self.get_element_selector(CSS_CLASS_NAMES['video_sources'])
return self.q(css=sources_selector).map(lambda el: el.get_attribute('src').split('?')[0]).results
@property
def caption_languages(self):
"""
Get caption languages available for a video.
Returns:
dict: Language Codes('en', 'zh' etc) as keys and Language Names as Values('English', 'Chinese' etc)
"""
languages_selector = self.get_element_selector(CSS_CLASS_NAMES['captions_lang_list'])
language_codes = self.q(css=languages_selector).attrs('data-lang-code')
language_names = self.q(css=languages_selector).attrs('textContent')
return dict(zip(language_codes, language_names))
@property
def position(self):
"""
Gets current video slider position.
Returns:
str: current seek position in format min:sec.
"""
selector = self.get_element_selector(CSS_CLASS_NAMES['video_time'])
current_seek_position = self.q(css=selector).text[0]
return current_seek_position.split('/')[0].strip()
@property
def seconds(self):
"""
Extract seconds part from current video slider position.
Returns:
            int: seconds part of the current position.
"""
return int(self.position.split(':')[1])
@property
def state(self):
"""
Extract the current state (play, pause etc) of video.
Returns:
str: current video state
"""
state_selector = self.get_element_selector(CSS_CLASS_NAMES['video_container'])
current_state = self.q(css=state_selector).attrs('class')[0]
# For troubleshooting purposes show what the current state is.
# The debug statements will only be displayed in the event of a failure.
        log.debug("Current state of '{}' element is '{}'".format(state_selector, current_state))
# See the JS video player's onStateChange function
if 'is-playing' in current_state:
return 'playing'
elif 'is-paused' in current_state:
return 'pause'
elif 'is-buffered' in current_state:
return 'buffering'
elif 'is-ended' in current_state:
return 'finished'
def _wait_for(self, check_func, desc, result=False, timeout=200, try_interval=0.2):
"""
Calls the method provided as an argument until the Promise satisfied or BrokenPromise
Arguments:
check_func (callable): Function that accepts no arguments and returns a boolean indicating whether the promise is fulfilled.
desc (str): Description of the Promise, used in log messages.
            result (bool): Indicates whether we need a result from the Promise or not
timeout (float): Maximum number of seconds to wait for the Promise to be satisfied before timing out.
"""
if result:
return Promise(check_func, desc, timeout=timeout, try_interval=try_interval).fulfill()
else:
return EmptyPromise(check_func, desc, timeout=timeout, try_interval=try_interval).fulfill()
def wait_for_state(self, state):
"""
Wait until `state` occurs.
Arguments:
state (str): State we wait for.
"""
self._wait_for(
lambda: self.state == state,
'State is {state}'.format(state=state)
)
def seek(self, seek_value):
"""
Seek the video to position specified by `seek_value`.
Arguments:
seek_value (str): seek value
"""
seek_time = _parse_time_str(seek_value)
seek_selector = self.get_element_selector(' .video')
js_code = "$('{seek_selector}').data('video-player-state').videoPlayer.onSlideSeek({{time: {seek_time}}})".format(
seek_selector=seek_selector, seek_time=seek_time)
self.browser.execute_script(js_code)
# after seek, player goes into `is-buffered` state. we need to get
# out of this state before doing any further operation/action.
def _is_buffering_completed():
"""
Check if buffering completed
"""
return self.state != 'buffering'
self._wait_for(_is_buffering_completed, 'Buffering completed after Seek.')
def reload_page(self):
"""
Reload/Refresh the current video page.
"""
self.browser.refresh()
self.wait_for_video_player_render()
@property
def duration(self):
"""
Extract video duration.
Returns:
str: duration in format min:sec
"""
selector = self.get_element_selector(CSS_CLASS_NAMES['video_time'])
# The full time has the form "0:32 / 3:14" elapsed/duration
all_times = self.q(css=selector).text[0]
duration_str = all_times.split('/')[1]
return duration_str.strip()
def wait_for_position(self, position):
"""
Wait until current will be equal to `position`.
Arguments:
position (str): position we wait for.
"""
self._wait_for(
lambda: self.position == position,
'Position is {position}'.format(position=position)
)
@property
def is_quality_button_visible(self):
"""
Get the visibility state of quality button
Returns:
bool: visibility status
"""
selector = self.get_element_selector(VIDEO_BUTTONS['quality'])
return self.q(css=selector).visible
@property
def is_quality_button_active(self):
"""
Check if quality button is active or not.
Returns:
bool: active status
"""
selector = self.get_element_selector(VIDEO_BUTTONS['quality'])
classes = self.q(css=selector).attrs('class')[0].split()
return 'active' in classes
def wait_for_captions(self):
"""
Wait until captions rendered completely.
"""
captions_rendered_selector = self.get_element_selector(CSS_CLASS_NAMES['captions_rendered'])
self.wait_for_element_presence(captions_rendered_selector, 'Captions Rendered')
def _parse_time_str(time_str):
"""
Parse a string of the form 1:23 into seconds (int).
Arguments:
time_str (str): seek value
Returns:
int: seek value in seconds
"""
time_obj = time.strptime(time_str, '%M:%S')
return time_obj.tm_min * 60 + time_obj.tm_sec
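# Quick check (illustrative): _parse_time_str('1:23') parses minutes and
# seconds via strptime and returns 1 * 60 + 23 == 83.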
| rue89-tech/edx-platform | common/test/acceptance/pages/lms/video/video.py | Python | agpl-3.0 | 25,834 |
"""
WSGI config for minitwitter project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "minitwitter.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "minitwitter.settings.devel")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| slok/minitwitter | minitwitter/minitwitter/wsgi.py | Python | bsd-3-clause | 1,440 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE",
"settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| micfan/dinner | src/manage.py | Python | mit | 277 |
import math
from probe import functions
from probe import statchar
import pygal
import os
"""
1. empirical_cdf
2. empirical_cdf-graph
3. compare
4. compare_cdf_with_theoretical
1. Kolmogorov distribution
2. Normal distribution
3. Fisher distribution
4. t-distribution
5. chi-squared distribution
6. Poisson distribution
7. Zipf distribution
1. find_percentile
"""
class DistributionsError(Exception):
def __str__(self):
return repr("An error for distributions.py")
class IncorrectDistributionInicializationError(DistributionsError):
def __str__(self):
return repr("Please check distribution requirements.")
class IncorrectInputError(DistributionsError):
def __str__(self):
return repr("Input violates requirements- check context.")
class NotAListError(DistributionsError):
def __str__(self):
return repr("Given variable not a list.")
class PercentileError(DistributionsError):
def __str__(self):
return repr("Percentile must be within (0, 1)")
def empirical_cdf(t, data_list):
"""
Computes the empirical cdf of the given data_list for the
given value t.
For more information:
http://en.wikipedia.org/wiki/Empirical_distribution_function
"""
if type(data_list) is not list:
raise NotAListError()
n = len(data_list)
number_of_elements = 0
for element in data_list:
if element <= t:
number_of_elements += 1
return number_of_elements/n
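# Minimal illustration (hypothetical data): for data_list = [1, 2, 3, 4],
# empirical_cdf(2, data_list) counts the two elements <= 2 and returns
# 2/4 == 0.5.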
just_names = ["empiric cdf graph.svg", "Empiric cdf graph for data", "data"]
def empiric_cdf_graph(data_list, names=just_names):
"""
Prints the graph of the empirical cdf of the data in data_list.
"""
if type(data_list) is not list:
raise NotAListError()
for_print = []
for element in data_list:
for_print.append(empirical_cdf(element, data_list))
functions.print_graph_discrete(for_print, True, True, True, names)
def compare(data_list, distribution):
"""
Compares the difference between the empirical cdf of the
data in data_list and the theoretical cdf of the specified
distribution- returns the sum of squares between the cdfs in
the points in data_list.
"""
if type(data_list) is not list:
raise NotAListError()
cdf = distribution.cdf
sum_of_squares = 0
for element in data_list:
A = empirical_cdf(element, data_list)
B = cdf(element)
sum_of_squares += math.pow(A-B, 2)
return sum_of_squares
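# Usage sketch (hypothetical data): compare([0.1, 0.5, 0.9],
# NormalDistribution(0, 1)) sums the squared gaps between the empirical cdf
# and the normal cdf at each data point; values near 0 suggest a good fit.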
def compare_cdf_with_theoretical(data_list, distributions):
"""
Prints the graphs of the empirical cdf of the data in
data_list and the cdf of distribution for the points in
data_list.
"""
if type(data_list) is not list:
raise NotAListError()
data_list = sorted(data_list)
line_chart = pygal.Line(dots_size=1)
coordinates = []
length = len(data_list)
if length > 50:
minimum = min(data_list)
maximum = max(data_list)
interval = maximum - minimum
step = interval/10
current = min(data_list)
while current <= max(data_list):
coordinates.append(current)
current += step
line_chart.x_labels = map(str, coordinates)
else:
i = 0
current = data_list[i]
while current <= max(data_list):
coordinates.append(current)
i += 1
try:
current = data_list[i]
except IndexError:
line_chart.x_labels = map(str, coordinates)
break
empirical_cdf_list = []
for element in data_list:
empirical_cdf_list.append(empirical_cdf(element, data_list))
line_chart.add("empirical cdf", empirical_cdf_list)
if type(distributions) is list:
for dist in distributions:
theoretical_cdf_list = []
for element in data_list:
theoretical_cdf_list.append(dist.cdf(element))
if type(dist) is NormalDistribution:
line_chart.add("Normal cdf", theoretical_cdf_list)
if type(dist) is FisherDistribution:
line_chart.add("Fisher cdf", theoretical_cdf_list)
if type(dist) is t_Distribution:
line_chart.add("t cdf", theoretical_cdf_list)
if type(dist) is ChiSquaredDistribution:
line_chart.add("chi-squared cdf", theoretical_cdf_list)
else:
dist = distributions
theoretical_cdf_list = []
for element in data_list:
theoretical_cdf_list.append(distributions.cdf(element))
if type(dist) is NormalDistribution:
line_chart.add("Normal cdf", theoretical_cdf_list)
if type(dist) is FisherDistribution:
line_chart.add("Fisher cdf", theoretical_cdf_list)
if type(dist) is t_Distribution:
line_chart.add("t cdf", theoretical_cdf_list)
if type(dist) is ChiSquaredDistribution:
line_chart.add("chi-squared cdf", theoretical_cdf_list)
    current_dir = os.getcwd()
    svg_dir = os.path.join(current_dir, "probe svg container")
    if not os.path.exists(svg_dir):
        os.makedirs(svg_dir)
    os.chdir(svg_dir)
    line_chart.render_to_file("cdf comparison.svg")
    os.chdir(current_dir)
class KolmogorovDistribution():
"""
Provides kolmogorov_cdf method for the Kolmogorov distribution.
for more information:
http://en.wikipedia.org/wiki/Kolmogorov%E2%80%93Smirnov_test#Kolmogorov_distribution
http://www.jstatsoft.org/v08/i18/paper
"""
def kolmogorov_cdf(x):
if x == 0:
return 0
A = math.pow(2*math.pi, 0.5)/x
summation = 0
for i in range(1, 10):
B = -math.pow(2*i-1, 2)*math.pow(math.pi, 2)
C = 8*math.pow(x, 2)
summation += math.exp(B/C)
return A*summation
class NormalDistribution():
"""
Implements the Normal distribution with its basic features.
For more information:
http://en.wikipedia.org/wiki/Normal_distribution
"""
def normal_pdf(x, m, v):
"""
Gives the value of the normal probability density function
for the given x and the specified parameters of the normal
distribution- m specifies the mean, v specifies the
variance.
"""
if v < 0:
raise IncorrectInputError()
A = 1/(math.pow(v*2*math.pi, 0.5))
B = -math.pow(x-m, 2)/(2*v)
C = math.exp(B)
return A*C
def normal_cdf(x, m, v):
"""
Gives the value of the normal cumulative density function
for the given x and the specified parameters of the normal
distribution- m specifies the mean, v specifies the
variance.
"""
if v < 0:
raise IncorrectInputError()
A = (x-m)/(math.pow(v*2, 0.5))
return 1/2*(1+functions.error_function(A))
def normal_percentile(x, m, v):
"""
Returns the quantile required. m specifies the mean of the
distribution, v specifies the variance of the distribution.
x must be in the interval
(0, 1).
"""
if v < 0:
raise IncorrectInputError()
if x <= 0 or x >= 1:
raise PercentileError()
f = functions.inverse_error_function
return m + math.pow(2*v, 0.5)*f(2*x-1)
def __init__(self, mean, variance):
"""
Sets the parameters for the Normal distribution.
variance >= 0
"""
if variance < 0:
raise IncorrectDistributionInicializationError()
self.mean = mean
self.variance = variance
self.median = mean
self.mode = mean
self.skewness = 0
self.ex_kurtosis = 0
def pdf(self, x):
"""
Gives the value of the normal probability density function
for the given x.
"""
m = self.mean
v = self.variance
return NormalDistribution.normal_pdf(x, m, v)
def cdf(self, x):
"""
Gives the value of the normal cumulative density function
for the given x.
"""
m = self.mean
v = self.variance
return NormalDistribution.normal_cdf(x, m, v)
def percentile(self, x):
"""
Returns the quantile required. x must be in the interval
(0, 1).
"""
if x <= 0 or x >= 1:
raise PercentileError()
m = self.mean
v = self.variance
return NormalDistribution.normal_percentile(x, m, v)
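# Usage sketch (standard normal facts, assuming an accurate
# functions.inverse_error_function):
#   dist = NormalDistribution(0, 1)
#   dist.cdf(0)            # == 0.5 by symmetry
#   dist.percentile(0.975) # should come out near 1.96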
class FisherDistribution:
"""
Implements the Fisher distribution with its basic features.
For more information:
https://en.wikipedia.org/wiki/F-distribution
"""
def f_pdf(x, df1, df2):
"""
Gives the value of the Fisher probability density function
        for the given x (x must be in (0, inf)) and the specified
        parameters of the Fisher distribution- df1 and df2 specify
        the degrees of freedom (df1 and df2 > 0).
TODO: implement for x = 0!
"""
if df1 <= 0 or df2 <= 0:
raise IncorrectInputError()
A = math.pow(df1*x, df1)
B = math.pow(df2, df2)
C = math.pow(df1*x+df2, df1+df2)
D = math.pow(A*B/C, 0.5)
beta = functions.beta_function(df1/2, df2/2)
return D/(x*beta)
def f_cdf(x, df1, df2):
"""
Gives the value of the Fisher cumulative density function
        for the given x (x must be in [0, inf)) and the specified
        parameters of the Fisher distribution- df1 and df2 specify
        the degrees of freedom (df1 and df2 > 0).
"""
if df1 <= 0 or df2 <= 0:
raise IncorrectInputError()
index = df1*x/(df1*x+df2)
A = functions.incomplete_beta_function(index, df1/2, df2/2)
B = functions.beta_function(df1/2, df2/2)
return A/B
def __init__(self, df1, df2):
"""
Sets the parameters for the Fisher distribution.
        The degrees of freedom df1 and df2 must be > 0.
"""
if df1 <= 0 or df2 <= 0:
raise IncorrectDistributionInicializationError()
self.df1 = df1
self.df2 = df2
if df2 > 2:
self.mean = df2/(df2-2)
else:
self.mean = "not defined"
if df2 > 4:
A = 2*math.pow(df2, 2)*(df1*df2-2)
B = df1*math.pow(df2-2, 2)*(df2-4)
self.variance = A/B
else:
self.variance = "not defined"
# maybe not so
self.median = "not defined"
if df1 > 2:
self.mode = ((df1-2)/df1)*(df2/(df2+2))
else:
self.mode = "not defined"
# self.skewness = 0
# self.ex_kurtosis = 0
def pdf(self, x):
"""
Gives the value of the Fisher probability density function
for the given x.
"""
df1 = self.df1
df2 = self.df2
return FisherDistribution.f_pdf(x, df1, df2)
def cdf(self, x):
"""
Gives the value of the Fisher cumulative density function
for the given x.
"""
df1 = self.df1
df2 = self.df2
return FisherDistribution.f_cdf(x, df1, df2)
class t_Distribution():
"""
Implements the t-distribution with its basic features.
For more information:
http://mathworld.wolfram.com/Studentst-Distribution.html
"""
def t_pdf(x, df):
"""
Gives the value of the t probability density function
        for the given x (x can be any real number) and the specified
parameter of the t-distribution- df specifies
the degrees of freedom (df > 0).
"""
if df <= 0:
raise IncorrectInputError()
f = functions.gamma_function
A = f((df+1)/2)
B = math.pow(df*math.pi, 0.5)*f(df/2)
C = -(df+1)/2
D = 1+math.pow(x, 2)/df
return A/B*math.pow(D, C)
def t_cdf(x, df):
"""
Gives the value of the t cumulative distribution function
        for the given x (x can be any real number) and the specified
parameter of the t-distribution- df specifies
the degrees of freedom (df > 0).
"""
if df <= 0:
raise IncorrectInputError()
b = functions.regularized_beta_function
A = b(df/(df+math.pow(x, 2)), df/2, 1/2)
main_element = 1-A
if x < 0:
main_element = - main_element
the_sum = 1/2 + 1/2*main_element
return the_sum
def __init__(self, df):
"""
Sets the parameters for the t distribution.
        The degrees of freedom df must be > 0.
"""
if df <= 0:
raise IncorrectDistributionInicializationError()
self.df = df
if df > 1:
self.mean = 0
else:
self.mean = "not defined"
if df > 2:
self.variance = df/(df-2)
elif 1 < df <= 2:
self.variance = "infinite"
else:
self.variance = "not defined"
self.median = 0
self.mode = 0
if df > 3:
self.skewness = 0
else:
self.skewness = "not defined"
if df > 4:
self.ex_kurtosis = 6/(df-4)
elif 2 < df <= 4:
self.ex_kurtosis = "infinite"
else:
self.ex_kurtosis = "not defined"
def pdf(self, x):
"""
Gives the value of the t probability density function
for the given x.
"""
df = self.df
return t_Distribution.t_pdf(x, df)
def cdf(self, x):
"""
Gives the value of the t cumulative density function
for the given x.
"""
df = self.df
return t_Distribution.t_cdf(x, df)
class ChiSquaredDistribution():
"""
Implements the chi-squared distribution with its basic features.
For more information:
http://en.wikipedia.org/wiki/Chi-squared_distribution#Characteristics
"""
def chi_squared_pdf(x, df):
"""
Gives the value of the chi-squared probability density function
        for the given x (x can be any real number, >= 0) and the specified
        parameter of the chi-squared distribution- df specifies
the degrees of freedom (df > 0).
"""
if df <= 0:
raise IncorrectInputError()
if x < 0:
return 0
A = math.pow(x, df/2-1)*math.exp(-x/2)
B = math.pow(2, df/2)*functions.gamma_function(df/2)
return A/B
def chi_squared_cdf(x, df):
"""
        Gives the value of the chi-squared cumulative distribution function
        for the given x (x can be any real number, >= 0) and the specified
        parameter of the chi-squared distribution- df specifies
the degrees of freedom (df > 0).
"""
if df <= 0:
raise IncorrectInputError()
if x < 0:
return 0
A = functions.lower_incomplete_gamma_function(df/2, x/2)
B = functions.gamma_function(df/2)
return A/B
def __init__(self, df):
"""
Sets the parameters for the chi-squared distribution.
df must be > 0.
"""
if df <= 0:
raise IncorrectDistributionInicializationError()
self.df = df
self.mean = df
self.variance = 2*df
self.median = df*math.pow(1-2/(9*df), 3)
if 0 >= df-2:
self.mode = 0
else:
self.mode = df-2
self.skewness = math.pow(8/df, 0.5)
self.ex_kurtosis = 12/df
def pdf(self, x):
"""
Gives the value of the chi-squared probability density function
for the given x.
"""
df = self.df
return ChiSquaredDistribution.chi_squared_pdf(x, df)
def cdf(self, x):
"""
Gives the value of the chi-squared cumulative distribution function
for the given x.
"""
df = self.df
return ChiSquaredDistribution.chi_squared_cdf(x, df)
class PoissonDistribution():
"""
Implements the Poisson distribution with its basic features.
For more information:
http://en.wikipedia.org/wiki/Poisson_distribution
"""
def poisson_pmf(k, the_lambda):
"""
Gives the value of the Poisson probability mass function
        for the given k (k must be a whole, nonnegative number)
and the specified parameter of the Poisson distribution-
the_lambda must be > 0.
"""
if the_lambda <= 0:
raise IncorrectInputError()
A = math.pow(the_lambda, k)
B = math.exp(-the_lambda)
C = functions.factorial(k)
return A*B/C
def poisson_cdf(k, the_lambda):
"""
Gives the value of the Poisson cumulative density function
        for the given k (k must be a whole, nonnegative number)
and the specified parameter of the Poisson distribution-
the_lambda must be > 0.
"""
if the_lambda <= 0:
raise IncorrectInputError()
A = math.exp(-the_lambda)
index = functions.floor_function(k)
summation = 0
for i in range(index+1):
summation += math.pow(the_lambda, i)/functions.factorial(i)
return A*summation
def __init__(self, the_lambda):
"""
Sets the parameters for the Poisson distribution.
the_lambda must be > 0
"""
if the_lambda <= 0:
raise IncorrectDistributionInicializationError()
self.the_lambda = the_lambda
self.mean = the_lambda
self.variance = the_lambda
self.median = functions.floor_function(the_lambda+1/3-0.02/the_lambda)
self.mode = functions.floor_function(the_lambda)
self.skewness = math.pow(the_lambda, -0.5)
self.ex_kurtosis = math.pow(the_lambda, -1)
def pmf(self, k):
"""
Gives the value of the Poisson probability mass function
for the given k (k >= 0, whole number).
"""
the_lambda = self.the_lambda
return PoissonDistribution.poisson_pmf(k, the_lambda)
def cdf(self, k):
"""
Gives the value of the Poisson cumulative density function
for the given k (k >= 0, whole number).
"""
the_lambda = self.the_lambda
return PoissonDistribution.poisson_cdf(k, the_lambda)
def exact(self, k):
"""
Gives the exact probability for occurence of the given k
(k >= 0, whole number)
"""
the_lambda = self.the_lambda
A = PoissonDistribution.poisson_cdf(k, the_lambda)
if k == 0:
return A
B = PoissonDistribution.poisson_cdf(k-1, the_lambda)
return A - B
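# Quick check (illustrative): for PoissonDistribution(2), exact(0) equals
# cdf(0) == exp(-2) (about 0.135), since there is no smaller k to subtract.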
class ZipfDistribution():
"""
Implements the Zipf distribution with its basic features.
For more information:
http://en.wikipedia.org/wiki/Zipf%27s_law
"""
def zipf_pmf(k, s, N):
"""
Gives the value of the Zipf probability mass function
        for the given k (k must be a whole number in [1, 2, ..., N])
and the specified parameters of the Zipf distribution-
s >= 0, N in [1, 2, ...]
"""
A = 1/math.pow(k, s)
B = functions.generalized_harmonic_number_function(N, s)
return A/B
def zipf_cdf(k, s, N):
"""
Gives the value of the Zipf cumulative distribution function
        for the given k (k must be a whole number in [1, 2, ..., N])
and the specified parameters of the Zipf distribution-
s >= 0, N in [1, 2, ...]
"""
A = functions.generalized_harmonic_number_function(k, s)
B = functions.generalized_harmonic_number_function(N, s)
return A/B
def __init__(self, s, N):
"""
Sets the parameters for the Zipf distribution.
        s must be >= 0.
"""
if s < 0:
raise IncorrectDistributionInicializationError()
self.s = s
self.N = N
A = functions.generalized_harmonic_number_function(N, s-1)
B = functions.generalized_harmonic_number_function(N, s)
self.mean = A/B
# http://mathworld.wolfram.com/ZipfDistribution.html
# http://mathworld.wolfram.com/RiemannZetaFunction.html
# - bad integral
# self.variance = "Implement me!"
self.mode = 1
def pmf(self, k):
"""
Gives the value of the Zipf probability mass function
for the given k (k in [1, 2, ..., N]).
"""
s = self.s
N = self.N
return ZipfDistribution.zipf_pmf(k, s, N)
def cdf(self, k):
"""
Gives the value of the Zipf cumulative density function
for the given k (k in [1, 2, ..., N]).
"""
s = self.s
N = self.N
return ZipfDistribution.zipf_cdf(k, s, N)
def find_percentile(percentile, cdf, args=None):
"""
Finds the percentile of the given cdf.
(https://en.wikipedia.org/wiki/Percentile)
* percentile specifies the wanted percentile (must be in (0, 1)).
* cdf is the cdf of interest
* args provides the additional arguments necessary for
the cdf function.
"""
if percentile >= 1 or percentile <= 0:
raise PercentileError()
start = 0.01
end = 10
if args is None:
if cdf(start) > percentile:
while cdf(start) >= percentile:
start -= 50
while cdf(end) < percentile:
end += end
middle = (start + end)/2
while abs(cdf(middle)-(percentile)) > 0.0001:
if cdf(middle) > percentile:
end = middle
else:
start = middle
middle = (start + end)/2
else:
if cdf(start, *args) > percentile:
while cdf(start, *args) >= percentile:
start -= 50
while cdf(end, *args) < percentile:
end += end
middle = (start + end)/2
while abs(cdf(middle, *args)-(percentile)) > 0.0001:
if cdf(middle, *args) > percentile:
end = middle
else:
start = middle
middle = (start + end)/2
return middle
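# Example of the generic bisection (illustrative): for a chi-squared
# distribution with one degree of freedom,
#   find_percentile(0.95, ChiSquaredDistribution.chi_squared_cdf, args=(1,))
# brackets the root between `start` and `end` and should converge to roughly
# 3.84, the usual 5% critical value.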
| Mystfinder/probe-2 | probe/distributions.py | Python | gpl-2.0 | 22,738 |
# -*- coding: utf-8 -*-
"""
Copyright 2013-2014 Olivier Cortès <[email protected]>
This file is part of the 1flow project.
1flow is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
1flow is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public
License along with 1flow. If not, see http://www.gnu.org/licenses/
"""
import logging
from mongodbforms import DocumentForm
from ..models import (HomePreferences, ReadPreferences,
SelectorPreferences, StaffPreferences)
LOGGER = logging.getLogger(__name__)
class HomePreferencesForm(DocumentForm):
class Meta:
model = HomePreferences
# Other fields are not yet ready
exclude = ('style', )
class ReadPreferencesForm(DocumentForm):
class Meta:
model = ReadPreferences
class SelectorPreferencesForm(DocumentForm):
class Meta:
model = SelectorPreferences
class StaffPreferencesForm(DocumentForm):
class Meta:
model = StaffPreferences
| EliotBerriot/1flow | oneflow/core/forms/preferences.py | Python | agpl-3.0 | 1,443 |
""" This package provides support for writing plugins for Spyke Viewer.
It belongs to `spykeutils` so that plugins can be executed in an environment
where the `spykeviewer` package and its dependencies are not installed
(e.g. servers).
`spykeutils` installs a script named "spykeplugin" that can be used to start
plugins directly from the command line, supplying selection and plugin
parameter information. It is also the default script that Spyke Viewer uses
when starting plugins remotely. If you want to implement your own script for
starting plugins remotely, e.g. on a server, you should conform to the
interface of this script.
:mod:`analysis_plugin` Module
-----------------------------
.. automodule:: spykeutils.plugin.analysis_plugin
:members:
:show-inheritance:
:mod:`data_provider` Module
---------------------------
.. automodule:: spykeutils.plugin.data_provider
:members:
:show-inheritance:
:mod:`gui_data` Module
----------------------
.. automodule:: spykeutils.plugin.gui_data
""" | rproepp/spykeutils | spykeutils/plugin/__init__.py | Python | bsd-3-clause | 1,020 |
# -*- coding: utf-8 -*-
from copy import deepcopy
from django.contrib import admin
from django.contrib.admin import site
from django.contrib.auth import get_user_model
from django.contrib.auth.admin import UserAdmin
from django.contrib.sites.models import Site
from django.db import OperationalError
from django.utils.translation import gettext_lazy as _
from cms.admin.forms import GlobalPagePermissionAdminForm, PagePermissionInlineAdminForm, ViewRestrictionInlineAdminForm
from cms.exceptions import NoPermissionsException
from cms.models import PagePermission, GlobalPagePermission
from cms.utils import permissions, page_permissions
from cms.utils.conf import get_cms_setting
from cms.utils.helpers import classproperty
PERMISSION_ADMIN_INLINES = []
user_model = get_user_model()
admin_class = UserAdmin
for model, admin_instance in site._registry.items():
if model == user_model:
admin_class = admin_instance.__class__
class TabularInline(admin.TabularInline):
pass
class PagePermissionInlineAdmin(TabularInline):
model = PagePermission
# use special form, so we can override of user and group field
form = PagePermissionInlineAdminForm
classes = ['collapse', 'collapsed']
extra = 0 # edit page load time boost
show_with_view_permissions = False
def has_change_permission(self, request, obj=None):
if not obj:
return False
return page_permissions.user_can_change_page_permissions(
request.user,
page=obj,
site=obj.node.site,
)
def has_add_permission(self, request, obj=None):
return self.has_change_permission(request, obj)
@classproperty
def raw_id_fields(cls):
# Dynamically set raw_id_fields based on settings
threshold = get_cms_setting('RAW_ID_USERS')
# Given a fresh django-cms install and a django settings with the
# CMS_RAW_ID_USERS = CMS_PERMISSION = True
# django throws an OperationalError when running
        # ./manage.py migrate
        # because auth_user doesn't exist yet
try:
threshold = threshold and get_user_model().objects.count() > threshold
except OperationalError:
threshold = False
return ['user'] if threshold else []
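    # For instance (illustrative): with CMS_RAW_ID_USERS = 100 and more than
    # 100 users in the database, the `user` field switches to a raw-id widget
    # instead of rendering one huge <select> per inline row.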
def get_queryset(self, request):
"""
        Queryset change, so a user with global change permissions can see
        all permissions. Otherwise the user can see only permissions for
        people who are under him (he can't see his own permissions, because
        that would let him grant more power to himself).
"""
site = Site.objects.get_current(request)
try:
# can see only permissions for users which are under him in tree
qs = self.model.objects.subordinate_to_user(request.user, site)
except NoPermissionsException:
return self.model.objects.none()
return qs.filter(can_view=self.show_with_view_permissions)
def get_formset(self, request, obj=None, **kwargs):
"""
        Some fields may be excluded here. The user can change only
        permissions which are available to him. E.g. if the user does not
        have the can_publish flag, he can't assign can_publish permissions.
"""
exclude = self.exclude or []
if obj:
user = request.user
if not obj.has_add_permission(user):
exclude.append('can_add')
if not obj.has_delete_permission(user):
exclude.append('can_delete')
if not obj.has_publish_permission(user):
exclude.append('can_publish')
if not obj.has_advanced_settings_permission(user):
exclude.append('can_change_advanced_settings')
if not obj.has_move_page_permission(user):
exclude.append('can_move_page')
kwargs['exclude'] = exclude
formset_cls = super(PagePermissionInlineAdmin, self).get_formset(request, obj=obj, **kwargs)
qs = self.get_queryset(request)
if obj is not None:
qs = qs.filter(page=obj)
formset_cls._queryset = qs
return formset_cls
class ViewRestrictionInlineAdmin(PagePermissionInlineAdmin):
extra = 0 # edit page load time boost
form = ViewRestrictionInlineAdminForm
verbose_name = _("View restriction")
verbose_name_plural = _("View restrictions")
show_with_view_permissions = True
class GlobalPagePermissionAdmin(admin.ModelAdmin):
list_display = ['user', 'group', 'can_change', 'can_delete', 'can_publish', 'can_change_permissions']
list_filter = ['user', 'group', 'can_change', 'can_delete', 'can_publish', 'can_change_permissions']
form = GlobalPagePermissionAdminForm
search_fields = []
for field in admin_class.search_fields:
search_fields.append("user__%s" % field)
search_fields.append('group__name')
list_display.append('can_change_advanced_settings')
list_filter.append('can_change_advanced_settings')
def get_list_filter(self, request):
threshold = get_cms_setting('RAW_ID_USERS')
try:
threshold = threshold and get_user_model().objects.count() > threshold
except OperationalError:
threshold = False
filter_copy = deepcopy(self.list_filter)
if threshold:
filter_copy.remove('user')
return filter_copy
def has_add_permission(self, request):
site = Site.objects.get_current(request)
return permissions.user_can_add_global_permissions(request.user, site)
def has_change_permission(self, request, obj=None):
site = Site.objects.get_current(request)
return permissions.user_can_change_global_permissions(request.user, site)
def has_delete_permission(self, request, obj=None):
site = Site.objects.get_current(request)
return permissions.user_can_delete_global_permissions(request.user, site)
@classproperty
def raw_id_fields(cls):
# Dynamically set raw_id_fields based on settings
threshold = get_cms_setting('RAW_ID_USERS')
# Given a fresh django-cms install and a django settings with the
# CMS_RAW_ID_USERS = CMS_PERMISSION = True
# django throws an OperationalError when running
        # ./manage.py migrate
        # because auth_user doesn't exist yet
try:
threshold = threshold and get_user_model().objects.count() > threshold
except OperationalError:
threshold = False
return ['user'] if threshold else []
if get_cms_setting('PERMISSION'):
admin.site.register(GlobalPagePermission, GlobalPagePermissionAdmin)
PERMISSION_ADMIN_INLINES.extend([
ViewRestrictionInlineAdmin,
PagePermissionInlineAdmin,
])
| benzkji/django-cms | cms/admin/permissionadmin.py | Python | bsd-3-clause | 6,844 |
#!/usr/bin/python2
# check_memcache.py
#
# Copyright 2012 Silvio Knizek <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
import sys
import time
import argparse
import memcache
def argument_parser():
    parser = argparse.ArgumentParser(
        description='Check the specified memcached.')
parser.add_argument('-H',
help = 'the host to query (Default: localhost)',
dest = 'host',
type = str,
default = 'localhost'
)
parser.add_argument('-p',
help = 'the port at the host (Default: 11211)',
dest = 'port',
type = int,
default = 11211
)
parser.add_argument('-w',
help = 'the time after WARNING is thrown (Default: 1s)',
dest = 'warning',
type = int,
default = 1
)
parser.add_argument('-c',
help = 'the time after CRITICAL is thrown (Default: 3s)',
dest = 'critical',
type = int,
default = 3
)
return parser.parse_args()
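# Typical invocation (hypothetical host name):
#   ./check_memcache.py -H cache01.example.com -p 11211 -w 1 -c 3
# The script prints a Nagios-style status line with perfdata and exits
# 0/1/2 for OK/WARNING/CRITICAL.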
def main():
args = argument_parser()
start_time = time.time()
mc = memcache.Client([args.host + ":" + str(args.port)])
try:
stats = mc.get_stats()[0][1]
except memcache._ConnectionDeadError:
sys.stdout.write("Connection died during reading!")
sys.exit((2))
except IndexError:
sys.stdout.write("Couldn't connect to host!")
sys.exit((2))
except:
sys.stdout.write("Unexpected error: " + str(sys.exc_info()[0]))
sys.exit((2))
used_time = round(time.time() - start_time, 3)
total_hits = float(stats['get_hits'])
total_connections = total_hits + float(stats['get_misses'])
perf = ""
for key in stats:
perf += str(key) + "=" + str(stats[key]) + ", "
perf = perf[:-2] #remove trailing ", "
if (total_hits == 0) or (total_connections == 0):
hitrate = 0
else:
hitrate = round(total_hits / total_connections * 100, 2)
memcache_stat = "Memcached OK"
exit_value = 0
if used_time >= args.critical:
memcache_stat = "Memcached CRITICAL"
exit_value = 2
    elif used_time >= args.warning:
memcache_stat = "Memcached WARNING"
exit_value = 1
sys.stdout.write(memcache_stat + " - " + str(used_time) + "s - Hitrate: " \
+ str(hitrate) + "%" + " | " + perf)
sys.exit((exit_value))
if __name__ == '__main__':
main()
sys.stdout.write("Something really bad happend!")
sys.exit((3))
| killermoehre/nagios-plugins | check_memcache.py | Python | gpl-2.0 | 3,173 |
# encoding: utf-8
from south.v2 import DataMigration
from sanetime import time
class Migration(DataMigration):
def forwards(self, orm):
now = time()
orm.AccountType.objects.create(
name='Webex Meeting Center',
username_label='??',
extra_username_label='???',
listing_priority=3,
can_api_create_event=True,
can_api_load_event=True,
can_api_register_user=True,
can_api_report_views=True,
created_at=now,
updated_at=now)
orm.AccountType.objects.filter(name='Webex').update(name='Webex Event Center')
def backwards(self, orm):
orm.AccountType.objects.filter(name='Webex Meeting Center').delete()
orm.AccountType.objects.filter(name='Webex Event Center').update(name='Webex')
models = {
'webinars.account': {
'Meta': {'object_name': 'Account'},
'account_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.AccountType']"}),
'created_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'}),
'current_sync': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'null': 'True', 'to': "orm['webinars.AccountSync']"}),
'default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deleted_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'extra': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'hub': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Hub']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_sync': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'null': 'True', 'to': "orm['webinars.AccountSync']"}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'prevent_unformed_lead_import': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sync_lock': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
'updated_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'webinars.accountsync': {
'Meta': {'object_name': 'AccountSync'},
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Account']"}),
'completed_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'debug': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'error': ('django.db.models.fields.CharField', [], {'max_length': '4096', 'null': 'True'}),
'forced_stop': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.HubSync']", 'null': 'True'}),
'sharded_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'staged_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'started_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'}),
'visible': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'webinars.accountsyncshard': {
'Meta': {'object_name': 'AccountSyncShard'},
'completed_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'created_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'}),
'depth': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent_sync': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.AccountSync']"}),
'section': ('django.db.models.fields.IntegerField', [], {}),
'size': ('django.db.models.fields.IntegerField', [], {}),
'started_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'})
},
'webinars.accountsyncstage': {
'Meta': {'object_name': 'AccountSyncStage'},
'completed_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'created_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'}),
'historical': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'max_size': ('django.db.models.fields.IntegerField', [], {}),
'offset': ('django.db.models.fields.IntegerField', [], {}),
'parent_sync': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.AccountSync']"}),
'size': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'started_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'})
},
'webinars.accounttype': {
'Meta': {'object_name': 'AccountType'},
'can_api_create_event': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_api_load_event': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_api_register_user': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_api_report_views': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'}),
'extra_username_label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'listing_priority': ('django.db.models.fields.IntegerField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'updated_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'}),
'username_label': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'webinars.cmsform': {
'Meta': {'object_name': 'CmsForm'},
'guid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'primary_key': 'True'}),
'hub': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Hub']"}),
'is_sync_target': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'webinars.event': {
'Meta': {'object_name': 'Event'},
'_attended_criterium_guid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
'_attended_saved_search_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'_noshow_saved_search_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'_registered_criterium_guid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
'_registered_saved_search_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'_time_ended_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'_time_ends_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'_time_started_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'_time_starts_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'_timezone': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'_update_cms_form': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'null': 'True', 'to': "orm['webinars.CmsForm']"}),
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Account']"}),
'alt_remote_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'attended_campaign_guid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
'cms_forms': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['webinars.CmsForm']", 'through': "orm['webinars.EventForm']", 'symmetrical': 'False'}),
'created_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'}),
'current_sync': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'null': 'True', 'to': "orm['webinars.EventSync']"}),
'deleted_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '16383', 'null': 'True'}),
'hashcode': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_sync': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'null': 'True', 'to': "orm['webinars.EventSync']"}),
'mothballed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'noshow_campaign_guid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
'remote_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'sync_leads_for_all_time': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sync_lock': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'unknowable_registrants': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'updated_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'})
},
'webinars.eventform': {
'Meta': {'object_name': 'EventForm'},
'cms_form': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.CmsForm']"}),
'converted_at_cutoff': ('sanetime.dj.SaneTimeField', [], {'default': '0'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Event']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_last_modified_at': ('sanetime.dj.SaneTimeField', [], {'default': '0'})
},
'webinars.eventsync': {
'Meta': {'object_name': 'EventSync'},
'completed_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'created_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'}),
'debug': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'error': ('django.db.models.fields.CharField', [], {'max_length': '4096', 'null': 'True'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Event']"}),
'forced_stop': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.AccountSync']", 'null': 'True'}),
'sharded_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'staged_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'started_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'visible': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'webinars.eventsyncshard': {
'Meta': {'object_name': 'EventSyncShard'},
'completed_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'created_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'}),
'depth': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent_sync': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.EventSync']"}),
'section': ('django.db.models.fields.IntegerField', [], {}),
'size': ('django.db.models.fields.IntegerField', [], {}),
'started_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'})
},
'webinars.gtweventsnapshot': {
'Meta': {'object_name': 'GTWEventSnapshot'},
'_time_ended_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'_time_ends_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'_time_started_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'_time_starts_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'_timezone': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Account']"}),
'alt_remote_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '16383', 'null': 'True'}),
'hashcode': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'remote_id': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'webinars.gtweventsyncstage': {
'Meta': {'object_name': 'GTWEventSyncStage'},
'attendants': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'completed_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'created_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'max_size': ('django.db.models.fields.IntegerField', [], {}),
'offset': ('django.db.models.fields.IntegerField', [], {}),
'parent_sync': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.EventSync']"}),
'size': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'started_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'})
},
'webinars.gtwregistrantsnapshot': {
'Meta': {'object_name': 'GTWRegistrantSnapshot'},
'email': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Event']"}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'hashcode': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'remote_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'started_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'stopped_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'})
},
'webinars.hub': {
'Meta': {'object_name': 'Hub'},
'_attended_any_criterium_guid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
'_attended_any_saved_search_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'_churned_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'_cms_domain': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True'}),
'_friends_and_family': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'_product_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'_registered_any_criterium_guid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
'_registered_any_saved_search_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'_sfdc_info_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'_timezone': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'beta': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'}),
'current_sync': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'null': 'True', 'to': "orm['webinars.HubSync']"}),
'id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True', 'primary_key': 'True'}),
'internal': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_sync': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'null': 'True', 'to': "orm['webinars.HubSync']"}),
'sync_lock': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
'uninstalled_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'updated_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'})
},
'webinars.hubspoteventsyncstage': {
'Meta': {'object_name': 'HubSpotEventSyncStage'},
'completed_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'created_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'}),
'event_form': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.EventForm']"}),
'finish_last_modified_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_size': ('django.db.models.fields.IntegerField', [], {}),
'offset': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'parent_sync': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.EventSync']"}),
'size': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'start_last_modified_at': ('sanetime.dj.SaneTimeField', [], {}),
'started_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'})
},
'webinars.hubspotregistrantsnapshot': {
'Meta': {'object_name': 'HubSpotRegistrantSnapshot'},
'attended_any': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'attended_this': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Event']"}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'hashcode': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'initial_form_guid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'lead_guid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
'registered_any': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'registered_this': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'started_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'stopped_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'})
},
'webinars.hubsync': {
'Meta': {'object_name': 'HubSync'},
'completed_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'debug': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'error': ('django.db.models.fields.CharField', [], {'max_length': '4096', 'null': 'True'}),
'forced_stop': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hub': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Hub']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'started_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'}),
'visible': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'webinars.landingpage': {
'Meta': {'object_name': 'LandingPage'},
'cms_form': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.CmsForm']"}),
'form_title': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'webinars.registrant': {
'Meta': {'object_name': 'Registrant'},
'cms_form': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.CmsForm']", 'null': 'True'}),
'created_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'}),
'deleted_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Event']"}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'hashcode': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'lead_guid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
'remote_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'started_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'stopped_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'updated_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'})
},
'webinars.stagedgtwevent': {
'Meta': {'object_name': 'StagedGTWEvent'},
'_time_ended_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'_time_ends_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'_time_started_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'_time_starts_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'_timezone': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Account']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '16383', 'null': 'True'}),
'hashcode': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'subject': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'universal_key': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'webinars.stagedgtwregistrant': {
'Meta': {'object_name': 'StagedGTWRegistrant'},
'duration': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'ended_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Event']"}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'hashcode': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'started_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'})
},
'webinars.stagedhubspotregistrant': {
'Meta': {'object_name': 'StagedHubSpotRegistrant'},
'attended_any': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'attended_this': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'converted_at': ('sanetime.dj.SaneTimeField', [], {}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Event']"}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'form_guid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
'hashcode': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'lead_guid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
'registered_any': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'registered_this': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'started_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'stopped_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'})
},
'webinars.stagedwebexevent': {
'Meta': {'object_name': 'StagedWebexEvent'},
'_time_ended_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'_time_ends_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'_time_started_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'_time_starts_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'_timezone': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Account']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '16383', 'null': 'True'}),
'hashcode': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'webinars.stagedwebexregistrant': {
'Meta': {'object_name': 'StagedWebexRegistrant'},
'attendee_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'duration': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Event']"}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'hashcode': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'started_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'stopped_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'})
},
'webinars.webexeventsnapshot': {
'Meta': {'object_name': 'WebexEventSnapshot'},
'_time_ended_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'_time_ends_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'_time_started_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'_time_starts_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'_timezone': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Account']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '16383', 'null': 'True'}),
'hashcode': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'remote_id': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'webinars.webexeventsyncstage': {
'Meta': {'object_name': 'WebexEventSyncStage'},
'attendants': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'completed_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'created_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'max_size': ('django.db.models.fields.IntegerField', [], {}),
'offset': ('django.db.models.fields.IntegerField', [], {}),
'parent_sync': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.EventSync']"}),
'size': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'started_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'})
},
'webinars.webexregistrantsnapshot': {
'Meta': {'object_name': 'WebexRegistrantSnapshot'},
'email': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Event']"}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'hashcode': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'remote_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'started_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
'stopped_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'})
}
}
complete_apps = ['webinars']
| prior/webinars | webinars_web/webinars/migrations/0059_webex_meeting_center.py | Python | apache-2.0 | 32,605 |
from intent.igt.metadata import set_meta_attr, find_meta, find_meta_attr, set_meta_text
from yggdrasil.consts import USER_META_ATTR, RATING_META_TYPE, QUALITY_META_ATTR, RATINGS, RATINGS_REV, \
EDITOR_METADATA_TYPE, COMMENT_META_TYPE, REASON_META_ATTR
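# Helpers for attaching an editor's quality rating (with a reason) and
# free-text comments to an IGT instance, stored under the editor metadata
# type defined in yggdrasil.consts.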
def set_rating(inst, user, rating, reason):
set_meta_attr(inst, RATING_META_TYPE, USER_META_ATTR, user, metadata_type=EDITOR_METADATA_TYPE)
set_meta_attr(inst, RATING_META_TYPE, QUALITY_META_ATTR, RATINGS.get(rating), metadata_type=EDITOR_METADATA_TYPE)
set_meta_attr(inst, RATING_META_TYPE, REASON_META_ATTR, reason, metadata_type=EDITOR_METADATA_TYPE)
def get_rating(inst):
r = find_meta_attr(inst, RATING_META_TYPE, QUALITY_META_ATTR, metadata_type=EDITOR_METADATA_TYPE)
if r is not None:
return RATINGS_REV.get(r)
def get_reason(inst):
r = find_meta_attr(inst, RATING_META_TYPE, REASON_META_ATTR, metadata_type=EDITOR_METADATA_TYPE)
return r
def set_comment(inst, user, comment):
set_meta_text(inst, COMMENT_META_TYPE, comment, metadata_type=EDITOR_METADATA_TYPE)
set_meta_attr(inst, COMMENT_META_TYPE, USER_META_ATTR, user, metadata_type=EDITOR_METADATA_TYPE)
def get_comment(inst):
m = find_meta(inst, COMMENT_META_TYPE, metadata_type=EDITOR_METADATA_TYPE)
if m is not None:
return m.text
else:
return None
| xigt/yggdrasil | yggdrasil/metadata.py | Python | mit | 1,348 |
# -*- coding: utf-8 -*-
# 2012 L. Amber Wilcox-O'Hearn
# test_real_word_vocab_extractor.py
from malaprop.error_insertion import real_word_vocabulary_extractor
import unittest, StringIO
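# The extractor should keep ordinary words (including contractions) and drop
# punctuation, placeholder tokens, markers, and non-ASCII entries.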
class RealWordVocabExtractorTest(unittest.TestCase):
    def test_real_word_vocab_extractor(self):
vocab_file_obj = open('malaprop/test/data/1K_test_vocab', 'rb')
outfile_obj = StringIO.StringIO()
rwve = real_word_vocabulary_extractor.RealWordVocabExtractor(vocab_file_obj, outfile_obj)
rwve.extract_real_words()
vocabulary = outfile_obj.getvalue()
for test_word in [u'with', u'end', u'don\'t']:
assert vocabulary.find(u'\n' + test_word + u'\n') != -1, test_word
for test_word in [u'xxxx', u'-', u'.', u'<3-digit-integer>', u'end.of.document']:
assert vocabulary.find(u'\n' + test_word + u'\n') == -1, test_word
def test_no_unicode(self):
vocab_file_obj = StringIO.StringIO('Some\nascii\nwords\nand\nthen\n\xe1\xbc\x84\xce\xbd\xce\xb1\xcf\x81\xcf\x87\xce\xbf\xcf\x82\n')
outfile_obj = StringIO.StringIO()
rwve = real_word_vocabulary_extractor.RealWordVocabExtractor(vocab_file_obj, outfile_obj)
rwve.extract_real_words()
vocabulary = outfile_obj.getvalue()
for test_word in [u'ascii', u'and', u'words']:
assert vocabulary.find(u'\n' + test_word + u'\n') != -1, test_word
for test_word in [u'\xe1\xbc\x84\xce\xbd\xce\xb1\xcf\x81\xcf\x87\xce\xbf\xcf\x82']:
assert vocabulary.find(u'\n' + test_word + u'\n') == -1, test_word
if __name__ == '__main__':
unittest.main()
| ambimorph/malaprop | malaprop/test/test_real_word_vocabulary_extractor.py | Python | agpl-3.0 | 1,645 |
from PyPDF2 import PdfFileWriter, PdfFileReader
from reportlab.pdfgen import canvas
c = canvas.Canvas('watermark.pdf')
c.drawImage('testplot.png', 350, 550, width=150, height=150) # , mask=None, preserveAspectRatio=False)
# c.drawString(15, 720, "Hello World")
c.save()
c = canvas.Canvas('watermark2.pdf')
c.drawImage('testplot.png', 250, 350, width=250, height=150) # , mask=None, preserveAspectRatio=False)
# c.drawString(15, 720, "Hello World")
c.save()
output_file = PdfFileWriter()
watermark = PdfFileReader(open("watermark.pdf", 'rb'))
watermark2 = PdfFileReader(open("watermark2.pdf", 'rb'))
input_file = PdfFileReader(open('../flyingpigeon/static/pdf/climatefactsheettemplate.pdf', 'rb'))
page_count = input_file.getNumPages()
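# Stamp both PNG overlays onto every page of the template: mergePage() draws
# the single-page "watermark" PDFs on top of the existing page content.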
for page_number in range(page_count):
print "Plotting png to {} of {}".format(page_number, page_count)
input_page = input_file.getPage(page_number)
input_page.mergePage(watermark.getPage(0))
input_page.mergePage(watermark2.getPage(0))
output_file.addPage(input_page)
with open('output.pdf', 'wb') as outputStream:
output_file.write(outputStream)
| KatiRG/flyingpigeon | scripts/pngintopdftemplate.py | Python | apache-2.0 | 1,118 |
'''An example of using :class:`AreaDetector`'''
import time
import config
from ophyd import SimDetector
from ophyd import (ImagePlugin, TIFFPlugin, ProcessPlugin, OverlayPlugin,
Component as Cpt)
logger = config.logger
class MyDetector(SimDetector):
image1 = Cpt(ImagePlugin, 'image1:')
tiff1 = Cpt(TIFFPlugin, 'TIFF1:')
proc1 = Cpt(ProcessPlugin, 'Proc1:')
over1 = Cpt(OverlayPlugin, 'Over1:')
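# EPICS PV prefix of the simulated-detector IOC; this value is site-specific.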
det1_prefix = 'XF:31IDA-BI{Cam:Tbl}'
det = MyDetector(det1_prefix)
det.cam.image_mode.put('Single', wait=True)
det.image1.enable.put('Enable', wait=True)
det.cam.array_callbacks.put('Enable', wait=True)
# ensure EPICS_CA_MAX_ARRAY_BYTES set properly...
img = det.image1.image
print('Image: {}'.format(img))
det.tiff1.file_template.put('%s%s_%3.3d.tif', wait=True)
logger.debug('template value=%s', det.tiff1.file_template.get())
logger.debug('full filename=%s', det.tiff1.full_file_name.get())
logger.debug('acquire = %d', det.cam.acquire.get())
img1 = det.image1
logger.debug('nd_array_port = %s', img1.nd_array_port.get())
# A signal group allows setting all of its elements at once via its device tuple:
proc1 = det.proc1
logger.debug('fc=%s', proc1.fc.get())
FcTuple = proc1.fc.get_device_tuple()
proc1.fc.put(FcTuple(fc1=1, fc2=2, fc3=3, fc4=4),
wait=True)
time.sleep(0.1)
logger.debug('fc=%s', proc1.fc.get())
# But they can be accessed individually as well
logger.debug('(fc1=%s, fc2=%s, fc3=%s, fc4=%s)', proc1.fc.fc1.get(),
proc1.fc.fc2.get(), proc1.fc.fc3.get(), proc1.fc.fc4.get())
# Reset them to the default values
proc1.fc.put(FcTuple(1, -1, 0, 1), wait=True)
time.sleep(0.1)
logger.debug('reset to fc=%s', proc1.fc.get())
# if using IPython, try the following:
# In [0]: run areadetector.py
#
# In [1]: help(proc1)
logger.debug('Overlay1:1 blue=%s', det.over1.overlay_1.blue.get())
| dchabot/ophyd | examples/areadetector.py | Python | bsd-3-clause | 1,836 |
"""
KBase narrative service and method API.
The main classes defined here are :class:`Service` and :class:`ServiceMethod`.
See the decorator-based workflow at the bottom of this module for example usage.
"""
__author__ = ["Dan Gunter <[email protected]>", "William Riehl <[email protected]>"]
__version__ = "0.0.1"
## Imports
# Stdlib
from collections import deque
import json
import logging
import os
import re
import sys
import time
import traceback
# Third-party
import traitlets as trt
# from IPython.core.application import Application
# Local
from biokbase.narrative.common import kbtypes, kblogging
from biokbase.narrative.common.log_common import EVENT_MSG_SEP
from biokbase.narrative.common.url_config import URLS
from biokbase.narrative.common import util
# Logging
_log = logging.getLogger(__name__)
## Exceptions
class ServiceError(Exception):
"""Base class for Service errors.
Should not normally be instantiated directly.
"""
def __init__(self, errmsg):
Exception.__init__(self, errmsg)
self._info = {
'severity': 'FATAL',
'type': self.__class__.__name__,
'msg': str(errmsg)
}
def add_info(self, k, v):
self._info[k] = v
def as_json(self):
return json.dumps(self._info)
class DuplicateServiceError(ServiceError):
pass
class ServiceMethodError(ServiceError):
"""Base class for all ServiceMethod errors"""
def __init__(self, method, errmsg, tb=None):
msg = "in function '{}': {}".format(method.name, errmsg)
ServiceError.__init__(self, msg)
self.add_info('method_name', method.name)
if tb is not None:
self.add_info('traceback',
self.traceback_dict(tb))
TB_KEYS = 'filename', 'line', 'function', 'text'
def traceback_dict(self, tb):
"""Extract and reformat traceback as a dict, for reporting in narrative.
:param tb: List of stack trace entries.
:type tb: list
:return: List where each entry is converted into a dict with
key/value pairs corresponding to the quadruple given above.
        :rtype: list of dict
"""
etb = traceback.extract_tb(tb)
return [{self.TB_KEYS[i]: entry[i] for i in xrange(len(entry))}
for entry in etb]
class ServiceMethodParameterError(ServiceMethodError):
"""Bad parameter for ServiceMethod."""
def __init__(self, method, errmsg):
msg = "bad parameter: " + errmsg
ServiceMethodError.__init__(self, method, msg)
self.add_info('details', errmsg)
class ServiceRegistryFormatError(ServiceMethodError):
"""Bad format for Service Registry."""
def __init__(self, method, errmsg):
msg = "bad registry format: " + errmsg
ServiceMethodError.__init__(self, method, msg)
self.add_info('details', errmsg)
## Utility functions / classes
def is_sequence(arg):
"""Returns True only if input acts like a sequence, but does
not act like a string.
"""
return (not hasattr(arg, "strip") and
hasattr(arg, "__getitem__") or
hasattr(arg, "__iter__"))
def get_func_desc(fn):
"""Get function description from docstring.
"""
doc, desc = fn.__doc__, []
for line in doc.split("\n"):
line = line.strip()
if line == "":
break
desc.append(line)
return ' '.join(desc)
def get_func_info(fn):
"""Get params and return from docstring
"""
doc = fn.__doc__
params, return_ = {}, {}
param_order = []
for line in doc.split("\n"):
line = line.strip()
# :param
if line.startswith(":param"):
_, name, desc = line.split(":", 2)
name = name[6:].strip() # skip 'param '
params[name] = {'desc': desc.strip()}
param_order.append(name)
# :type (of parameter, should be in kbtypes)
elif line.startswith(":type"):
_, name, desc = line.split(":", 2)
name = name[5:].strip() # skip 'type '
            if name not in params:
raise ValueError("'type' without 'param' for {}".format(name))
typeobj = eval(desc.strip())
params[name]['type'] = typeobj
# :default (value of parameter)
elif line.startswith(":default"):
_, name, value = line.split(":", 2)
name = name[8:].strip() # skip 'default '
            if name not in params:
raise ValueError("'default' without 'param' for {}".format(name))
params[name]['default'] = value.strip() # XXX: should allow quoting
# :ui_name (of parameter) - the name that should be displayed in the user interface
elif line.startswith(":ui_name"):
_, name, ui_name = line.split(":", 2)
name = name[8:].strip() # skip 'ui_name '
            if name not in params:
raise ValueError("'ui_name' without 'param' for {}".format(name))
ui_name = ui_name.strip()
params[name]['ui_name'] = ui_name
# :return - name of thing to return
elif line.startswith(":return"):
_1, _2, desc = line.split(":", 2)
return_['desc'] = desc.strip()
# :rtype - type of thing to return
elif line.startswith(":rtype"):
_1, _2, desc = line.split(":", 2)
typeobj = eval(desc.strip())
return_['type'] = typeobj
# :input_widget - the default visualization widget for this method.
# Should be the name as it's invoked in Javascript.
elif line.startswith(":input_widget"):
_1, _2, widget = line.split(":", 2)
return_['input_widget'] = widget.strip()
# :output_widget - the visualization widget for this method.
# Should be the name as it's invoked in Javascript.
elif line.startswith(":output_widget"):
_1, _2, widget = line.split(":", 2)
return_['output_widget'] = widget.strip()
# :embed - True if the widget should be automatically embedded.
# so, probably always True, but not necessarily
elif line.startswith(":embed"):
_1, _2, embed = line.split(":", 2)
embed = eval(embed.strip())
return_['embed_widget'] = embed
r_params = []
vis_info = {'input_widget': None,
'output_widget': None,
'embed_widget': True}
for i, name in enumerate(param_order):
type_ = params[name]['type']
desc = params[name]['desc']
ui_name = params[name].get('ui_name', name) # use parameter name if no ui_name is given
if 'default' in params[name]:
# set default value
dflt = params[name]['default']
pvalue = type_(dflt, desc=desc, ui_name=ui_name)
else:
# no default value
pvalue = type_(desc=desc, ui_name=ui_name)
r_params.append(pvalue)
if not return_:
r_output = None
else:
r_output = return_['type'](desc=return_['desc'])
vis_info = dict(vis_info.items() + return_.items())
return r_params, r_output, vis_info
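# Illustrative docstring accepted by get_func_info() above (the names and
# types here are hypothetical, not from a real service):
#
#     :param genome_id: Identifier of the genome to annotate
#     :type genome_id: kbtypes.Unicode
#     :default genome_id: kb|g.0
#     :ui_name genome_id: Genome ID
#     :return: Name of the created workspace object
#     :rtype: kbtypes.Unicode
#     :output_widget: GenomeAnnotationViewer
#     :embed: True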
## Registry
_services = {}
# def register_job(job_id):
# """Register a long-running job by id.
# This takes a job id from the User and Job Service, and registers it in
# the Narrative interface. This registration process does two things:
# 1. Shares that job with NarrativeJobViewer account on behalf of the current user.
# 2. Sends that job id forward to the Narrative front-end to be stored in the
# Narrative object and made visible to any querying front-end widget.
# :param str job_id: Unique identifier for the long-running job.
# """
# pass
# def poll_job(job_id):
# """Fetch a job from the User and Job Service.
# :param str job_id: Unique identifier for the job.
# """
# pass
def register_service(svc, name=None):
"""Register a service.
This will fail if there is already a service registered by that name.
If you want to replace a service, you must call :func:`unregister_service`
and then this method.
:param Service svc: Service object
:param str name: Service name. If not present, use `svc.name`.
:return: None
:raise: DuplicateServiceError, if service already is registered
"""
if name is None:
name = svc.name
if name in _services:
raise DuplicateServiceError(name)
_services[name] = svc
def unregister_service(svc=None, name=None):
"""Unregister a service.
:param Service svc: Service object. If not present, use `name`.
:param str name: Service name. If not present, use `svc.name`.
:raise: ValueError if bad arguments, KeyError if service not found
"""
if name is None:
if svc is None:
raise ValueError("Service object or name required")
name = svc.name
if name is None:
raise ValueError("Service object has no name")
del _services[name]
def get_service(name):
"""Get a registered service by name.
:param str name: Service name
:return: The service, or None
:rtype: Service
"""
return _services.get(name, None)
def get_all_services(as_json=False, as_json_schema=False):
"""Get all registered services, as objects (default) as JSON, or as JSON schema.
:param bool as_json: If True, return JSON instead of objects. Supersedes as_json_schema.
:param bool as_json_schema: If True, return JSON schema instead of objects.
:return: dict of {service name : Service object or JSON}
"""
if as_json or as_json_schema:
if as_json:
return json.dumps({name: inst.as_json() for name, inst in _services.iteritems()})
else:
return json.dumps({name: inst.as_json_schema() for name, inst in _services.iteritems()})
else:
return _services.copy()
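# Typical registry round trip (illustrative values):
#
#     svc = Service(name="demo", desc="A demo service", version="0.0.1")
#     register_service(svc)
#     assert get_service("demo") is svc
#     unregister_service(name="demo")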
## Service classes
class Service(trt.HasTraits):
"""Base Service class.
"""
__all__ = dict()
#: Name of the service; should be short identifier
name = trt.Unicode()
#: Description of the service
desc = trt.Unicode()
#: Version number of the service, see :class:`VersionNumber` for format
version = kbtypes.VersionNumber()
#: Flag for making all service methods invisible to UI
invisible = trt.Bool(False)
def __init__(self, **meta):
"""Initialize a Service instance.
:param meta: Metadata keywords to set as attributes on the instance.
Special keywords are `name`, `desc`, and `version` (see
documentation for each).
"""
trt.HasTraits.__init__(self)
# set traits from 'meta', if present
for key, val in meta.iteritems():
if hasattr(self, key):
setattr(self, key, val)
# list of all methods
self.methods = []
# register the new instance so long as the service was
# properly declared with a name
if 'name' in meta:
self.__class__.__all__[meta['name']] = self
def add_method(self, method=None, **kw):
"""Add one :class:`ServiceMethod`
:param method: The method. If missing, create an instance from keywords.
:type method: ServiceMethod or None
:param kw: Keywords if creating a ServiceMethod
:type kw: dict
:return: The method (given or created)
:rtype: ServiceMethod
:raise: If method is None, anything raised by :class:`ServiceMethod` constructor
"""
if not method:
# If the service isn't visible, pass that down into the method
if self.invisible:
kw['visible'] = False
method = ServiceMethod(**kw)
self.methods.append(method)
return method
def get_method(self, name):
"""Get a service method, by name.
:param str name: Method name
:return: Method or None
:rtype: ServiceMethod
"""
        for m in self.methods:
            if m.name == name:
                return m
        _log.debug("get_method: no method named '%s'", name)
        return None
def quiet(self, value=True):
"""Make all methods quiet.
See :meth:`ServiceMethod.quiet`.
"""
for m in self.methods:
m.quiet(value)
def as_json(self):
d = {
'name': self.name,
'desc': self.desc,
'version': self.version,
'methods': [m.as_json() for m in self.methods]
}
return d
def as_json_schema(self):
d = {
'name': self.name,
'desc': self.desc,
'version': self.version,
'methods': [m.as_json_schema() for m in self.methods]
}
return d
class LifecycleSubject(object):
"""Contains the current status of a running process.
The basic model is that a process is in a 'stage', which is
an integer starting at 1 and less than or equal to the total
number of stages. Stages and total numbers of stages can
change as long as the invariants 0 <= stage <= num_stages
and 1 <= num_stages hold. Note that 0 is a special stage number
meaning 'not yet started'.
"""
def __init__(self, stages=1):
if not isinstance(stages, int) or stages < 1:
raise ValueError("Number of stages ({}) must be > 0".format(stages))
self._stages = stages
self.reset()
self.obs = []
def register(self, observer):
self.obs.append(observer)
def unregister(self, observer):
self.obs.remove(observer)
def _event(self, name, *args):
for obs in self.obs:
getattr(obs, name)(*args)
## Events
def reset(self):
self._stage = 0
self._done = False
def advance(self, name):
"""Increments stage, giving it a name."""
if not self._done:
self._stage += 1
self._event('stage', self._stage, self._stages, name)
def started(self, params):
"""Start the process.
Idempotent.
"""
self._done = False
self._event('started', params)
def done(self):
"""Done with process.
Idempotent.
"""
if not self._done:
self._done = True
self._event('done')
def error(self, code, err):
"""Done with process due to an error.
Idempotent.
"""
if not self._done:
self._done = True
self._event('error', code, err)
def debug(self, msg):
"""Debugging message.
"""
self._event('debug', msg)
def register_job(self, job_id):
"""Register a new long-running job.
"""
raise ValueError("Method is not supported anymore")
def register_app(self, app_id):
"""Register a new long-running app process.
"""
raise ValueError("Method is not supported anymore")
# get/set 'stage' property
@property
def stage(self):
return self._stage
@stage.setter
def stage(self, value):
if not isinstance(value, int):
raise ValueError("stage ({}) must be an int")
elif value < 0:
raise ValueError("stage ({}) must be >= 0".format(value))
elif value > self._stages:
raise ValueError("stage ({}) must be <= num. stages ({})"
.format(value, self._stages))
self._stage = value
self._event('stage', self._stage, self._stages, '')
# get/set 'stages' (number of stages) property
@property
def stages(self):
return self._stages
@stages.setter
def stages(self, value):
if not isinstance(value, int):
raise ValueError("stages ({}) must be an int")
elif value < 1:
raise ValueError("stages ({}) must be >= 1".format(value))
elif value < self._stage:
raise ValueError("stages ({}) must be >= cur. stage ({})"
.format(value, self._stage))
self._stages = value
class LifecycleObserver(object):
"""Interface that defines the lifecycle events of a service,
in terms of callbacks. These callbacks will be used by the
:class:`IpService` to communicate with the IPython kernel,
but they can also be extended to perform service-specific actions.
"""
def started(self, params):
"""Called before execution starts"""
pass
def stage(self, num, total, name):
"""Called for stage changes"""
pass
def done(self):
"""Called on successful completion"""
pass
def error(self, code, err):
"""Called on fatal error"""
pass
def debug(self, msg):
"""Debugging message"""
pass
def register_job(self, job_id):
"""Register a long-running job"""
pass
def register_app(self, app_id):
"""Register a an app job that's composed of several subjobs"""
pass
class LifecycleHistory(LifecycleObserver):
"""Record duration between start/end in lifecycle events.
"""
def __init__(self, method, max_save=1000):
self._method = method
self._t = [None, None]
self._p = None
self._hist = deque() # each item: (t0, t1, dur, [params])
self._maxlen = max_save
self._cur_stage, self._nstages = 0, 0
def get_durations(self):
"""Past durations of the method.
:return: All the past durations, in seconds
:rtype: iterable of double
"""
return (x[2] for x in self._hist)
def started(self, params):
"""Called when execution starts
"""
self._t[0] = time.time()
self._p = params
def stage(self, num, ttl, name):
self._cur_stage = num
self._nstages = ttl
def done(self):
"""Called on successful completion
"""
self._t[1] = time.time()
dur = self._t[1] - self._t[0]
self._hist.append(tuple(self._t + [dur, self._p]))
if len(self._hist) > self._maxlen:
self._hist.popleft()
def error(self, code, err):
"""Called on fatal error"""
pass
def estimated_runtime(self, params):
"""Based on history and params, estimate runtime for function.
"""
        dur = list(self.get_durations())  # materialize the generator so len() works
        if len(dur) == 0:
estimate = -1 # no @!$%# idea
else:
# dumb: ignore params, take mean
estimate = sum(dur) / len(dur)
return estimate
class LifecyclePrinter(LifecycleObserver):
"""Observe lifecycle events and print out messages to stdout.
This allows the front-end to get the current status of the process
by simply looking for 'special' lines on stdout.
After the prefix there is a 1-letter code:
* S - started
* D - done
* P - progress ; rest of line is '<name>,<num>,<num>' meaning: stage name,current,total
* E - error ; rest of line is JSON object with key/vals about the error.
For details see the :class:`ServiceError` subclasses.
Example:
>>> subj = LifecycleSubject(stages=3)
>>> lpr = LifecyclePrinter()
>>> subj.register(lpr)
>>> subj.started([])
@@S
>>> subj.advance("foo")
@@Pfoo,1,3
>>> subj.done()
@@D
"""
#: Special prefixes for output to stdout
#: that indicates the status of the process.
SPECIAL_PFX = '@@'
def _write(self, s):
sys.stdout.write(self.SPECIAL_PFX + s + "\n")
sys.stdout.flush()
def started(self, params):
self._write('S')
def done(self):
self._write('D')
def stage(self, n, total, name):
self._write('P{},{:d},{:d}'.format(name, n, total))
def error(self, code, err):
self._write('E' + err.as_json())
def debug(self, msg):
self._write('G' + msg)
def register_job(self, job_id):
self._write('J' + job_id)
def register_app(self, app_id):
self._write('A' + app_id)
class LifecycleLogger(LifecycleObserver):
"""Log lifecycle messages in a simple but structured format,
to a file.
"""
MAX_MSG_LEN = 240 # Truncate message to this length, in chars
def __init__(self, name, debug=False):
"""Create a Python logging.Logger with the given name, under the existing
IPython logging framework.
:param name: Name of logger
:type name: str
:param debug: Whether to set debug as the log level
:type debug: bool
"""
self._name = name
# use the IPython application singleton's 'log' trait
# self._log = Application.instance().log
self._log = kblogging.get_logger(name)
if debug:
self._log.setLevel(logging.DEBUG)
else:
self._log.setLevel(logging.INFO)
self._is_debug = debug
self._start_time = None
def _write(self, level, event, kvp):
kvp['severity'] = logging.getLevelName(level)
kblogging.log_event(self._log, event, kvp)
def started(self, params):
# note: quote params so the logging can handle spaces inside them
#pstr = str(params).replace('"', '\\"') # escape embedded quotes
pstr = str(params).replace('"', "'") # change dbl to single quotes
pstr = pstr.replace('\\','') # eliminate dbl-backslashes
pstr = pstr.replace("\'", "'") # and so on? yeesh!
# now extract actual params
psets = ','.join(re.findall('{\s*\'stepId\'.*?\]', pstr))
self._write(logging.INFO, "func.begin", {'params': psets})
self._start_time = time.time()
def done(self):
t = time.time()
if self._start_time is not None:
dur = t - self._start_time
self._start_time = None
else:
dur = -1
self._write(logging.INFO, "func.end", {'dur': dur})
def stage(self, n, total, name):
self._write(logging.INFO, "func.stage.{}".format(name),
{'num': n, 'total': total})
def error(self, code, err):
if len(str(err)) > self.MAX_MSG_LEN:
            err = str(err)[:self.MAX_MSG_LEN] + '[..]'  # truncate the message, not the exception
self._write(logging.ERROR, "func.error", {'errcode': code, 'errmsg':err})
def debug(self, msg):
if self._is_debug:
self._write(logging.DEBUG, "func.debug", {'dbgmsg': msg})
def register_job(self, job_id):
self._write(logging.INFO, "start job", {'jobid': "id={}".format(job_id)})
def register_app(self, app_id):
self._write(logging.INFO, "start app", {'jobid': "id={}".format(app_id)})
class ServiceMethod(trt.HasTraits, LifecycleSubject):
"""A method of a service.
Defines some metadata and a function, using :meth:`set_func`,
to run the service. Call the class instance like a function
to execute the service in its wrapped mode.
Note that for services to be able to chain their results forward to
the next called service, a method _must_ return a value.
Example usage:
>>> svc = Service()
>>> def multiply(m, a,b): return a*b
>>> meth = ServiceMethod(svc, quiet=True)
>>> meth.set_func(multiply, (trt.CFloat(), trt.CFloat()), (trt.Float(),))
>>> c = meth(9, 8)[0]
>>> c
72
>>> # validation catches bad args, function isn't called
>>> c = meth("strawberry", "alarmclock")
>>> print(c)
None
"""
#: Name of the method; should be short identifier
name = trt.Unicode()
#: Description of the method
desc = trt.Unicode()
#: Parameters of method, a Tuple of traits
params = trt.Tuple()
#: Output of the method, a Tuple of traits
outputs = trt.Tuple()
def __init__(self, status_class=LifecycleHistory, quiet=False,
func=None, visible=True, **meta):
"""Constructor.
:param status_class: Subclass of LifecycleObserver to instantiate
and use by default for status queries.
Other observers can be used with :meth:`register`.
:type status_class: type
:param bool quiet: If True, don't add the printed output
:param func: Function to auto-wrap, if present
:param visible: Whether this service is 'visible' to the UI
:param meta: Other key/value pairs to set as traits of the method.
"""
LifecycleSubject.__init__(self)
self.name, self.full_name, self.run = "", "", None
self._visible = visible
self._history = status_class(self)
self.register(self._history)
self._observers = [] # keep our own list of 'optional' observers
# set traits from 'meta', if present
for key, val in meta.iteritems():
if hasattr(self, key):
setattr(self, key, val)
# Call set_func() with metadata from function
# docstring, if function is given
if func is not None:
self.desc = get_func_desc(func)
params, output, vis_info = get_func_info(func)
self.set_func(func, tuple(params), (output,), vis_info)
# Set logging level. Do this last so it can use func. name
self.quiet(quiet)
def quiet(self, value=True):
"""Control printing of status messages.
"""
if value:
# make it quiet
if self._observers: # for idempotence
map(self.unregister, self._observers)
self._observers = []
else:
# make some noise
if not self._observers: # for idempotence
debug = util.kbase_debug_mode()
self._observers = [LifecyclePrinter(),
LifecycleLogger(self.full_name, debug=debug)]
map(self.register, self._observers)
def set_func(self, fn, params, outputs, vis_info):
"""Set the main function to run, and its metadata.
Although params and outputs are normally traits or
subclasses of traits defined in kbtypes, the value
None is also allowed for return values.
:param fn: Function object to run
:param params: tuple of traits describing input parameters
:param outputs: tuple of traits, describing the output value(s)
:param vis_info: visualization information, with two keys:
* 'widget': Name of the default widget.
* 'embed_widget': Whether it should automatically be shown, default = True.
:type vis_info: dict
:raise: ServiceMethodParameterError, if function signature does not match
ValueError, if None is given for a param
"""
self.run = fn
if self.name is None:
self.name = fn.__name__
self.full_name = '.'.join([fn.__module__, self.name])
# Handle parameters
for i, p in enumerate(params):
if p is None:
raise ValueError("None is not allowed for a parameter type")
p.name = "param{:d}".format(i)
self.params = params
# Handle outputs
for i, o in enumerate(outputs):
o.name = "output{:d}".format(i)
# Set widget name
self.input_widget = None
if 'input_widget' in vis_info and vis_info['input_widget'] is not None:
self.input_widget = vis_info['input_widget']
self.output_widget = None
if 'output_widget' in vis_info and vis_info['output_widget'] is not None:
self.output_widget = vis_info['output_widget']
# Set embed_widget
self.embed_widget = True
if 'embed' in vis_info and vis_info['embed_widget'] is not None:
self.embed_widget = vis_info['embed_widget']
self.outputs = outputs
self._one_output_ok = len(outputs) == 1
def __call__(self, *params):
"""Run the method when the class instance is called like
a function.
:param params: List of parameters for the method
:return: From function given with :meth:`set_func`
:raise: ServiceMethodParameterError, if parameters don't validate
"""
result = None
self.reset()
try:
self._validate(params, self.params)
self.started(params)
tmpresult = self.run(self, *params)
if self._one_output_ok and not is_sequence(tmpresult):
tmpresult = (tmpresult,)
self._validate(tmpresult, self.outputs)
result = tmpresult
self.done()
except ServiceMethodError as err:
self.error(-2, err)
except Exception as err:
            tb = sys.exc_info()[2]
self.error(-1, ServiceMethodError(self, err, tb=tb))
# output object contains:
# data
# default widget name
# whether it should automatically embed the result or not
output_obj = {'data': result,
'widget': self.output_widget,
'embed': self.embed_widget}
sys.stdout.write(json.dumps(output_obj))
return result
def _validate(self, values, specs):
if len(values) != len(specs):
raise ServiceMethodParameterError(self, "Wrong number of arguments. got={} wanted={}"
.format(len(values), len(specs)))
for val, spec in zip(values, specs):
if spec is None:
if val is not None:
err = "None expected, got {}".format(val)
raise ServiceMethodParameterError(self, "Argument type error: {}".format(err))
else:
try:
spec.validate(spec, val)
                except trt.TraitError as err:
raise ServiceMethodParameterError(self, "Argument type error: {}".format(err))
def estimated_runtime(self, params=()):
"""Calculate estimated runtime, for the given parameters.
:param tuple params: List of parameter values
:return: Runtime, in seconds. Use -1 for "unknown"
:rtype: double
"""
return self._history.estimated_runtime(params)
## Utility functions
@property
def token(self):
"""Authorization token passed in from front-end.
"""
return os.environ['KB_AUTH_TOKEN']
@property
def workspace_id(self):
"""Workspace ID passed in from front-end.
"""
return os.environ['KB_WORKSPACE_ID']
def poll_job(self, job_id):
raise ValueError("Method is not supported anymore")
## JSON serialization
def as_json(self, formatted=False, **kw):
d = {
'name': self.name,
'desc': self.desc,
'input_widget': self.input_widget,
'output_widget': self.output_widget,
'params': [(p.name, p.get_metadata('ui_name'), str(p), p.get_metadata('desc')) for p in self.params],
'outputs': [(p.name, str(p), p.get_metadata('desc')) for p in self.outputs],
'visible': self._visible
}
if formatted:
return json.dumps(d, **kw)
return d
trt_2_jschema = {'a unicode string': 'string',
'an int': 'integer',
'a list or None': 'array',
'a set or None': 'array',
'a tuple or None': 'array',
'a dict or None': 'object',
'a float': 'number',
'a boolean': 'boolean'}
def as_json_schema(self, formatted=False, **kw):
d = {
'title': self.name,
'type': 'object',
'description': self.desc,
'properties': {
'parameters': {p.name: {'type': self.trt_2_jschema.get(p.info(), str(p)),
'description': p.get_metadata('desc'),
'ui_name': p.get_metadata('ui_name'),
'default': p.get_default_value()} for p in self.params},
'widgets': {'input': self.input_widget, 'output': self.output_widget },
},
'visible': self._visible,
'returns': {p.name: {'type': self.trt_2_jschema.get(p.info(), str(p)),
'description': p.get_metadata('desc')} for p in self.outputs}
}
if formatted:
return json.dumps(d, **kw)
return d
def as_json_schema_dumps(self):
return json.dumps(self.as_json_schema())
## Simplified, decorator-based, workflow
_curr_service = None
def init_service(**kw):
"""Call this first, to create & set service.
All arguments must be keywords. See :class:`Service` and
:meth:`Service.__init__`.
"""
global _curr_service
_curr_service = Service(**kw)
def configure_service(**kw):
"""Set service attributes given in input keywords.
:raise: AttributeError if there is no such attribute,
ValueError if service is not initialized
"""
if _curr_service is None:
raise ValueError("Attempt to configure service before init_service()")
for key, value in kw.iteritems():
setattr(_curr_service, key, value)
def method(name=None):
"""Decorator function for creating new services.
Example usage::
@method(name="MyMethod")
def my_service(method, arg1, arg2, etc.):
pass # method body goes here
"""
if _curr_service is None:
raise ValueError("Attempt to call @method decorator before init_service()")
def wrap(fn, name=name):
if name is None:
name = fn.__name__
wrapped_fn = _curr_service.add_method(name=name, func=fn)
# copy docstring from original fn to wrapped fn, so that
# interactive help, autodoc, etc. will show the 'real' docs.
wrapped_fn.__doc__ = fn.__doc__
return wrapped_fn
return wrap
def finalize_service():
"""Call this last, to finalize and register the service.
"""
global _curr_service
register_service(_curr_service)
_curr_service = None # reset to un-initialized
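# A minimal, hedged sketch of the decorator-based workflow above (the names
# and the docstring-annotation style are illustrative; it assumes, as the
# add_method(func=...) path suggests, that get_func_info() reads parameter
# and return types from the docstring):
#
#     init_service(name="taxicab", desc="Yellow Cab taxi service",
#                  version="0.0.1")
#
#     @method(name="pickup")
#     def pickup(meth, num):
#         """Pick up people.
#
#         :param num: Number of people
#         :type num: kbtypes.Unicode
#         :return: Confirmation message
#         :rtype: kbtypes.Unicode
#         """
#         return "picked up {}".format(num)
#
#     finalize_service()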
#############################################################################
def example():
# New data type for a Person
class Person(trt.Unicode):
default_value = "Joe Schmoe"
info_text = 'the name of a person'
        def validate(self, obj, value):
            return trt.Unicode.validate(self, obj, value)
# Function that does the work of the "pickup" method
def pick_up_people(method, num, where_from, where_to, who):
method.stages = 3
if num < 1:
raise ValueError("Can't pick up less than one person ({})".format(num))
if num == 99:
return 1, 2, 3
print("{} called for {:d} people to be driven from {} to {}".format(who, num, where_from, where_to))
time.sleep(0.5)
method.advance("pickup: " + where_from)
print("picking up {} and {:d} other bozos at {}".format(who, num - 1, where_from))
time.sleep(0.5)
method.advance('dropoff: ' + where_to)
print("dropping off {} and {:d} other bozos at {}".format(who, num - 1, where_to))
# for one return value, a list/tuple is optional
if num < 5:
return num
else:
return [num]
# Service creation
# =================
# Create a new service
service = Service(name="taxicab", desc="Yellow Cab taxi service", version="0.0.1-alpha")
# Create and initialize a method in the service
method = ServiceMethod(name="pickup", desc="Pick up people in a taxi")
    method.set_func(pick_up_people,
                    (trt.Int(1, desc="number of people"), trt.Unicode("", desc="Pick up location"),
                     trt.Unicode("", desc="main drop off location"),
                     Person("", desc="Person who called the taxi")),
                    (trt.Int(0, desc="Number of people dropped off"),),
                    {})  # no visualization info; defaults apply
service.add_method(method)
# Register service
register_service(service)
hdr = lambda s: "\n### " + s + " ###\n"
# Service usage
# ==============
# Registry
# --------
# (pretend this is the start of a new module)
# a. Show all registered services
print(hdr("All registered service schema"))
print(get_all_services(as_json_schema=True))
# b. get service/method from registry
method = get_service("taxicab").get_method("pickup")
# JSON metadata
# -------------
print(hdr("JSON metadata"))
print(method.as_json())
print(hdr("JSON Metadata"))
print(method.as_json(formatted=True, indent=2))
print(hdr("JSON Schema Metadata"))
print(method.as_json_schema(formatted=True, indent=2))
# Validation
# ----------
print(hdr("Bad parameters"))
r = method(1)
assert(r is None)
print(hdr("Function error"))
r = method(0, "here", "there", "me")
assert (r is None)
# Failure, bad output
print(hdr("Bad output type"))
r = method(99, "here", "there", "me")
assert (r is None)
# Successful run
# --------------
print(hdr("Success 1"))
r = method(3, "Berkeley", "San Francisco", "Willie Brown")
assert(r is not None)
print(hdr("Success 2"))
r = method(9, "Dubuque", "Tallahassee", "Cthulhu")
assert (r is not None)
if __name__ == '__main__':
example()
| msneddon/narrative | src/biokbase/narrative/common/service.py | Python | mit | 37,656 |
# -*- coding: utf-8 -*-
#
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2016 Sucros Clear Information Technologies PLC.
# This module copyright (C) 2014 Therp BV (<http://therp.nl>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
{
'name': 'Database content cleanup',
'summary': 'Remove database content',
'version': '8.0',
'author': "Sucros Clear Information Technologies PLC, Therp BV",
'depends': ['base'],
'license': 'AGPL-3',
'category': 'Tools',
'data': [
'view/purge_wizard.xml',
'view/menu.xml',
],
'installable': True,
'auto_install': False,
}
| Clear-ICT/odoo-addons | content_cleanup/__openerp__.py | Python | agpl-3.0 | 1,294 |
import json
import os
from django import forms
from django.core.urlresolvers import reverse_lazy
from django.template import Context
from django.template.loader import render_to_string
from django.utils.safestring import mark_safe
from django.forms.util import flatatt
from django_summernote.settings import summernote_config
from django.conf import settings
__all__ = ['SummernoteWidget', 'SummernoteInplaceWidget']
def _static_url(url):
return os.path.join(settings.STATIC_URL, url)
class SummernoteWidgetBase(forms.Textarea):
@classmethod
def template_contexts(cls):
return {
'toolbar': json.dumps(summernote_config['toolbar']),
'lang': summernote_config['lang'],
'airMode': 'true' if summernote_config['airMode'] else 'false',
'height': summernote_config['height'],
'url': {
'upload_attachment':
reverse_lazy('django_summernote-upload_attachment'),
},
}
def value_from_datadict(self, data, files, name):
value = data.get(name, None)
if value in summernote_config['empty']:
return None
return value
class SummernoteWidget(SummernoteWidgetBase):
def render(self, name, value, attrs=None):
attrs_for_textarea = attrs.copy()
attrs_for_textarea['hidden'] = 'true'
html = super(SummernoteWidget, self).render(name,
value,
attrs_for_textarea)
final_attrs = self.build_attrs(attrs)
del final_attrs['id'] # Use original attributes without id.
url = reverse_lazy('django_summernote-editor',
kwargs={'id': attrs['id']})
html += render_to_string('django_summernote/widget_iframe.html',
{
'id': '%s-iframe' % (attrs['id']),
'src': url,
'attrs': flatatt(final_attrs),
'width': summernote_config['width'],
'height': summernote_config['height'],
})
return mark_safe(html)
class SummernoteInplaceWidget(SummernoteWidgetBase):
class Media:
css = {'all': (summernote_config['inplacewidget_external_css']) + (
_static_url('django_summernote/summernote.css'),
)}
js = (summernote_config['inplacewidget_external_js']) + (
_static_url('django_summernote/summernote.min.js'),
_static_url('django_summernote/jquery.ui.widget.js'),
_static_url('django_summernote/jquery.iframe-transport.js'),
_static_url('django_summernote/jquery.fileupload.js'),
)
if summernote_config['lang'] != 'en-US':
js += (_static_url(
'django_summernote/lang/summernote-%s.js' % (summernote_config['lang'])
), )
def render(self, name, value, attrs=None):
attrs_for_textarea = attrs.copy()
attrs_for_textarea['hidden'] = 'true'
attrs_for_textarea['id'] += '-textarea'
html = super(SummernoteInplaceWidget, self).render(name,
value,
attrs_for_textarea)
html += render_to_string(
'django_summernote/widget_inplace.html',
Context(dict({
'value': value if value else '',
'id': attrs['id'],
}, **SummernoteWidgetBase.template_contexts()))
)
return mark_safe(html)
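# Hedged usage sketch (not part of the original module): attach either widget
# to a Django form field; `PostForm` is an illustrative name.
#
#     from django import forms
#     from django_summernote.widgets import SummernoteWidget
#
#     class PostForm(forms.Form):
#         content = forms.CharField(widget=SummernoteWidget())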
| gquirozbogner/contentbox-master | third_party/django_summernote/widgets.py | Python | apache-2.0 | 3,743 |
# ----------------------------------------------------------------------
#
# Brad T. Aagaard, U.S. Geological Survey
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2017 University of California, Davis
#
# See COPYING for license information.
#
# ----------------------------------------------------------------------
#
# @file spatialdata/spatialdb/SimpleDB.py
#
# @brief Python manager for simple spatial database.
#
# Factory: spatial_database
from .SpatialDBObj import SpatialDBObj
from .spatialdb import SimpleDB as ModuleSimpleDB
class SimpleDB(SpatialDBObj, ModuleSimpleDB):
"""
Python manager for simple spatial database.
Factory: spatial_database
INVENTORY
Properties
- *query_type* Type of query to perform [nearest, linear].
Facilities
- *iohandler* I/O handler for database.
"""
import pythia.pyre.inventory
queryType = pythia.pyre.inventory.str("query_type", default="nearest")
queryType.validator = pythia.pyre.inventory.choice(["nearest", "linear"])
queryType.meta['tip'] = "Type of query to perform."
from .SimpleIOAscii import SimpleIOAscii
iohandler = pythia.pyre.inventory.facility("iohandler", family="simpledb_io",
factory=SimpleIOAscii)
iohandler.meta['tip'] = "I/O handler for database."
# PUBLIC METHODS /////////////////////////////////////////////////////
def __init__(self, name="simpledb"):
"""
Constructor.
"""
SpatialDBObj.__init__(self, name)
# PRIVATE METHODS ////////////////////////////////////////////////////
def _configure(self):
"""
Set members based on inventory.
"""
SpatialDBObj._configure(self)
ModuleSimpleDB.setIOHandler(self, self.iohandler)
ModuleSimpleDB.setQueryType(self, self._parseQueryString(self.queryType))
def _createModuleObj(self):
"""
Create Python module object.
"""
ModuleSimpleDB.__init__(self)
def _parseQueryString(self, label):
if label.lower() == "nearest":
value = ModuleSimpleDB.NEAREST
elif label.lower() == "linear":
value = ModuleSimpleDB.LINEAR
else:
raise ValueError("Unknown value for query type '%s'." % label)
return value
# FACTORIES ////////////////////////////////////////////////////////////
def spatial_database():
"""
Factory associated with SimpleDB.
"""
return SimpleDB()
# End of file
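# Hedged usage sketch (illustrative, not part of the original module): obtain
# a database through the Pyre factory and check the query-type parsing helper.
#
#     db = spatial_database()                                   # a SimpleDB
#     assert db._parseQueryString("linear") == SimpleDB.LINEAR
#     assert db._parseQueryString("nearest") == SimpleDB.NEAREST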
| geodynamics/spatialdata | spatialdata/spatialdb/SimpleDB.py | Python | mit | 2,610 |
# Doc target.
#
# Copyright (C) 2015 Denis BOURGE
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
# USA
from .dependencies.builder import Builder
from .dependencies.utils import Utils
from .config import config
BROWSER = "chromium"
class DocBuilder(Builder):
def __init__(self, options, game):
super().__init__("doc", options, game)
def build_game(self):
Utils.execCommand("doxygen")
def execute_game(self):
Utils.execCommand("{} site/doc/html/index.html".format(BROWSER))
| Srynetix/hx3d-framework | builds/doc.py | Python | lgpl-3.0 | 1,246 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
from udata.models import Reuse, Dataset
from .models import Discussion
def discussions_for(user, only_open=True):
'''
Build a queryset to query discussions related to a given user's assets.
It includes discussions coming from the user's organizations
:param bool only_open: whether to include closed discussions or not.
'''
# Only fetch required fields for discussion filtering (id and slug)
# Greatly improve performances and memory usage
datasets = Dataset.objects.owned_by(user.id, *user.organizations).only('id', 'slug')
reuses = Reuse.objects.owned_by(user.id, *user.organizations).only('id', 'slug')
qs = Discussion.objects(subject__in=list(datasets) + list(reuses))
if only_open:
qs = qs(closed__exists=False)
return qs
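# Hedged usage sketch (illustrative; `user` stands for any udata user object
# exposing `id` and `organizations`, and `notify` is a hypothetical handler):
#
#     open_qs = discussions_for(user)                  # open discussions only
#     all_qs = discussions_for(user, only_open=False)  # include closed ones
#     for discussion in open_qs:
#         notify(discussion)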
| jphnoel/udata | udata/core/discussions/actions.py | Python | agpl-3.0 | 871 |
# -*- coding: utf-8 -*-
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2004-2006 Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Portuguese version translated by Duarte Loreto <[email protected]>, 2007.
# Based on the Spanish file.
"""
Portuguese-specific classes for parsing and displaying dates.
"""
#-------------------------------------------------------------------------
#
# Python modules
#
#-------------------------------------------------------------------------
import re
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from ..lib.date import Date
from ._dateparser import DateParser
from ._datedisplay import DateDisplay
from ._datehandler import register_datehandler
#-------------------------------------------------------------------------
#
# Portuguese parser
#
#-------------------------------------------------------------------------
class DateParserPT(DateParser):
modifier_to_int = {
'antes de' : Date.MOD_BEFORE,
'antes' : Date.MOD_BEFORE,
'ant.' : Date.MOD_BEFORE,
'ant' : Date.MOD_BEFORE,
'até' : Date.MOD_BEFORE,
'depois de' : Date.MOD_AFTER,
'depois' : Date.MOD_AFTER,
'dep.' : Date.MOD_AFTER,
'dep' : Date.MOD_AFTER,
'aprox.' : Date.MOD_ABOUT,
'aprox' : Date.MOD_ABOUT,
'apr.' : Date.MOD_ABOUT,
'apr' : Date.MOD_ABOUT,
'cerca de' : Date.MOD_ABOUT,
'ca.' : Date.MOD_ABOUT,
'ca' : Date.MOD_ABOUT,
'c.' : Date.MOD_ABOUT,
'por volta de' : Date.MOD_ABOUT,
'por volta' : Date.MOD_ABOUT,
'pvd.' : Date.MOD_ABOUT,
}
calendar_to_int = {
'gregoriano' : Date.CAL_GREGORIAN,
'g' : Date.CAL_GREGORIAN,
'juliano' : Date.CAL_JULIAN,
'j' : Date.CAL_JULIAN,
'hebreu' : Date.CAL_HEBREW,
'h' : Date.CAL_HEBREW,
'islâmico' : Date.CAL_ISLAMIC,
'i' : Date.CAL_ISLAMIC,
'revolucionário' : Date.CAL_FRENCH,
'r' : Date.CAL_FRENCH,
'persa' : Date.CAL_PERSIAN,
'p' : Date.CAL_PERSIAN,
'swedish' : Date.CAL_SWEDISH,
's' : Date.CAL_SWEDISH,
}
quality_to_int = {
'estimado' : Date.QUAL_ESTIMATED,
'est.' : Date.QUAL_ESTIMATED,
'est' : Date.QUAL_ESTIMATED,
'calc.' : Date.QUAL_CALCULATED,
'calc' : Date.QUAL_CALCULATED,
'calculado' : Date.QUAL_CALCULATED,
}
def init_strings(self):
DateParser.init_strings(self)
_span_1 = ['de']
_span_2 = ['a']
        _range_1 = ['entre', r'ent\.', 'ent']
        _range_2 = ['e']
        self._span = re.compile(r"(%s)\s+(?P<start>.+)\s+(%s)\s+(?P<stop>.+)" %
                                ('|'.join(_span_1), '|'.join(_span_2)),
                                re.IGNORECASE)
        self._range = re.compile(r"(%s)\s+(?P<start>.+)\s+(%s)\s+(?P<stop>.+)" %
                                 ('|'.join(_range_1), '|'.join(_range_2)),
                                 re.IGNORECASE)
#-------------------------------------------------------------------------
#
# Portuguese display
#
#-------------------------------------------------------------------------
class DateDisplayPT(DateDisplay):
"""
Portuguese language date display class.
"""
long_months = ( "", "Janeiro", "Fevereiro", "Março", "Abril", "Maio",
"Junho", "Julho", "Agosto", "Setembro", "Outubro",
"Novembro", "Dezembro" )
short_months = ( "", "Jan", "Fev", "Mar", "Abr", "Mai", "Jun",
"Jul", "Ago", "Set", "Out", "Nov", "Dez" )
calendar = (
"", "Juliano", "Hebreu",
"Revolucionário", "Persa", "Islâmico",
"Sueco"
)
_mod_str = ("","antes de ","depois de ","por volta de ","","","")
_qual_str = ("","estimado ","calculado ")
formats = (
"AAAA-MM-DD (ISO)", "Numérica", "Mês Dia, Ano",
"MÊS Dia, Ano", "Dia Mês, Ano", "Dia MÊS, Ano"
)
# this must agree with DateDisplayEn's "formats" definition
# (since no locale-specific _display_gregorian exists, here)
def display(self,date):
"""
Return a text string representing the date.
"""
mod = date.get_modifier()
cal = date.get_calendar()
qual = date.get_quality()
start = date.get_start_date()
newyear = date.get_new_year()
qual_str = self._qual_str[qual]
if mod == Date.MOD_TEXTONLY:
return date.get_text()
elif start == Date.EMPTY:
return ""
elif mod == Date.MOD_SPAN:
d1 = self.display_cal[cal](start)
d2 = self.display_cal[cal](date.get_stop_date())
scal = self.format_extras(cal, newyear)
return "%s%s %s %s %s%s" % (qual_str, 'de', d1, 'a', d2, scal)
elif mod == Date.MOD_RANGE:
d1 = self.display_cal[cal](start)
d2 = self.display_cal[cal](date.get_stop_date())
scal = self.format_extras(cal, newyear)
return "%s%s %s %s %s%s" % (qual_str, 'entre', d1, 'e', d2, scal)
else:
text = self.display_cal[date.get_calendar()](start)
scal = self.format_extras(cal, newyear)
return "%s%s%s%s" % (qual_str, self._mod_str[mod], text, scal)
#-------------------------------------------------------------------------
#
# Register classes
#
#-------------------------------------------------------------------------
register_datehandler(
('pt_PT', 'pt_PT.UTF-8', 'pt_BR', 'pt_BR.UTF-8',
     'pt', 'portuguese', 'Portuguese', ('%d-%m-%Y',)),
DateParserPT, DateDisplayPT)
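# Hedged usage sketch (illustrative, not part of the original module): once
# the handler is registered, Portuguese text dates round-trip through the
# parser and display classes.
#
#     parser, displayer = DateParserPT(), DateDisplayPT()
#     date = Date()
#     parser.set_date(date, "entre 1990 e 1995")  # range: between 1990 and 1995
#     displayer.display(date)                     # -> "entre 1990 e 1995"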
| jralls/gramps | gramps/gen/datehandler/_date_pt.py | Python | gpl-2.0 | 6,889 |
#This submodule includes routines to manipulate image arrays
import numpy as np
from scipy.interpolate import griddata
from utilities.imaging.fitting import legendre2d
import pdb
def unpackimage(data,xlim=[-1,1],ylim=[-1,1],remove=True):
"""Convert a 2D image into x,y,z coordinates.
x will relate to 2nd index in order to correspond to abscissa in imshow
    y will relate to 1st index in order to correspond to ordinate in imshow
if remove is True, NaNs will not be returned in the list of coordinates
"""
x,y = np.meshgrid(np.linspace(xlim[0],xlim[1],np.shape(data)[1]),\
np.linspace(ylim[0],ylim[1],np.shape(data)[0]))
## y = y.flatten()
## x = x.flatten()
if remove is True:
ind = np.invert(np.isnan(data.flatten()))
return x.flatten()[ind],y.flatten()[ind],data.flatten()[ind]
return x.flatten(),y.flatten(),data.flatten()
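# Hedged doctest-style sketch (illustrative, not part of the original module):
#
#     >>> d = np.array([[1., np.nan], [3., 4.]])
#     >>> x, y, z = unpackimage(d)   # the NaN pixel is dropped by default
#     >>> z
#     array([1., 3., 4.])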
def autoGrid(d,xr=[-1,1],yr=[-1,1]):
"""
Create a meshgrid based on the shape of the image d
"""
xspan = np.linspace(xr[0],xr[1],np.shape(d)[1])
yspan = np.linspace(yr[0],yr[1],np.shape(d)[0])
return np.meshgrid(xspan,yspan)
def shiftNaN(img,n=1,axis=0):
"""This function shifts an image in a NaN padded array
    Specify which axis to shift, and specify which direction
"""
#Construct array to insert
    if axis == 0:
ins = np.repeat(np.nan,np.abs(n)*\
np.shape(img)[1]).reshape(np.abs(n),np.shape(img)[1])
else:
ins = np.repeat(np.nan,np.abs(n)*\
np.shape(img)[0]).reshape(np.abs(n),np.shape(img)[0])
#If direction=0, shift to positive
if n > 0:
img = np.delete(img,np.arange(np.shape(img)[1]-\
n,np.shape(img)[1]),axis=axis)
img = np.insert(img,0,ins,axis=axis)
else:
n = np.abs(n)
img = np.delete(img,np.arange(n),axis=axis)
img = np.insert(img,-1,ins,axis=axis)
return img
def padNaN(img,n=1,axis=0):
"""Pads an image with rows or columns of NaNs
If n is positive, they are appended to the end of
the specified axis. If n is negative, they are
appended to the beginning
"""
#Construct array to insert
    if axis == 0:
ins = np.repeat(np.nan,np.abs(n)*\
np.shape(img)[1]).reshape(np.abs(n),np.shape(img)[1])
else:
ins = np.repeat(np.nan,np.abs(n)*\
np.shape(img)[0]).reshape(np.abs(n),np.shape(img)[0])
ins = np.transpose(ins)
#If direction=0, shift to positive
if n < 0:
img = np.concatenate((ins,img),axis=axis)
else:
img = np.concatenate((img,ins),axis=axis)
return img
def padRect(img,nan_num = 1):
"""Pads an image with an outer NaN rectangle of width nan_num"""
img = padNaN(img,n=nan_num,axis=0)
img = padNaN(img,n=-nan_num,axis=0)
img = padNaN(img,n=nan_num,axis=1)
img = padNaN(img,n=-nan_num,axis=1)
return img
def borderFill(img,n = 1,fill_value = np.NaN):
img[:n],img[-n:] = fill_value,fill_value
img[:,:n],img[:,-n:] = fill_value,fill_value
return img
def tipTiltPiston(img,piston,tip,tilt,tx=None,ty=None):
"""This function adds a constant and
tip and tilt to an array
This makes use of tilt arrays tx,ty
If not provided, compute using meshgrid
Updated
"""
if tx is None:
ty,tx = np.meshgrid(np.arange(np.shape(img)[1]),\
np.arange(np.shape(img)[0]))
tx = (tx-np.mean(tx)) / tx.max()
ty = (ty-np.mean(ty)) / ty.max()
return img + piston + tip*tx + tilt*ty
def nearestNaN(arr,method='nearest'):
"""Fill the NaNs in a 2D image array with the griddata
nearest neighbor interpolation"""
ishape = np.shape(arr)
#Unpack image both with and without removing NaNs
x0,y0,z0 = unpackimage(arr,remove=False)
x1,y1,z1 = unpackimage(arr,remove=True)
#Interpolate onto x0,y0 grid
newarr = griddata((x1,y1),z1,(x0,y0),method=method)
return newarr.reshape(ishape)
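# Hedged usage sketch (illustrative, not part of the original module):
#
#     >>> arr = np.array([[1., np.nan], [np.nan, 4.]])
#     >>> filled = nearestNaN(arr)    # NaNs replaced by nearest valid pixel
#     >>> np.isnan(filled).any()
#     False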
def rebin(a,shape):
    """Rebin a 2D array to a smaller shape by NaN-aware block averaging."""
    sh = shape[0],a.shape[0]//shape[0],shape[1],a.shape[1]//shape[1]
    return np.nanmean(np.nanmean(a.reshape(sh),axis=3),axis=1)
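# Hedged usage sketch (illustrative): block-average a 4x4 image down to 2x2.
#
#     >>> a = np.arange(16.).reshape(4, 4)
#     >>> rebin(a, (2, 2))
#     array([[ 2.5,  4.5],
#            [10.5, 12.5]])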
def stripnans(d1,removeAll=False):
"""
Need to fix removeAll. Likely need to remove rows/columns
in a circular fashion until all perimeter NaNs are eliminated.
"""
d = np.copy(d1)
    if len(np.shape(d)) == 1:
return d[~np.isnan(d)]
if removeAll is False:
newsize = np.shape(d)[1]
else:
newsize = 1
while sum(np.isnan(d[0]))>=newsize:
d = d[1:]
while sum(np.isnan(d[-1]))>=newsize:
d = d[:-1]
if removeAll is False:
newsize = np.shape(d)[0]
else:
newsize = 1
while sum(np.isnan(d[:,0]))>=newsize:
d = d[:,1:]
while sum(np.isnan(d[:,-1]))>=newsize:
d = d[:,:-1]
return d
def transformation(x,y,r=0.,tx=0.,ty=0.):
"""Return x and y vectors after applying a rotation about
the origin and then translations in x and y
"""
x,y = np.cos(r)*x+np.sin(r)*y,-np.sin(r)*x+np.cos(r)*y
x,y = x+tx,y+ty
return x,y
def rotateImage(img,rot):
"""Apply a rotation about the center of an image using
griddata
"""
sh = np.shape(img)
x,y = np.meshgrid(np.linspace(-1,1,sh[1]),np.linspace(-1,1,sh[0]))
dx = 2./(sh[1]-1)
dy = 2./(sh[0]-1)
x,y = transformation(x,y,r=rot)
x2,y2 = np.meshgrid(np.arange(x.min(),x.max()+dx,dx),\
np.arange(y.min(),y.max()+dy,dy))
#Interpolate from x,y to x2,y2
img2 = griddata((x.flatten(),y.flatten()),img.flatten(),(x2,y2))
return stripnans(img2)
def newGridSize(img,newshape,method='linear'):
"""
Interpolate an image onto a new shape size
"""
shape1 = np.shape(img)
x1,y1 = np.meshgrid(range(shape1[1]),range(shape1[0]))
x2,y2 = np.meshgrid(np.linspace(0,shape1[1]-1,newshape[1]),\
np.linspace(0,shape1[0]-1,newshape[0]))
img2 = griddata((x1.flatten(),y1.flatten()),img.flatten(),(x2,y2),\
method=method)
return img2
def nanflatten(img):
"""
Automatically remove NaNs when flattening an image
"""
d = img.flatten()
d = d[~np.isnan(d)]
return d
def removePoly(l,order=2):
"""
Remove a polynomial up to order from a slice.
NaNs are ignored.
"""
if np.sum(~np.isnan(l))<2:
return
ind = np.invert(np.isnan(l))
x = np.arange(len(l))
fit = np.polyfit(x[ind],l[ind],order)
return l - np.polyval(fit,x)
def removeLegSlice(din,order=2,axis=0):
"""
Remove a Legendre polynomial up to order from each
slice of an image.
"""
d = np.copy(din)
    if axis == 0:
d = np.transpose(d)
for i in range(len(d)):
d[i,:] = removePoly(d[i,:],order=order)
    if axis == 0:
d = np.transpose(d)
return d
def remove2DLeg(din,xo=2,yo=0):
"""
Remove a 2D Legendre fit to din up to
xo and yo.
"""
f = legendre2d(din,xo=xo,yo=yo)[0]
return din-f
def removeDS9Regions(img,filename):
"""
Read in an SAOImage region file and set all
pixels within regions to NaN.
File should look like:
circle(x,y,rad)
box(x,y,dx,dy,0)
ellipse(x,y,dx,dy,0)
"""
#Construct index grid
y,x = np.meshgrid(range(np.shape(img)[1]),range(np.shape(img)[0]))
#Get region data
    with open(filename,'r') as f:
        lines = f.readlines()
    for l in lines:
        t = l.split('(')[0]
        n = np.array(l.split('(')[1].split(','))
        # Note: this can throw an error based on Mac vs. Windows -- if there's a carriage return
        # plus a new line, the second value should be -3.
        n[-1] = n[-1][:-2]
        n = n.astype('float')
        if t == 'circle':
            ind = (x-n[1])**2+(y-n[0])**2 < n[2]**2
        elif t == 'box':
            yind = np.logical_and(y<n[0]+n[2]/2,y>n[0]-n[2]/2)
            xind = np.logical_and(x<n[1]+n[3]/2,x>n[1]-n[3]/2)
            ind = np.logical_and(xind,yind)
        elif t == 'ellipse':
            # Ellipse regions are not implemented; mask no pixels.
            ind = np.zeros(np.shape(img),dtype=bool)
        else:
            continue
        img[ind] = np.nan
return img
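# Hedged usage sketch (illustrative): given a region file 'exclude.reg' with
# lines such as
#
#     circle(120.0,80.0,15.0)
#     box(40.0,60.0,20.0,10.0,0)
#
# all pixels inside those regions are set to NaN:
#
#     img = removeDS9Regions(img, 'exclude.reg')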
| rallured/utilities | imaging/man.py | Python | mit | 8,204 |
import functools
import hashlib
import os
import time
from collections import namedtuple
from azure.mgmt.servicebus import ServiceBusManagementClient
from azure.mgmt.servicebus.models import SBQueue, SBSubscription, AccessRights
from azure_devtools.scenario_tests.exceptions import AzureTestError
from devtools_testutils import (
ResourceGroupPreparer, AzureMgmtPreparer, FakeResource, get_region_override
)
from devtools_testutils.resource_testcase import RESOURCE_GROUP_PARAM
SERVICEBUS_DEFAULT_AUTH_RULE_NAME = 'RootManageSharedAccessKey'
SERVICEBUS_NAMESPACE_PARAM = 'servicebus_namespace'
SERVICEBUS_TOPIC_PARAM = 'servicebus_topic'
SERVICEBUS_SUBSCRIPTION_PARAM = 'servicebus_subscription'
SERVICEBUS_QUEUE_PARAM = 'servicebus_queue'
SERVICEBUS_AUTHORIZATION_RULE_PARAM = 'servicebus_authorization_rule'
SERVICEBUS_QUEUE_AUTHORIZATION_RULE_PARAM = 'servicebus_queue_authorization_rule'
# Service Bus Namespace Preparer and its shorthand decorator
class ServiceBusNamespacePreparer(AzureMgmtPreparer):
def __init__(self,
name_prefix='',
use_cache=False,
sku='Standard', location=get_region_override('westus'),
parameter_name=SERVICEBUS_NAMESPACE_PARAM,
resource_group_parameter_name=RESOURCE_GROUP_PARAM,
disable_recording=True, playback_fake_resource=None,
client_kwargs=None, random_name_enabled=True):
super(ServiceBusNamespacePreparer, self).__init__(name_prefix, 24,
random_name_enabled=random_name_enabled,
disable_recording=disable_recording,
playback_fake_resource=playback_fake_resource,
client_kwargs=client_kwargs)
self.location = location
self.sku = sku
self.resource_group_parameter_name = resource_group_parameter_name
self.parameter_name = parameter_name
self.connection_string = ''
if random_name_enabled:
self.resource_moniker = self.name_prefix + "sbname"
self.set_cache(use_cache, sku, location)
def create_resource(self, name, **kwargs):
if self.is_live:
self.client = self.create_mgmt_client(ServiceBusManagementClient)
group = self._get_resource_group(**kwargs)
retries = 4
for i in range(retries):
try:
namespace_async_operation = self.client.namespaces.create_or_update(
group.name,
name,
{
'sku': {'name': self.sku},
'location': self.location,
}
)
self.resource = namespace_async_operation.result()
break
except Exception as ex:
error = "The requested resource {} does not exist".format(group.name)
not_found_error = "Operation returned an invalid status code 'Not Found'"
if (error not in str(ex) and not_found_error not in str(ex)) or i == retries - 1:
raise
time.sleep(3)
key = self.client.namespaces.list_keys(group.name, name, SERVICEBUS_DEFAULT_AUTH_RULE_NAME)
self.connection_string = key.primary_connection_string
self.key_name = key.key_name
self.primary_key = key.primary_key
self.test_class_instance.scrubber.register_name_pair(
name,
self.resource_moniker
)
else:
self.resource = FakeResource(name=name, id=name)
self.connection_string = 'Endpoint=sb://{}.servicebus.windows.net/;SharedAccessKeyName=test;SharedAccessKey=THISISATESTKEYXXXXXXXXXXXXXXXXXXXXXXXXXXXX='.format(name)
self.key_name = SERVICEBUS_DEFAULT_AUTH_RULE_NAME
self.primary_key = 'ZmFrZV9hY29jdW50X2tleQ=='
return {
self.parameter_name: self.resource,
'{}_connection_string'.format(self.parameter_name): self.connection_string,
'{}_key_name'.format(self.parameter_name): self.key_name,
'{}_primary_key'.format(self.parameter_name): self.primary_key,
}
def remove_resource(self, name, **kwargs):
if self.is_live:
group = self._get_resource_group(**kwargs)
self.client.namespaces.delete(group.name, name, polling=False)
def _get_resource_group(self, **kwargs):
try:
return kwargs.get(self.resource_group_parameter_name)
except KeyError:
template = 'To create a service bus a resource group is required. Please add ' \
'decorator @{} in front of this service bus preparer.'
raise AzureTestError(template.format(ResourceGroupPreparer.__name__))
# Shared base class for service bus sub-resources that require a namespace and RG to exist.
class _ServiceBusChildResourcePreparer(AzureMgmtPreparer):
def __init__(self,
name_prefix='',
resource_group_parameter_name=RESOURCE_GROUP_PARAM,
servicebus_namespace_parameter_name=SERVICEBUS_NAMESPACE_PARAM,
disable_recording=True, playback_fake_resource=None,
client_kwargs=None, random_name_enabled=True):
super(_ServiceBusChildResourcePreparer, self).__init__(name_prefix, 24,
random_name_enabled=random_name_enabled,
disable_recording=disable_recording,
playback_fake_resource=playback_fake_resource,
client_kwargs=client_kwargs)
self.resource_group_parameter_name = resource_group_parameter_name
self.servicebus_namespace_parameter_name = servicebus_namespace_parameter_name
def _get_resource_group(self, **kwargs):
try:
return kwargs.get(self.resource_group_parameter_name)
except KeyError:
template = 'To create this service bus child resource service bus a resource group is required. Please add ' \
'decorator @{} in front of this service bus preparer.'
raise AzureTestError(template.format(ResourceGroupPreparer.__name__))
def _get_namespace(self, **kwargs):
try:
return kwargs.get(self.servicebus_namespace_parameter_name)
except KeyError:
template = 'To create this service bus child resource a service bus namespace is required. Please add ' \
'decorator @{} in front of this service bus preparer.'
raise AzureTestError(template.format(ServiceBusNamespacePreparer.__name__))
class ServiceBusTopicPreparer(_ServiceBusChildResourcePreparer):
def __init__(self,
name_prefix='',
use_cache=False,
parameter_name=SERVICEBUS_TOPIC_PARAM,
resource_group_parameter_name=RESOURCE_GROUP_PARAM,
servicebus_namespace_parameter_name=SERVICEBUS_NAMESPACE_PARAM,
disable_recording=True, playback_fake_resource=None,
client_kwargs=None, random_name_enabled=True):
super(ServiceBusTopicPreparer, self).__init__(name_prefix,
random_name_enabled=random_name_enabled,
resource_group_parameter_name=resource_group_parameter_name,
servicebus_namespace_parameter_name=servicebus_namespace_parameter_name,
disable_recording=disable_recording,
playback_fake_resource=playback_fake_resource,
client_kwargs=client_kwargs)
self.parameter_name = parameter_name
if random_name_enabled:
self.resource_moniker = self.name_prefix + "sbtopic"
self.set_cache(use_cache)
def create_resource(self, name, **kwargs):
if self.is_live:
self.client = self.create_mgmt_client(ServiceBusManagementClient)
group = self._get_resource_group(**kwargs)
namespace = self._get_namespace(**kwargs)
retries = 4
for i in range(retries):
try:
self.resource = self.client.topics.create_or_update(
group.name,
namespace.name,
name,
{}
)
break
except Exception as ex:
error = "The requested resource {} does not exist".format(namespace)
not_found_error = "Operation returned an invalid status code 'Not Found'"
if (error not in str(ex) and not_found_error not in str(ex)) or i == retries - 1:
raise
time.sleep(3)
self.test_class_instance.scrubber.register_name_pair(
name,
self.resource_moniker
)
else:
self.resource = FakeResource(name=name, id=name)
return {
self.parameter_name: self.resource,
}
def remove_resource(self, name, **kwargs):
if self.is_live:
group = self._get_resource_group(**kwargs)
namespace = self._get_namespace(**kwargs)
self.client.topics.delete(group.name, namespace.name, name, polling=False)
class ServiceBusSubscriptionPreparer(_ServiceBusChildResourcePreparer):
def __init__(self,
name_prefix='',
use_cache=False,
parameter_name=SERVICEBUS_SUBSCRIPTION_PARAM,
resource_group_parameter_name=RESOURCE_GROUP_PARAM,
servicebus_namespace_parameter_name=SERVICEBUS_NAMESPACE_PARAM,
servicebus_topic_parameter_name=SERVICEBUS_TOPIC_PARAM,
requires_session=False,
disable_recording=True, playback_fake_resource=None,
client_kwargs=None, random_name_enabled=True):
super(ServiceBusSubscriptionPreparer, self).__init__(name_prefix,
random_name_enabled=random_name_enabled,
resource_group_parameter_name=resource_group_parameter_name,
servicebus_namespace_parameter_name=servicebus_namespace_parameter_name,
disable_recording=disable_recording,
playback_fake_resource=playback_fake_resource,
client_kwargs=client_kwargs)
self.servicebus_topic_parameter_name = servicebus_topic_parameter_name
self.parameter_name = parameter_name
if random_name_enabled:
self.resource_moniker = self.name_prefix + "sbsub"
        self.set_cache(use_cache, requires_session)
        self.requires_session = requires_session
def create_resource(self, name, **kwargs):
if self.is_live:
self.client = self.create_mgmt_client(ServiceBusManagementClient)
group = self._get_resource_group(**kwargs)
namespace = self._get_namespace(**kwargs)
topic = self._get_topic(**kwargs)
retries = 4
for i in range(retries):
try:
self.resource = self.client.subscriptions.create_or_update(
group.name,
namespace.name,
topic.name,
name,
SBSubscription(
requires_session=self.requires_session
)
)
break
except Exception as ex:
error = "The requested resource {} does not exist".format(namespace)
not_found_error = "Operation returned an invalid status code 'Not Found'"
if (error not in str(ex) and not_found_error not in str(ex)) or i == retries - 1:
raise
time.sleep(3)
self.test_class_instance.scrubber.register_name_pair(
name,
self.resource_moniker
)
else:
self.resource = FakeResource(name=name, id=name)
return {
self.parameter_name: self.resource,
}
def remove_resource(self, name, **kwargs):
if self.is_live:
group = self._get_resource_group(**kwargs)
namespace = self._get_namespace(**kwargs)
topic = self._get_topic(**kwargs)
self.client.subscriptions.delete(group.name, namespace.name, topic.name, name, polling=False)
def _get_topic(self, **kwargs):
try:
return kwargs.get(self.servicebus_topic_parameter_name)
except KeyError:
template = 'To create this service bus subscription a service bus topic is required. Please add ' \
'decorator @{} in front of this service bus preparer.'
raise AzureTestError(template.format(ServiceBusTopicPreparer.__name__))
class ServiceBusQueuePreparer(_ServiceBusChildResourcePreparer):
def __init__(self,
name_prefix='',
use_cache=False,
requires_duplicate_detection=False,
dead_lettering_on_message_expiration=False,
requires_session=False,
lock_duration='PT30S',
parameter_name=SERVICEBUS_QUEUE_PARAM,
resource_group_parameter_name=RESOURCE_GROUP_PARAM,
servicebus_namespace_parameter_name=SERVICEBUS_NAMESPACE_PARAM,
disable_recording=True, playback_fake_resource=None,
client_kwargs=None, random_name_enabled=True):
super(ServiceBusQueuePreparer, self).__init__(name_prefix,
random_name_enabled=random_name_enabled,
resource_group_parameter_name=resource_group_parameter_name,
servicebus_namespace_parameter_name=servicebus_namespace_parameter_name,
disable_recording=disable_recording,
playback_fake_resource=playback_fake_resource,
client_kwargs=client_kwargs)
self.parameter_name = parameter_name
self.set_cache(use_cache, requires_duplicate_detection, dead_lettering_on_message_expiration, requires_session, lock_duration)
# Queue parameters
self.requires_duplicate_detection=requires_duplicate_detection
self.dead_lettering_on_message_expiration=dead_lettering_on_message_expiration
self.requires_session=requires_session
self.lock_duration=lock_duration
if random_name_enabled:
self.resource_moniker = self.name_prefix + "sbqueue"
def create_resource(self, name, **kwargs):
if self.is_live:
self.client = self.create_mgmt_client(ServiceBusManagementClient)
group = self._get_resource_group(**kwargs)
namespace = self._get_namespace(**kwargs)
retries = 4
for i in range(retries):
try:
self.resource = self.client.queues.create_or_update(
group.name,
namespace.name,
name,
SBQueue(
lock_duration=self.lock_duration,
requires_duplicate_detection = self.requires_duplicate_detection,
dead_lettering_on_message_expiration = self.dead_lettering_on_message_expiration,
requires_session = self.requires_session)
)
break
except Exception as ex:
error = "The requested resource {} does not exist".format(namespace)
not_found_error = "Operation returned an invalid status code 'Not Found'"
if (error not in str(ex) and not_found_error not in str(ex)) or i == retries - 1:
raise
time.sleep(3)
self.test_class_instance.scrubber.register_name_pair(
name,
self.resource_moniker
)
else:
self.resource = FakeResource(name=name, id=name)
return {
self.parameter_name: self.resource,
}
def remove_resource(self, name, **kwargs):
if self.is_live:
group = self._get_resource_group(**kwargs)
namespace = self._get_namespace(**kwargs)
self.client.queues.delete(group.name, namespace.name, name, polling=False)
class ServiceBusNamespaceAuthorizationRulePreparer(_ServiceBusChildResourcePreparer):
def __init__(self,
name_prefix='',
use_cache=False,
access_rights=[AccessRights.manage, AccessRights.send, AccessRights.listen],
parameter_name=SERVICEBUS_AUTHORIZATION_RULE_PARAM,
resource_group_parameter_name=RESOURCE_GROUP_PARAM,
servicebus_namespace_parameter_name=SERVICEBUS_NAMESPACE_PARAM,
disable_recording=True, playback_fake_resource=None,
client_kwargs=None, random_name_enabled=True):
super(ServiceBusNamespaceAuthorizationRulePreparer, self).__init__(name_prefix,
random_name_enabled=random_name_enabled,
resource_group_parameter_name=resource_group_parameter_name,
servicebus_namespace_parameter_name=servicebus_namespace_parameter_name,
disable_recording=disable_recording,
playback_fake_resource=playback_fake_resource,
client_kwargs=client_kwargs)
self.parameter_name = parameter_name
self.access_rights = access_rights
if random_name_enabled:
self.resource_moniker = self.name_prefix + "sbnameauth"
self.set_cache(use_cache, access_rights)
def create_resource(self, name, **kwargs):
if self.is_live:
self.client = self.create_mgmt_client(ServiceBusManagementClient)
group = self._get_resource_group(**kwargs)
namespace = self._get_namespace(**kwargs)
retries = 4
for i in range(retries):
try:
self.resource = self.client.namespaces.create_or_update_authorization_rule(
group.name,
namespace.name,
name,
self.access_rights
)
break
except Exception as ex:
error = "The requested resource {} does not exist".format(namespace)
not_found_error = "Operation returned an invalid status code 'Not Found'"
if (error not in str(ex) and not_found_error not in str(ex)) or i == retries - 1:
raise
time.sleep(3)
key = self.client.namespaces.list_keys(group.name, namespace.name, name)
connection_string = key.primary_connection_string
self.test_class_instance.scrubber.register_name_pair(
name,
self.resource_moniker
)
else:
self.resource = FakeResource(name=name, id=name)
connection_string = 'https://microsoft.com'
return {
self.parameter_name: self.resource,
'{}_connection_string'.format(self.parameter_name): connection_string,
}
def remove_resource(self, name, **kwargs):
if self.is_live:
group = self._get_resource_group(**kwargs)
namespace = self._get_namespace(**kwargs)
self.client.namespaces.delete_authorization_rule(group.name, namespace.name, name, polling=False)
class ServiceBusQueueAuthorizationRulePreparer(_ServiceBusChildResourcePreparer):
def __init__(self,
name_prefix='',
use_cache=False,
access_rights=[AccessRights.manage, AccessRights.send, AccessRights.listen],
parameter_name=SERVICEBUS_QUEUE_AUTHORIZATION_RULE_PARAM,
resource_group_parameter_name=RESOURCE_GROUP_PARAM,
servicebus_namespace_parameter_name=SERVICEBUS_NAMESPACE_PARAM,
servicebus_queue_parameter_name=SERVICEBUS_QUEUE_PARAM,
disable_recording=True, playback_fake_resource=None,
client_kwargs=None, random_name_enabled=True):
super(ServiceBusQueueAuthorizationRulePreparer, self).__init__(name_prefix,
random_name_enabled=random_name_enabled,
resource_group_parameter_name=resource_group_parameter_name,
servicebus_namespace_parameter_name=servicebus_namespace_parameter_name,
disable_recording=disable_recording,
playback_fake_resource=playback_fake_resource,
client_kwargs=client_kwargs)
self.parameter_name = parameter_name
self.access_rights = access_rights
self.servicebus_queue_parameter_name = servicebus_queue_parameter_name
if random_name_enabled:
self.resource_moniker = self.name_prefix + "sbqueueauth"
self.set_cache(use_cache, access_rights)
def create_resource(self, name, **kwargs):
if self.is_live:
self.client = self.create_mgmt_client(ServiceBusManagementClient)
group = self._get_resource_group(**kwargs)
namespace = self._get_namespace(**kwargs)
queue = self._get_queue(**kwargs)
retries = 4
for i in range(retries):
try:
self.resource = self.client.queues.create_or_update_authorization_rule(
group.name,
namespace.name,
queue.name,
name,
self.access_rights
)
break
except Exception as ex:
error = "The requested resource {} does not exist".format(namespace)
not_found_error = "Operation returned an invalid status code 'Not Found'"
if (error not in str(ex) and not_found_error not in str(ex)) or i == retries - 1:
raise
time.sleep(3)
key = self.client.queues.list_keys(group.name, namespace.name, queue.name, name)
connection_string = key.primary_connection_string
self.test_class_instance.scrubber.register_name_pair(
name,
self.resource_moniker
)
else:
self.resource = FakeResource(name=name, id=name)
connection_string = 'https://microsoft.com'
return {
self.parameter_name: self.resource,
'{}_connection_string'.format(self.parameter_name): connection_string,
}
def remove_resource(self, name, **kwargs):
if self.is_live:
group = self._get_resource_group(**kwargs)
namespace = self._get_namespace(**kwargs)
queue = self._get_queue(**kwargs)
self.client.queues.delete_authorization_rule(group.name, namespace.name, queue.name, name, polling=False)
def _get_queue(self, **kwargs):
try:
return kwargs.get(self.servicebus_queue_parameter_name)
except KeyError:
template = 'To create this service bus queue authorization rule a service bus queue is required. Please add ' \
'decorator @{} in front of this service bus preparer.'
raise AzureTestError(template.format(ServiceBusQueuePreparer.__name__))
CachedServiceBusNamespacePreparer = functools.partial(ServiceBusNamespacePreparer, use_cache=True)
CachedServiceBusQueuePreparer = functools.partial(ServiceBusQueuePreparer, use_cache=True)
CachedServiceBusTopicPreparer = functools.partial(ServiceBusTopicPreparer, use_cache=True)
CachedServiceBusSubscriptionPreparer = functools.partial(ServiceBusSubscriptionPreparer, use_cache=True)
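# Hedged usage sketch (illustrative, not part of the original module): the
# preparers are stacked as decorators on a test method; each one injects its
# resource (and derived values such as connection strings) as keyword
# arguments whose names follow the *_PARAM constants above.
#
#     class ServiceBusQueueTests(AzureMgmtTestCase):
#         @ResourceGroupPreparer(name_prefix='servicebustest')
#         @CachedServiceBusNamespacePreparer(name_prefix='servicebustest')
#         @CachedServiceBusQueuePreparer(name_prefix='servicebustest')
#         def test_send_receive(self, servicebus_namespace_connection_string=None,
#                               servicebus_queue=None, **kwargs):
#             ...  # connect with the injected connection string and queue name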
| Azure/azure-sdk-for-python | sdk/servicebus/azure-servicebus/tests/servicebus_preparer.py | Python | mit | 25,718 |